mirror of https://github.com/usememos/memos.git
chore(server): remove profiler code and endpoints
Remove the profiler package and all related code that provided pprof endpoints and memory monitoring. This includes:

- Deleted server/profiler/profiler.go with HTTP endpoints for memory profiling
- Removed profiler initialization and shutdown code from server/server.go
- Removed profiler field from Server struct
parent bb3d808e0e
commit 775bed73f4
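Reviewer note: the endpoints removed below are thin Echo wrappers over Go's standard net/http/pprof handlers. If profiling is ever needed again without restoring this package, the usual zero-dependency route is the package's blank import on a private listener. A minimal sketch, assuming a standalone binary and an arbitrarily chosen loopback port (none of this is part of the commit):

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // blank import registers /debug/pprof/* on http.DefaultServeMux
)

func main() {
    // Serve pprof on a loopback-only port so it is never exposed publicly;
    // 6060 is the conventional example port, not a memos default.
    log.Println(http.ListenAndServe("localhost:6060", nil))
}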
--- a/server/profiler/profiler.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package profiler
-
-import (
-    "context"
-    "fmt"
-    "log/slog"
-    "net/http"
-    "net/http/pprof"
-    "runtime"
-    "time"
-
-    "github.com/labstack/echo/v4"
-)
-
-// Profiler provides HTTP endpoints for memory profiling.
-type Profiler struct {
-    memStatsLogInterval time.Duration
-}
-
-// NewProfiler creates a new profiler.
-func NewProfiler() *Profiler {
-    return &Profiler{
-        memStatsLogInterval: 1 * time.Minute,
-    }
-}
-
-// RegisterRoutes adds profiling endpoints to the Echo server.
-func (*Profiler) RegisterRoutes(e *echo.Echo) {
-    // Register pprof handlers
-    g := e.Group("/debug/pprof")
-    g.GET("", echo.WrapHandler(http.HandlerFunc(pprof.Index)))
-    g.GET("/cmdline", echo.WrapHandler(http.HandlerFunc(pprof.Cmdline)))
-    g.GET("/profile", echo.WrapHandler(http.HandlerFunc(pprof.Profile)))
-    g.POST("/symbol", echo.WrapHandler(http.HandlerFunc(pprof.Symbol)))
-    g.GET("/symbol", echo.WrapHandler(http.HandlerFunc(pprof.Symbol)))
-    g.GET("/trace", echo.WrapHandler(http.HandlerFunc(pprof.Trace)))
-    g.GET("/allocs", echo.WrapHandler(http.HandlerFunc(pprof.Handler("allocs").ServeHTTP)))
-    g.GET("/block", echo.WrapHandler(http.HandlerFunc(pprof.Handler("block").ServeHTTP)))
-    g.GET("/goroutine", echo.WrapHandler(http.HandlerFunc(pprof.Handler("goroutine").ServeHTTP)))
-    g.GET("/heap", echo.WrapHandler(http.HandlerFunc(pprof.Handler("heap").ServeHTTP)))
-    g.GET("/mutex", echo.WrapHandler(http.HandlerFunc(pprof.Handler("mutex").ServeHTTP)))
-    g.GET("/threadcreate", echo.WrapHandler(http.HandlerFunc(pprof.Handler("threadcreate").ServeHTTP)))
-
-    // Add a custom memory stats endpoint.
-    g.GET("/memstats", func(c echo.Context) error {
-        var m runtime.MemStats
-        runtime.ReadMemStats(&m)
-        return c.JSON(http.StatusOK, map[string]interface{}{
-            "alloc":       m.Alloc,
-            "totalAlloc":  m.TotalAlloc,
-            "sys":         m.Sys,
-            "numGC":       m.NumGC,
-            "heapAlloc":   m.HeapAlloc,
-            "heapSys":     m.HeapSys,
-            "heapInuse":   m.HeapInuse,
-            "heapObjects": m.HeapObjects,
-        })
-    })
-}
-
-// StartMemoryMonitor starts a goroutine that periodically logs memory stats.
-func (p *Profiler) StartMemoryMonitor(ctx context.Context) {
-    go func() {
-        ticker := time.NewTicker(p.memStatsLogInterval)
-        defer ticker.Stop()
-
-        // Store previous heap allocation to track growth.
-        var lastHeapAlloc uint64
-        var lastNumGC uint32
-
-        for {
-            select {
-            case <-ticker.C:
-                var m runtime.MemStats
-                runtime.ReadMemStats(&m)
-
-                // Calculate heap growth since last check.
-                heapGrowth := int64(m.HeapAlloc) - int64(lastHeapAlloc)
-                gcCount := m.NumGC - lastNumGC
-
-                slog.Info("memory stats",
-                    "heapAlloc", byteCountIEC(m.HeapAlloc),
-                    "heapSys", byteCountIEC(m.HeapSys),
-                    "heapObjects", m.HeapObjects,
-                    "heapGrowth", byteCountIEC(uint64(heapGrowth)),
-                    "numGoroutine", runtime.NumGoroutine(),
-                    "numGC", m.NumGC,
-                    "gcSince", gcCount,
-                    "nextGC", byteCountIEC(m.NextGC),
-                    "gcPause", time.Duration(m.PauseNs[(m.NumGC+255)%256]).String(),
-                )
-
-                // Track values for next iteration.
-                lastHeapAlloc = m.HeapAlloc
-                lastNumGC = m.NumGC
-
-                // Force GC if memory usage is high to see if objects can be reclaimed.
-                if m.HeapAlloc > 500*1024*1024 { // 500 MB threshold
-                    slog.Info("forcing garbage collection due to high memory usage")
-                }
-            case <-ctx.Done():
-                return
-            }
-        }
-    }()
-}
-
-// byteCountIEC converts bytes to a human-readable string (MiB, GiB).
-func byteCountIEC(b uint64) string {
-    const unit = 1024
-    if b < unit {
-        return fmt.Sprintf("%d B", b)
-    }
-    div, exp := uint64(unit), 0
-    for n := b / unit; n >= unit; n /= unit {
-        div *= unit
-        exp++
-    }
-    return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
-}
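Two notes on the deleted file. First, despite its comment, the 500 MB branch only logged; it never invoked the collector. A hedged sketch of what an actual forced collection could have looked like (maybeForceGC is a hypothetical name; runtime.GC and runtime/debug.FreeOSMemory are standard library, the threshold is the one from the removed code):

package main

import (
    "log/slog"
    "runtime"
    "runtime/debug"
)

// maybeForceGC is a hypothetical completion of the removed branch: it
// actually triggers a collection instead of only logging the message,
// and additionally returns freed pages to the OS.
func maybeForceGC() {
    var m runtime.MemStats
    runtime.ReadMemStats(&m)
    if m.HeapAlloc > 500*1024*1024 { // same 500 MB threshold as the removed code
        slog.Info("forcing garbage collection due to high memory usage")
        runtime.GC()         // force an immediate collection
        debug.FreeOSMemory() // return as much memory to the OS as possible
    }
}

func main() { maybeForceGC() }

Second, byteCountIEC rounds to one decimal: byteCountIEC(1536) returns "1.5 KiB". While these routes were registered, the standard tooling could consume them directly, e.g. go tool pprof http://<host>/debug/pprof/heap.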
--- a/server/server.go
+++ b/server/server.go
@@ -21,7 +21,6 @@ import (
 
     "github.com/usememos/memos/internal/profile"
     storepb "github.com/usememos/memos/proto/gen/store"
-    "github.com/usememos/memos/server/profiler"
     apiv1 "github.com/usememos/memos/server/router/api/v1"
     "github.com/usememos/memos/server/router/frontend"
     "github.com/usememos/memos/server/router/rss"
@@ -36,7 +35,6 @@ type Server struct {
 
     echoServer *echo.Echo
     grpcServer *grpc.Server
-    profiler   *profiler.Profiler
     runnerCancelFuncs []context.CancelFunc
 }
 
@@ -53,13 +51,6 @@ func NewServer(ctx context.Context, profile *profile.Profile, store *store.Store
     echoServer.Use(middleware.Recover())
     s.echoServer = echoServer
 
-    if profile.Mode != "prod" {
-        // Initialize profiler
-        s.profiler = profiler.NewProfiler()
-        s.profiler.RegisterRoutes(echoServer)
-        s.profiler.StartMemoryMonitor(ctx)
-    }
-
     instanceBasicSetting, err := s.getOrUpsertInstanceBasicSetting(ctx)
     if err != nil {
         return nil, errors.Wrap(err, "failed to get instance basic setting")
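The removed wiring keyed profiling off the run mode, enabling it in every non-prod mode by default. A stricter alternative pattern, shown only as a hedged sketch (MEMOS_PPROF is a hypothetical variable, not a real memos flag), is to gate debug endpoints on an explicit opt-in:

package main

import "os"

// devProfilingEnabled reports whether debug endpoints should be wired up.
// Requiring an explicit opt-in env var (hypothetical MEMOS_PPROF) is
// stricter than the removed `profile.Mode != "prod"` check.
func devProfilingEnabled() bool {
    return os.Getenv("MEMOS_PPROF") == "1"
}

func main() { _ = devProfilingEnabled() }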
@@ -185,20 +176,6 @@ func (s *Server) Shutdown(ctx context.Context) {
     // Shutdown gRPC server.
     s.grpcServer.GracefulStop()
 
-    // Stop the profiler
-    if s.profiler != nil {
-        slog.Info("stopping profiler")
-        // Log final memory stats
-        var m runtime.MemStats
-        runtime.ReadMemStats(&m)
-        slog.Info("final memory stats before exit",
-            "heapAlloc", m.Alloc,
-            "heapSys", m.Sys,
-            "heapObjects", m.HeapObjects,
-            "numGoroutine", runtime.NumGoroutine(),
-        )
-    }
-
     // Close database connection.
     if err := s.Store.Close(); err != nil {
         slog.Error("failed to close database", slog.String("error", err.Error()))
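With the HTTP endpoints gone, a running deployment has no built-in way to capture a heap profile. For completeness, a minimal file-based alternative using the standard runtime/pprof package (a sketch; the helper name and path are illustrative, nothing like this exists in memos):

package main

import (
    "log"
    "os"
    "runtime/pprof"
)

// writeHeapProfile dumps the current heap profile to path. The resulting
// file can be inspected offline with `go tool pprof <binary> <path>`.
func writeHeapProfile(path string) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()
    return pprof.WriteHeapProfile(f)
}

func main() {
    if err := writeHeapProfile("heap.out"); err != nil {
        log.Fatal(err)
    }
}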