mirror of
https://github.com/Gouryella/drip.git
synced 2026-04-29 05:40:00 +00:00
perf(client): Optimize client performance and introduce a data frame processing worker pool
- Add runtime performance optimization configurations to main.go, including setting GOMAXPROCS, adjusting GC frequency, and memory limits.
- Implement a worker-pool-based data frame processing mechanism in connector.go to improve processing capability under high concurrency.
- Adjust frame writer configuration to improve batch write efficiency and enable an adaptive refresh strategy.
- Add callback handling support for write errors to enhance connection stability.

refactor(server): Introduce an adaptive buffer pool to optimize memory usage
- Add adaptive_buffer_pool.go to implement large and small buffer reuse, reducing memory allocation overhead.
- Apply buffer pool management for large/medium temporary buffers in proxy handlers and TCP connections.
- Change the HTTP response writer to a cached bufio.Writer to improve I/O performance.
- Optimize HTTP request reading logic and the response sending process.

build(docker): Update mount paths and remove unused named volumes
- Modify the data directory mount in docker-compose.release.yml to `./data:/app/data`.
- Remove the unnecessary `drip-data` named volume definition.

test(script): Add performance testing and profiling scripts
- Add a profile-test.sh script for automating stress testing and performance data collection.
- Supports collecting pprof data such as CPU profiles, stack traces, and goroutines, and generating analysis reports.
This commit is contained in:
73
internal/shared/pool/adaptive_buffer_pool.go
Normal file
73
internal/shared/pool/adaptive_buffer_pool.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AdaptiveBufferPool manages reusable buffers of different sizes
|
||||
// This eliminates the massive memory allocation overhead seen in profiling
|
||||
// AdaptiveBufferPool manages reusable byte buffers in two fixed size
// classes (1MB "large" and 32KB "medium"), eliminating the per-call
// allocation overhead seen in profiling on hot I/O paths.
// The zero value is not usable; construct with NewAdaptiveBufferPool.
type AdaptiveBufferPool struct {
	// largePool holds LargeBufferSize buffers for the streaming threshold.
	largePool *sync.Pool

	// mediumPool holds MediumBufferSize buffers for temporary reads.
	mediumPool *sync.Pool
}

const (
	// LargeBufferSize is the capacity (1MB) of buffers served by GetLarge.
	LargeBufferSize = 1 * 1024 * 1024

	// MediumBufferSize is the capacity (32KB) of buffers served by GetMedium.
	MediumBufferSize = 32 * 1024
)

// NewAdaptiveBufferPool creates a new adaptive buffer pool. Buffers are
// allocated lazily by sync.Pool on first Get, so construction is cheap.
func NewAdaptiveBufferPool() *AdaptiveBufferPool {
	// newSized builds a sync.Pool whose New allocates a *[]byte of the
	// given size. Pointers are pooled (rather than plain slices) so the
	// slice header is not re-boxed into interface{} on every Put.
	newSized := func(size int) *sync.Pool {
		return &sync.Pool{
			New: func() interface{} {
				buf := make([]byte, size)
				return &buf
			},
		}
	}
	return &AdaptiveBufferPool{
		largePool:  newSized(LargeBufferSize),
		mediumPool: newSized(MediumBufferSize),
	}
}

// GetLarge returns a large buffer (len == LargeBufferSize) from the pool.
// The returned buffer should be returned via PutLarge when done.
func (p *AdaptiveBufferPool) GetLarge() *[]byte {
	return p.largePool.Get().(*[]byte)
}

// PutLarge returns a large buffer to the pool for reuse. nil buffers and
// buffers whose capacity is not exactly LargeBufferSize are dropped, so a
// misrouted or reallocated (e.g. append-grown) buffer cannot poison the
// pool and cause a later GetLarge to serve a wrong-size buffer.
func (p *AdaptiveBufferPool) PutLarge(buf *[]byte) {
	if buf == nil || cap(*buf) != LargeBufferSize {
		return
	}
	// Restore full length so the next GetLarge sees the whole buffer.
	*buf = (*buf)[:cap(*buf)]
	p.largePool.Put(buf)
}

// GetMedium returns a medium buffer (len == MediumBufferSize) from the pool.
// The returned buffer should be returned via PutMedium when done.
func (p *AdaptiveBufferPool) GetMedium() *[]byte {
	return p.mediumPool.Get().(*[]byte)
}

// PutMedium returns a medium buffer to the pool for reuse. nil buffers and
// buffers whose capacity is not exactly MediumBufferSize are dropped — see
// PutLarge for the rationale.
func (p *AdaptiveBufferPool) PutMedium(buf *[]byte) {
	if buf == nil || cap(*buf) != MediumBufferSize {
		return
	}
	// Restore full length so the next GetMedium sees the whole buffer.
	*buf = (*buf)[:cap(*buf)]
	p.mediumPool.Put(buf)
}
|
||||
@@ -1,9 +1,15 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// NumCPU returns the number of logical CPUs available
|
||||
// NumCPU reports the number of logical CPUs available to the process.
func NumCPU() int {
	n := runtime.NumCPU()
	return n
}
|
||||
|
||||
// WorkerPool is a fixed-size goroutine pool for handling tasks
|
||||
type WorkerPool struct {
|
||||
workers int
|
||||
|
||||
Reference in New Issue
Block a user