Files
drip/internal/shared/pool/adaptive_buffer_pool.go
Gouryella 7283180e6a perf(client): Optimize client performance and introduce a data frame processing worker pool
- Add runtime performance optimization configurations to main.go, including setting GOMAXPROCS, adjusting GC frequency, and memory limits.

- Implement a worker pool-based data frame processing mechanism in connector.go to improve processing capabilities under high concurrency.

- Adjust frame writer configuration to improve batch write efficiency and enable adaptive refresh strategy.

- Add callback handling support for write errors to enhance connection stability.

refactor(server): Introduce an adaptive buffer pool to optimize memory usage

- Add adaptive_buffer_pool.go to implement large and small buffer reuse, reducing memory allocation overhead.

- Apply buffer pool management for large/medium temporary buffers in proxy handlers and TCP connections.

- Change the HTTP response writer to a cached bufio.Writer to improve I/O performance.

- Optimize HTTP request reading logic and response sending process.

build(docker): Update mount paths and remove unused named volumes

- Change the data directory in docker-compose.release.yml to a bind mount: ./data:/app/data

- Remove the unnecessary drip-data named volume definition

test(script): Add performance testing and profiling scripts

- Add profile-test.sh script for automating stress testing and performance data collection

- Supports collecting pprof data such as CPU profiles, stack traces, and goroutines, and generating analysis reports
2025-12-08 12:24:42 +08:00

74 lines
1.8 KiB
Go

package pool
import (
"sync"
)
// AdaptiveBufferPool manages reusable buffers of different sizes.
// It wraps two sync.Pools so hot paths can borrow scratch buffers
// instead of allocating, eliminating the massive memory allocation
// overhead seen in profiling.
//
// Buffers are handed out as *[]byte (pointer to slice) so that
// putting them back into sync.Pool does not itself allocate.
type AdaptiveBufferPool struct {
	// largePool holds large buffers for the streaming threshold (1MB each).
	largePool *sync.Pool
	// mediumPool holds medium buffers for temporary reads (32KB each).
	mediumPool *sync.Pool
}
// Buffer sizes served by AdaptiveBufferPool.
const (
	// LargeBufferSize is 1MB, used as the streaming threshold buffer size.
	LargeBufferSize = 1 * 1024 * 1024
	// MediumBufferSize is 32KB, used for temporary reads.
	MediumBufferSize = 32 * 1024
)
// NewAdaptiveBufferPool creates a new adaptive buffer pool whose
// pools lazily allocate 1MB (large) and 32KB (medium) byte slices.
func NewAdaptiveBufferPool() *AdaptiveBufferPool {
	// makePool builds a sync.Pool that produces *[]byte of the given size.
	makePool := func(size int) *sync.Pool {
		return &sync.Pool{
			New: func() interface{} {
				b := make([]byte, size)
				return &b
			},
		}
	}
	return &AdaptiveBufferPool{
		largePool:  makePool(LargeBufferSize),
		mediumPool: makePool(MediumBufferSize),
	}
}
// GetLarge returns a large buffer (1MB) from the pool.
// The caller should hand the buffer back via PutLarge when done.
func (p *AdaptiveBufferPool) GetLarge() *[]byte {
	buf := p.largePool.Get()
	return buf.(*[]byte)
}
// PutLarge returns a large buffer to the pool for reuse.
//
// Nil buffers are ignored. Buffers whose capacity does not match
// LargeBufferSize are dropped rather than pooled: pooling a foreign
// (smaller or larger) buffer would hand future GetLarge callers a
// buffer of the wrong size.
func (p *AdaptiveBufferPool) PutLarge(buf *[]byte) {
	if buf == nil || cap(*buf) != LargeBufferSize {
		return
	}
	// Reset length to full capacity so the next Get sees a full-size buffer.
	*buf = (*buf)[:cap(*buf)]
	p.largePool.Put(buf)
}
// GetMedium returns a medium buffer (32KB) from the pool.
// The caller should hand the buffer back via PutMedium when done.
func (p *AdaptiveBufferPool) GetMedium() *[]byte {
	buf := p.mediumPool.Get()
	return buf.(*[]byte)
}
// PutMedium returns a medium buffer to the pool for reuse.
//
// Nil buffers are ignored. Buffers whose capacity does not match
// MediumBufferSize are dropped rather than pooled: pooling a foreign
// (smaller or larger) buffer would hand future GetMedium callers a
// buffer of the wrong size.
func (p *AdaptiveBufferPool) PutMedium(buf *[]byte) {
	if buf == nil || cap(*buf) != MediumBufferSize {
		return
	}
	// Reset length to full capacity so the next Get sees a full-size buffer.
	*buf = (*buf)[:cap(*buf)]
	p.mediumPool.Put(buf)
}