perf(client): Optimize client performance and introduce a data frame processing worker pool

- Add runtime performance optimization configuration to main.go, including setting GOMAXPROCS, adjusting GC frequency, and setting a memory limit.

- Implement a worker pool-based data frame processing mechanism in connector.go to improve processing capabilities under high concurrency.

- Adjust frame writer configuration to improve batch write efficiency and enable adaptive refresh strategy.

- Add callback handling support for write errors to enhance connection stability.

refactor(server): Introduce an adaptive buffer pool to optimize memory usage

- Add adaptive_buffer_pool.go to implement large and small buffer reuse, reducing memory allocation overhead.

- Apply buffer pool management for large/medium temporary buffers in proxy handlers and TCP connections.

- Change the HTTP response writer to a cached bufio.Writer to improve I/O performance.

- Optimize HTTP request reading logic and response sending process.

build(docker): Update mount paths and remove unused named volumes

- Modify the data directory mount in docker-compose.release.yml to use a bind mount (`./data:/app/data`).

- Remove the unnecessary drip-data named volume definition

test(script): Add performance testing and profiling scripts

- Add profile-test.sh script for automating stress testing and performance data collection

- Supports collecting pprof data (CPU profiles, stack traces, and goroutine profiles) and generating analysis reports.
This commit is contained in:
Gouryella
2025-12-08 12:24:42 +08:00
parent 9e8b3b001d
commit 7283180e6a
11 changed files with 724 additions and 90 deletions

View File

@@ -22,12 +22,21 @@ type FrameWriter struct {
heartbeatCallback func() *Frame
heartbeatEnabled bool
heartbeatControl chan struct{}
// Error handling
writeErr error
errOnce sync.Once
onWriteError func(error) // Callback for write errors
// Adaptive flushing
adaptiveFlush bool // Enable adaptive flush based on queue depth
lowConcurrencyThreshold int // Queue depth threshold for immediate flush
}
// NewFrameWriter returns a FrameWriter with defaults tuned for bursty
// workloads: batches of up to 256 frames, a 2ms maximum batch wait, and a
// 4096-entry queue. Adaptive flushing is enabled with a low-concurrency
// threshold of 16, so shallow queues are flushed immediately for latency.
func NewFrameWriter(conn io.Writer) *FrameWriter {
	// Larger queue size for better burst handling across all load scenarios.
	// With the adaptive buffer pool, memory pressure is well controlled.
	// NOTE: the stale pre-change `return NewFrameWriterWithConfig(conn, 128, ...)`
	// line left over from the diff has been removed; it made the lines below
	// unreachable dead code.
	w := NewFrameWriterWithConfig(conn, 256, 2*time.Millisecond, 4096)
	w.EnableAdaptiveFlush(16)
	return w
}
func NewFrameWriterWithConfig(conn io.Writer, maxBatch int, maxBatchWait time.Duration, queueSize int) *FrameWriter {
@@ -77,7 +86,10 @@ func (w *FrameWriter) writeLoop() {
w.mu.Lock()
w.batch = append(w.batch, frame)
if len(w.batch) >= w.maxBatch {
shouldFlushNow := len(w.batch) >= w.maxBatch ||
(w.adaptiveFlush && len(w.queue) <= w.lowConcurrencyThreshold)
if shouldFlushNow {
w.flushBatchLocked()
}
w.mu.Unlock()
@@ -127,7 +139,15 @@ func (w *FrameWriter) flushBatchLocked() {
}
for _, frame := range w.batch {
_ = WriteFrame(w.conn, frame)
if err := WriteFrame(w.conn, frame); err != nil {
w.errOnce.Do(func() {
w.writeErr = err
if w.onWriteError != nil {
go w.onWriteError(err)
}
w.closed = true
})
}
frame.Release()
}
@@ -138,6 +158,9 @@ func (w *FrameWriter) WriteFrame(frame *Frame) error {
w.mu.Lock()
if w.closed {
w.mu.Unlock()
if w.writeErr != nil {
return w.writeErr
}
return errors.New("writer closed")
}
w.mu.Unlock()
@@ -146,6 +169,12 @@ func (w *FrameWriter) WriteFrame(frame *Frame) error {
case w.queue <- frame:
return nil
case <-w.done:
w.mu.Lock()
err := w.writeErr
w.mu.Unlock()
if err != nil {
return err
}
return errors.New("writer closed")
}
}
@@ -177,7 +206,6 @@ func (w *FrameWriter) Flush() {
return
}
// First, drain the queue into batch
for {
select {
case frame, ok := <-w.queue:
@@ -190,7 +218,6 @@ func (w *FrameWriter) Flush() {
}
}
done:
// Then flush the batch
w.flushBatchLocked()
w.mu.Unlock()
}
@@ -218,3 +245,22 @@ func (w *FrameWriter) DisableHeartbeat() {
default:
}
}
// SetWriteErrorHandler registers a callback to be invoked when a frame
// write fails. The handler is stored under the writer's mutex so it can
// be set concurrently with the write loop.
func (w *FrameWriter) SetWriteErrorHandler(handler func(error)) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.onWriteError = handler
}
// EnableAdaptiveFlush turns on adaptive flushing: when the pending queue
// depth is at or below lowConcurrencyThreshold, batches are flushed
// immediately instead of waiting for the batch to fill.
func (w *FrameWriter) EnableAdaptiveFlush(lowConcurrencyThreshold int) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.adaptiveFlush = true
	w.lowConcurrencyThreshold = lowConcurrencyThreshold
}
// DisableAdaptiveFlush turns off adaptive flushing; batches then flush
// only when full or when the batch-wait timer fires.
func (w *FrameWriter) DisableAdaptiveFlush() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.adaptiveFlush = false
}