- Add runtime performance tuning to main.go: set GOMAXPROCS, adjust GC frequency, and set a memory limit.
- Implement worker-pool-based data-frame processing in connector.go to improve throughput under high concurrency.
- Tune the frame-writer configuration for more efficient batch writes and enable an adaptive flush strategy.
- Add callback handling for write errors to improve connection stability.

refactor(server): Introduce an adaptive buffer pool to optimize memory usage

- Add adaptive_buffer_pool.go, which reuses large and small buffers to cut memory-allocation overhead.
- Route large and medium temporary buffers in the proxy handlers and TCP connections through the buffer pool.
- Switch the HTTP response writer to a cached bufio.Writer to improve I/O performance.
- Streamline HTTP request reading and response sending.

build(docker): Update mount paths and remove unused named volumes

- Change the data-directory mount in docker-compose.release.yml to ./data:/app/data.
- Remove the unneeded drip-data named-volume definition.

test(script): Add performance-testing and profiling scripts

- Add profile-test.sh to automate stress testing and performance-data collection.
- Collect pprof data (CPU profiles, stack traces, goroutines) and generate analysis reports.
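main.go itself is not shown on this page, but the runtime tuning described above maps onto a handful of standard-library calls. A minimal sketch, assuming stdlib-only tuning; the GC percentage and memory-limit values are illustrative, not drip's actual settings:

package main

import (
    "runtime"
    "runtime/debug"
)

// applyRuntimeTuning is a hypothetical helper illustrating the kind of
// tuning the commit describes; the concrete values are assumptions.
func applyRuntimeTuning() {
    // Use all logical CPUs (the default since Go 1.5, made explicit here).
    runtime.GOMAXPROCS(runtime.NumCPU())

    // Raise the GC target percentage so collections run less often,
    // trading memory for throughput.
    debug.SetGCPercent(200)

    // Cap total heap usage (Go 1.19+); 4 GiB is an arbitrary example.
    debug.SetMemoryLimit(4 << 30)
}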
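adaptive_buffer_pool.go is likewise not shown here. A minimal sketch of the large/small buffer-reuse idea built on sync.Pool; the names and the 4 KiB / 64 KiB tier thresholds are assumptions, not the values used in drip:

package pool

import "sync"

// Two-tier buffer reuse: small buffers for frame headers and control
// data, large buffers for payload copies. Thresholds are assumptions.
var (
    smallBufs = sync.Pool{New: func() any { b := make([]byte, 4<<10); return &b }}
    largeBufs = sync.Pool{New: func() any { b := make([]byte, 64<<10); return &b }}
)

// GetBuffer returns a pooled buffer of at least n bytes.
func GetBuffer(n int) *[]byte {
    switch {
    case n <= 4<<10:
        return smallBufs.Get().(*[]byte)
    case n <= 64<<10:
        return largeBufs.Get().(*[]byte)
    default:
        b := make([]byte, n) // oversized requests bypass the pools
        return &b
    }
}

// PutBuffer returns a buffer to the matching pool; buffers that did not
// come from a pool are left for the GC.
func PutBuffer(b *[]byte) {
    switch cap(*b) {
    case 4 << 10:
        smallBufs.Put(b)
    case 64 << 10:
        largeBufs.Put(b)
    }
}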
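The switch to a cached bufio.Writer for HTTP responses can be pictured as buffering writes to the underlying connection and flushing once per response. A generic sketch, not drip's actual wiring; the 32 KiB buffer size is an assumption:

package main

import (
    "bufio"
    "net"
    "net/http"
)

// writeResponse sends an http.Response through a bufio.Writer instead of
// writing to the connection directly, coalescing many small writes into
// fewer syscalls. The 32 KiB buffer size is an assumption.
func writeResponse(conn net.Conn, resp *http.Response) error {
    w := bufio.NewWriterSize(conn, 32<<10)
    if err := resp.Write(w); err != nil {
        return err
    }
    return w.Flush() // push any buffered bytes to the wire
}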
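On the Go side, a script like profile-test.sh only needs the standard pprof endpoints exposed. A generic sketch of that wiring; the port and address are assumptions:

package main

import (
    "log"
    "net/http"
    _ "net/http/pprof" // registers /debug/pprof/* on the default mux
)

func main() {
    // Expose CPU, heap, and goroutine profiles for collection by a
    // script such as profile-test.sh, e.g.:
    //   go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
    log.Println(http.ListenAndServe("localhost:6060", nil))
}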
122 lines · 2.1 KiB · Go
package pool

import (
    "runtime"
    "sync"
)

// NumCPU returns the number of logical CPUs available.
func NumCPU() int {
    return runtime.NumCPU()
}

// WorkerPool is a fixed-size goroutine pool for handling tasks.
type WorkerPool struct {
    workers  int
    jobQueue chan func()
    wg       sync.WaitGroup
    once     sync.Once
    closed   bool
    mu       sync.RWMutex
}

// NewWorkerPool creates a new worker pool with the specified number of workers.
func NewWorkerPool(workers int, queueSize int) *WorkerPool {
    if workers <= 0 {
        workers = 50 // Default worker count
    }
    if queueSize <= 0 {
        queueSize = 1000 // Default queue size
    }

    pool := &WorkerPool{
        workers:  workers,
        jobQueue: make(chan func(), queueSize),
    }

    // Start worker goroutines
    for i := 0; i < workers; i++ {
        pool.wg.Add(1)
        go pool.worker()
    }

    return pool
}

// worker processes jobs from the queue until the queue is closed.
func (p *WorkerPool) worker() {
    defer p.wg.Done()

    for job := range p.jobQueue {
        if job != nil {
            job()
        }
    }
}

// Submit submits a job to the worker pool.
// Returns false if the pool is closed or the queue is full.
func (p *WorkerPool) Submit(job func()) bool {
    if job == nil {
        return false
    }

    // Hold the read lock across the send: Close sets closed and closes
    // jobQueue under the write lock, so the send can never race with the
    // channel close (which would panic).
    p.mu.RLock()
    defer p.mu.RUnlock()

    if p.closed {
        return false
    }

    // Non-blocking send
    select {
    case p.jobQueue <- job:
        return true
    default:
        // Queue is full: run the job on its own goroutine instead, so an
        // overloaded pool never blocks the caller.
        go job()
        return false
    }
}

// SubmitWait submits a job and waits for it to complete.
func (p *WorkerPool) SubmitWait(job func()) {
    if job == nil {
        return
    }

    // once guards against double execution: when the queue is full,
    // Submit already runs the wrapped job on its own goroutine even
    // though it returns false.
    var once sync.Once
    done := make(chan struct{})
    wrappedJob := func() {
        defer close(done)
        once.Do(job)
    }

    if p.Submit(wrappedJob) {
        <-done
    } else {
        // Pool closed (job never ran) or queue full (wrappedJob already
        // spawned): once.Do runs the job exactly once and blocks until
        // that single execution finishes.
        once.Do(job)
    }
}

// Close gracefully shuts down the worker pool.
// It waits for all pending jobs to complete.
func (p *WorkerPool) Close() {
    p.once.Do(func() {
        p.mu.Lock()
        p.closed = true
        close(p.jobQueue) // safe: Submit holds the read lock while sending
        p.mu.Unlock()

        p.wg.Wait()
    })
}

// IsClosed returns true if the pool is closed.
func (p *WorkerPool) IsClosed() bool {
    p.mu.RLock()
    defer p.mu.RUnlock()
    return p.closed
}
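For reference, a minimal usage sketch of the pool above; the import path is a placeholder and the pool sizes are arbitrary:

package main

import (
    "fmt"

    "example.com/drip/pool" // placeholder; use the repo's actual module path
)

func main() {
    // One worker per logical CPU, with room to queue a small burst.
    p := pool.NewWorkerPool(pool.NumCPU(), 256)

    for i := 0; i < 10; i++ {
        i := i // capture the loop variable (pre-Go 1.22 semantics)
        if !p.Submit(func() { fmt.Println("async job", i) }) {
            // Rejected: the pool is closed, or the queue was full and
            // the job ran on its own goroutine (see Submit).
            fmt.Println("job", i, "was not queued")
        }
    }

    // SubmitWait blocks until the job has actually run.
    p.SubmitWait(func() { fmt.Println("sync job done") })

    // Close waits for all queued jobs to finish.
    p.Close()
}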