perf(client): Optimize client performance and introduce a data frame processing worker pool

- Add runtime performance optimization configurations to main.go, including setting GOMAXPROCS, adjusting GC frequency, and memory limits.

- Implement a worker pool-based data frame processing mechanism in connector.go to improve processing capabilities under high concurrency.

- Adjust frame writer configuration to improve batch write efficiency and enable adaptive refresh strategy.

- Add callback handling support for write errors to enhance connection stability.

refactor(server): Introduce an adaptive buffer pool to optimize memory usage

- Add adaptive_buffer_pool.go to implement large and small buffer reuse, reducing memory allocation overhead.

- Apply buffer pool management for large/medium temporary buffers in proxy handlers and TCP connections.

- Change the HTTP response writer to a cached bufio.Writer to improve I/O performance.

- Optimize HTTP request reading logic and response sending process.

build(docker): Update mount paths and remove unused named volumes

- Change the data directory mount in docker-compose.release.yml to a bind mount (`./data:/app/data`)

- Remove the now-unused `drip-data` named volume definition

test(script): Add performance testing and profiling scripts

- Add profile-test.sh script for automating stress testing and performance data collection

- Supports collecting pprof data (CPU profiles, stack traces, and goroutine dumps) and generating analysis reports
This commit is contained in:
Gouryella
2025-12-08 12:24:42 +08:00
parent 9e8b3b001d
commit 7283180e6a
11 changed files with 724 additions and 90 deletions

View File

@@ -25,6 +25,7 @@ type Handler struct {
domain string
authToken string
headerPool *pool.HeaderPool
bufferPool *pool.AdaptiveBufferPool
}
func NewHandler(manager *tunnel.Manager, logger *zap.Logger, responses *ResponseHandler, domain string, authToken string) *Handler {
@@ -35,6 +36,7 @@ func NewHandler(manager *tunnel.Manager, logger *zap.Logger, responses *Response
domain: domain,
authToken: authToken,
headerPool: pool.NewHeaderPool(),
bufferPool: pool.NewAdaptiveBufferPool(),
}
}
@@ -103,8 +105,16 @@ func (h *Handler) handleAdaptiveRequest(w http.ResponseWriter, r *http.Request,
defer h.responses.CleanupCancelFunc(requestID)
}
buffer := make([]byte, 0, streamingThreshold)
tempBuf := make([]byte, 32*1024)
largeBufferPtr := h.bufferPool.GetLarge()
tempBufPtr := h.bufferPool.GetMedium()
defer func() {
h.bufferPool.PutLarge(largeBufferPtr)
h.bufferPool.PutMedium(tempBufPtr)
}()
buffer := (*largeBufferPtr)[:0]
tempBuf := (*tempBufPtr)[:pool.MediumBufferSize]
var totalRead int64
var hitThreshold bool

View File

@@ -211,12 +211,15 @@ func (c *Connection) Handle() error {
return fmt.Errorf("failed to send registration ack: %w", err)
}
// Create frame writer for async writes
c.frameWriter = protocol.NewFrameWriter(c.conn)
c.frameWriter.SetWriteErrorHandler(func(err error) {
c.logger.Error("Write error detected, closing connection", zap.Error(err))
c.Close()
})
c.conn.SetReadDeadline(time.Time{})
// Start TCP proxy only for TCP tunnels
if req.TunnelType == protocol.TunnelTypeTCP {
c.proxy = NewTunnelProxy(c.port, subdomain, c.conn, c.logger)
if err := c.proxy.Start(); err != nil {
@@ -226,13 +229,10 @@ func (c *Connection) Handle() error {
go c.heartbeatChecker()
// Handle frames (pass reader for consistent buffering)
return c.handleFrames(reader)
}
// handleHTTPRequest handles HTTP requests that arrive on the TCP port
func (c *Connection) handleHTTPRequest(reader *bufio.Reader) error {
// If no HTTP handler is configured, return error
if c.httpHandler == nil {
c.logger.Warn("HTTP request received but no HTTP handler configured")
response := "HTTP/1.1 503 Service Unavailable\r\n" +
@@ -295,18 +295,19 @@ func (c *Connection) handleHTTPRequest(reader *bufio.Reader) error {
zap.String("host", req.Host),
)
// Create a response writer that writes directly to the connection
respWriter := &httpResponseWriter{
conn: c.conn,
writer: bufio.NewWriterSize(c.conn, 4096),
header: make(http.Header),
}
// Handle the request - this blocks until response is complete
c.httpHandler.ServeHTTP(respWriter, req)
// Ensure response is flushed to client
if err := respWriter.writer.Flush(); err != nil {
c.logger.Debug("Failed to flush HTTP response", zap.Error(err))
}
if tcpConn, ok := c.conn.(*net.TCPConn); ok {
// Force flush TCP buffers
tcpConn.SetNoDelay(true)
tcpConn.SetNoDelay(false)
}
@@ -316,19 +317,15 @@ func (c *Connection) handleHTTPRequest(reader *bufio.Reader) error {
zap.String("url", req.URL.String()),
)
// Check if we should close the connection
// Close if: Connection: close header, or HTTP/1.0 without Connection: keep-alive
shouldClose := false
if req.Close {
shouldClose = true
} else if req.ProtoMajor == 1 && req.ProtoMinor == 0 {
// HTTP/1.0 defaults to close unless keep-alive is explicitly requested
if req.Header.Get("Connection") != "keep-alive" {
shouldClose = true
}
}
// Also check if response indicated connection should close
if respWriter.headerWritten && respWriter.header.Get("Connection") == "close" {
shouldClose = true
}
@@ -563,20 +560,13 @@ func (c *Connection) heartbeatChecker() {
}
}
// SendFrame sends a frame to the client
func (c *Connection) SendFrame(frame *protocol.Frame) error {
if c.frameWriter == nil {
return protocol.WriteFrame(c.conn, frame)
}
if err := c.frameWriter.WriteFrame(frame); err != nil {
return err
}
// Flush immediately to ensure the frame is sent without batching delay
c.frameWriter.Flush()
return nil
return c.frameWriter.WriteFrame(frame)
}
// sendError sends an error frame to the client
func (c *Connection) sendError(code, message string) {
errMsg := protocol.ErrorMessage{
Code: code,
@@ -586,22 +576,20 @@ func (c *Connection) sendError(code, message string) {
errFrame := protocol.NewFrame(protocol.FrameTypeError, data)
if c.frameWriter == nil {
// Fallback if frameWriter not initialized (early errors)
protocol.WriteFrame(c.conn, errFrame)
} else {
c.frameWriter.WriteFrame(errFrame)
}
}
// Close closes the connection
func (c *Connection) Close() {
c.once.Do(func() {
// Unregister connection from adaptive load tracking
protocol.UnregisterConnection()
close(c.stopCh)
if c.frameWriter != nil {
c.frameWriter.Flush()
c.frameWriter.Close()
}
@@ -633,6 +621,7 @@ func (c *Connection) GetSubdomain() string {
// httpResponseWriter implements http.ResponseWriter for writing to a net.Conn
type httpResponseWriter struct {
conn net.Conn
writer *bufio.Writer // Buffered writer for efficient I/O
header http.Header
statusCode int
headerWritten bool
@@ -649,27 +638,32 @@ func (w *httpResponseWriter) WriteHeader(statusCode int) {
w.statusCode = statusCode
w.headerWritten = true
// Write status line
statusText := http.StatusText(statusCode)
if statusText == "" {
statusText = "Unknown"
}
fmt.Fprintf(w.conn, "HTTP/1.1 %d %s\r\n", statusCode, statusText)
// Write headers
w.writer.WriteString("HTTP/1.1 ")
w.writer.WriteString(fmt.Sprintf("%d", statusCode))
w.writer.WriteByte(' ')
w.writer.WriteString(statusText)
w.writer.WriteString("\r\n")
for key, values := range w.header {
for _, value := range values {
fmt.Fprintf(w.conn, "%s: %s\r\n", key, value)
w.writer.WriteString(key)
w.writer.WriteString(": ")
w.writer.WriteString(value)
w.writer.WriteString("\r\n")
}
}
// Write empty line to end headers
fmt.Fprintf(w.conn, "\r\n")
w.writer.WriteString("\r\n")
}
func (w *httpResponseWriter) Write(data []byte) (int, error) {
if !w.headerWritten {
w.WriteHeader(http.StatusOK)
}
return w.conn.Write(data)
return w.writer.Write(data)
}

View File

@@ -34,11 +34,17 @@ type Listener struct {
workerPool *pool.WorkerPool // Worker pool for connection handling
}
// NewListener creates a new TCP listener
func NewListener(address string, tlsConfig *tls.Config, authToken string, manager *tunnel.Manager, logger *zap.Logger, portAlloc *PortAllocator, domain string, publicPort int, httpHandler http.Handler, responseChans HTTPResponseHandler) *Listener {
// Create worker pool with 50 workers and queue size of 1000
// This reduces goroutine creation overhead for connection handling
workerPool := pool.NewWorkerPool(50, 1000)
numCPU := pool.NumCPU()
workers := numCPU * 5
queueSize := workers * 20
workerPool := pool.NewWorkerPool(workers, queueSize)
logger.Info("Worker pool configured",
zap.Int("cpu_cores", numCPU),
zap.Int("workers", workers),
zap.Int("queue_size", queueSize),
)
return &Listener{
address: address,
@@ -107,14 +113,11 @@ func (l *Listener) acceptLoop() {
}
}
// Handle connection using worker pool instead of creating new goroutine
// This reduces goroutine creation overhead and improves performance
l.wg.Add(1)
submitted := l.workerPool.Submit(func() {
l.handleConnection(conn)
})
// If pool is full or closed, fall back to direct goroutine
if !submitted {
go l.handleConnection(conn)
}
@@ -133,8 +136,6 @@ func (l *Listener) handleConnection(netConn net.Conn) {
}
if err := tlsConn.Handshake(); err != nil {
// TLS handshake failures are common (HTTP clients, scanners, etc.)
// Log as WARN instead of ERROR
l.logger.Warn("TLS handshake failed",
zap.String("remote_addr", netConn.RemoteAddr().String()),
zap.Error(err),
@@ -142,6 +143,14 @@ func (l *Listener) handleConnection(netConn net.Conn) {
return
}
if tcpConn, ok := tlsConn.NetConn().(*net.TCPConn); ok {
tcpConn.SetNoDelay(true)
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
tcpConn.SetReadBuffer(256 * 1024)
tcpConn.SetWriteBuffer(256 * 1024)
}
state := tlsConn.ConnectionState()
l.logger.Info("New connection",
zap.String("remote_addr", netConn.RemoteAddr().String()),