feat(client): Added the --short option to the version command to support plain text output.

Added the `--short` flag to the `version` command for printing version information without styles.

In this mode, only the version, Git commit hash, and build time in plain text format will be output, facilitating script parsing.

Optimized Windows process detection logic to improve runtime accuracy.

Removed redundant comments and simplified signal checking methods, making the code clearer and easier to maintain.

refactor(protocol): Replaced string matching of data frame types with enumeration types.

Unified the representation of data frame types in the protocol, using the `DataType` enumeration to improve performance and readability.

Introduced a pooled buffer mechanism to improve memory efficiency in high-load scenarios.

refactor(ui): Adjusted style definitions, removing hard-coded color values.

Removed fixed color settings from some lipgloss styles, providing flexibility for future theme customization.

docs(install): Improved the version extraction function in the installation script.

Added the `get_version_from_binary` function to enhance version identification capabilities, prioritizing plain mode output, ensuring accurate version number acquisition for the drip client or server across different terminal environments.

perf(tcp): Improved TCP processing performance and connection management capabilities.

Adjusted HTTP client transmission parameter configuration, increasing the maximum number of idle connections to accommodate higher concurrent requests.

Improved error handling logic, adding special checks for common cases such as closing network connections to avoid log pollution.

chore(writer): Expanded the FrameWriter queue length to improve batch write stability.

Increased the FrameWriter queue size from 1024 to 2048, and released pooled resources after flushing, better handling sudden traffic spikes and reducing memory usage fluctuations.
This commit is contained in:
Gouryella
2025-12-03 18:11:37 +08:00
parent bb5ed1739e
commit 35e6c86e1f
16 changed files with 315 additions and 211 deletions

View File

@@ -0,0 +1,82 @@
package protocol
import (
"sync/atomic"
"time"
"drip/internal/shared/pool"
)
// AdaptivePoolManager dynamically adjusts buffer pool usage based on load,
// where "load" is the number of active connections reported through
// RegisterConnection / UnregisterConnection.
type AdaptivePoolManager struct {
	// activeConnections counts currently registered connections.
	activeConnections atomic.Int64
	// currentThreshold is the payload-size threshold (bytes) currently in
	// effect; read via GetThreshold, rewritten by the monitor goroutine.
	currentThreshold atomic.Int64
	// highLoadConnectionThreshold is the connection count at or above which
	// the high-load threshold takes effect (300 in NewAdaptivePoolManager).
	highLoadConnectionThreshold int64
	// midLoadConnectionThreshold is the connection count below which the
	// mid-load threshold is restored (150 in NewAdaptivePoolManager).
	midLoadConnectionThreshold int64
	// midLoadThreshold is the byte threshold used under mid/low load
	// (pool.SizeLarge).
	midLoadThreshold int64
	// highLoadThreshold is the byte threshold used under high load
	// (pool.SizeMedium).
	highLoadThreshold int64
}
// globalAdaptiveManager backs the package-level helpers
// (GetAdaptiveThreshold, RegisterConnection, UnregisterConnection,
// GetActiveConnections). NOTE: constructing it here starts the monitor
// goroutine at package initialization.
var globalAdaptiveManager = NewAdaptivePoolManager()
// NewAdaptivePoolManager builds a manager with the default load boundaries
// (high load at >= 300 connections, mid load below 150), seeds the active
// threshold with the relaxed mid-load value, and launches the background
// monitor.
//
// NOTE(review): the monitor goroutine has no stop mechanism, so every
// manager created here runs a ticker for the life of the process — confirm
// only the package-level singleton calls this.
func NewAdaptivePoolManager() *AdaptivePoolManager {
	mgr := new(AdaptivePoolManager)
	mgr.highLoadConnectionThreshold = 300
	mgr.midLoadConnectionThreshold = 150
	mgr.midLoadThreshold = int64(pool.SizeLarge)
	mgr.highLoadThreshold = int64(pool.SizeMedium)

	// Start in the mid-load regime until the monitor observes otherwise.
	mgr.currentThreshold.Store(mgr.midLoadThreshold)
	go mgr.monitor()
	return mgr
}
// monitor re-evaluates the active threshold once per second from the current
// connection count. Inside the band between the two connection boundaries the
// previous threshold is deliberately left untouched (hysteresis), which keeps
// the pool from flapping between modes.
func (m *AdaptivePoolManager) monitor() {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		active := m.activeConnections.Load()
		switch {
		case active >= m.highLoadConnectionThreshold:
			m.currentThreshold.Store(m.highLoadThreshold)
		case active < m.midLoadConnectionThreshold:
			m.currentThreshold.Store(m.midLoadThreshold)
		default:
			// Hysteresis zone (150-300): keep the current threshold.
		}
	}
}
// GetThreshold reports the payload-size threshold currently in effect.
func (m *AdaptivePoolManager) GetThreshold() int {
	threshold := m.currentThreshold.Load()
	return int(threshold)
}
// RegisterConnection records one additional active connection.
func (m *AdaptivePoolManager) RegisterConnection() {
	m.activeConnections.Add(1)
}
// UnregisterConnection records that one active connection has gone away.
// NOTE(review): there is no floor at zero — an unmatched call drives the
// count negative; confirm callers pair this with RegisterConnection.
func (m *AdaptivePoolManager) UnregisterConnection() {
	m.activeConnections.Add(-1)
}
// GetActiveConnections reports how many connections are currently registered.
func (m *AdaptivePoolManager) GetActiveConnections() int64 {
	return m.activeConnections.Load()
}
// GetAdaptiveThreshold exposes the global manager's current threshold.
func GetAdaptiveThreshold() int {
	mgr := globalAdaptiveManager
	return mgr.GetThreshold()
}
// RegisterConnection registers a connection with the global manager.
func RegisterConnection() {
	mgr := globalAdaptiveManager
	mgr.RegisterConnection()
}
// UnregisterConnection removes a connection from the global manager.
func UnregisterConnection() {
	mgr := globalAdaptiveManager
	mgr.UnregisterConnection()
}
// GetActiveConnections reports the global manager's registered connection count.
func GetActiveConnections() int64 {
	mgr := globalAdaptiveManager
	return mgr.GetActiveConnections()
}

View File

@@ -5,9 +5,9 @@ import (
"errors"
)
// DataHeaderV2 represents a binary-encoded data header (Protocol Version 2)
// This replaces JSON encoding to improve performance
type DataHeaderV2 struct {
// DataHeader represents a binary-encoded data header for data plane
// All data transmission uses pure binary encoding for performance
type DataHeader struct {
Type DataType
IsLast bool
StreamID string
@@ -81,7 +81,7 @@ const (
)
// MarshalBinary encodes the header to binary format
func (h *DataHeaderV2) MarshalBinary() []byte {
func (h *DataHeader) MarshalBinary() []byte {
streamIDLen := len(h.StreamID)
requestIDLen := len(h.RequestID)
@@ -111,7 +111,7 @@ func (h *DataHeaderV2) MarshalBinary() []byte {
}
// UnmarshalBinary decodes the header from binary format
func (h *DataHeaderV2) UnmarshalBinary(data []byte) error {
func (h *DataHeader) UnmarshalBinary(data []byte) error {
if len(data) < binaryHeaderMinSize {
return errors.New("invalid binary header: too short")
}
@@ -142,25 +142,7 @@ func (h *DataHeaderV2) UnmarshalBinary(data []byte) error {
return nil
}
// ToDataHeader converts binary header to JSON header (for compatibility)
func (h *DataHeaderV2) ToDataHeader() DataHeader {
return DataHeader{
StreamID: h.StreamID,
RequestID: h.RequestID,
Type: h.Type.String(),
IsLast: h.IsLast,
}
}
// FromDataHeader converts JSON header to binary header
func (h *DataHeaderV2) FromDataHeader(dh DataHeader) {
h.StreamID = dh.StreamID
h.RequestID = dh.RequestID
h.Type = DataTypeFromString(dh.Type)
h.IsLast = dh.IsLast
}
// Size returns the size of the binary-encoded header
func (h *DataHeaderV2) Size() int {
func (h *DataHeader) Size() int {
return binaryHeaderMinSize + len(h.StreamID) + len(h.RequestID)
}

View File

@@ -4,6 +4,7 @@ import (
"encoding/binary"
"fmt"
"io"
"net"
"drip/internal/shared/pool"
)
@@ -60,20 +61,21 @@ func WriteFrame(w io.Writer, frame *Frame) error {
return fmt.Errorf("payload too large: %d bytes (max %d)", payloadLen, MaxFrameSize)
}
lengthBuf := make([]byte, 4)
binary.BigEndian.PutUint32(lengthBuf, uint32(payloadLen))
if _, err := w.Write(lengthBuf); err != nil {
return fmt.Errorf("failed to write length: %w", err)
}
var header [FrameHeaderSize]byte
binary.BigEndian.PutUint32(header[0:4], uint32(payloadLen))
header[4] = byte(frame.Type)
if _, err := w.Write([]byte{byte(frame.Type)}); err != nil {
return fmt.Errorf("failed to write type: %w", err)
}
if payloadLen > 0 {
if _, err := w.Write(frame.Payload); err != nil {
return fmt.Errorf("failed to write payload: %w", err)
if payloadLen == 0 {
if _, err := w.Write(header[:]); err != nil {
return fmt.Errorf("failed to write frame header: %w", err)
}
return nil
}
// net.Buffers will use writev for TCP connections and falls back to
// sequential writes for other io.Writer implementations (e.g. TLS).
if _, err := (&net.Buffers{header[:], frame.Payload}).WriteTo(w); err != nil {
return fmt.Errorf("failed to write frame: %w", err)
}
return nil
@@ -134,3 +136,13 @@ func NewFrame(frameType FrameType, payload []byte) *Frame {
Payload: payload,
}
}
// NewFramePooled builds a Frame whose payload is backed by a pooled buffer.
// The pooled buffer is released back to the pool after the frame has been
// written (via the frame's Release call on the write path).
func NewFramePooled(frameType FrameType, payload []byte, poolBuffer *[]byte) *Frame {
	f := new(Frame)
	f.Type = frameType
	f.Payload = payload
	f.poolBuffer = poolBuffer
	return f
}

View File

@@ -24,22 +24,10 @@ type ErrorMessage struct {
Message string `json:"message"` // Error message
}
// DataHeader represents metadata for a data frame
type DataHeader struct {
StreamID string `json:"stream_id"` // Unique stream identifier
RequestID string `json:"request_id"` // Request identifier (for HTTP)
Type string `json:"type"` // "data", "response", "close", "http_request", "http_response"
IsLast bool `json:"is_last"` // Is this the last frame for this stream
}
// Note: DataHeader is now defined in binary_header.go as a pure binary structure
// TCPData has been removed - use DataHeader + raw bytes directly
// TCPData represents TCP tunnel data
type TCPData struct {
StreamID string `json:"stream_id"` // Stream identifier
Data []byte `json:"data"` // Raw TCP data
IsClose bool `json:"is_close"` // Close this stream
}
// Marshal helpers
// Marshal helpers for control plane messages (JSON encoding)
func MarshalJSON(v interface{}) ([]byte, error) {
return json.Marshal(v)
}

View File

@@ -1,129 +1,106 @@
package protocol
import (
json "github.com/goccy/go-json"
"encoding/binary"
"errors"
"drip/internal/shared/pool"
)
// EncodeDataPayload encodes a data header and payload into a frame payload
// Uses binary encoding (optimized format)
// EncodeDataPayload encodes a data header and payload into a frame payload.
// Deprecated: Use EncodeDataPayloadPooled for better performance.
func EncodeDataPayload(header DataHeader, data []byte) ([]byte, error) {
return EncodeDataPayloadV2(header, data)
streamIDLen := len(header.StreamID)
requestIDLen := len(header.RequestID)
totalLen := binaryHeaderMinSize + streamIDLen + requestIDLen + len(data)
payload := make([]byte, totalLen)
flags := uint8(header.Type) & 0x07
if header.IsLast {
flags |= 0x08
}
payload[0] = flags
binary.BigEndian.PutUint16(payload[1:3], uint16(streamIDLen))
binary.BigEndian.PutUint16(payload[3:5], uint16(requestIDLen))
offset := binaryHeaderMinSize
copy(payload[offset:], header.StreamID)
offset += streamIDLen
copy(payload[offset:], header.RequestID)
offset += requestIDLen
copy(payload[offset:], data)
return payload, nil
}
// EncodeDataPayloadV1 encodes using JSON (legacy)
// Format: JSON_HEADER\nDATA
func EncodeDataPayloadV1(header DataHeader, data []byte) ([]byte, error) {
headerBytes, err := json.Marshal(header)
if err != nil {
return nil, err
// EncodeDataPayloadPooled encodes with adaptive allocation based on load.
// Returns payload slice and pool buffer pointer (may be nil).
//
// Adaptive strategy:
// - Mid-load (<150 conn): 256KB threshold, pool disabled → max QPS
// - High-load (≥300 conn): 32KB threshold, pool enabled → stable latency
// - Transition (150-300): Hysteresis to prevent flapping
func EncodeDataPayloadPooled(header DataHeader, data []byte) (payload []byte, poolBuffer *[]byte, err error) {
streamIDLen := len(header.StreamID)
requestIDLen := len(header.RequestID)
totalLen := binaryHeaderMinSize + streamIDLen + requestIDLen + len(data)
dynamicThreshold := GetAdaptiveThreshold()
if totalLen < dynamicThreshold {
regularPayload, err := EncodeDataPayload(header, data)
return regularPayload, nil, err
}
// Combine: header + newline + data
payload := make([]byte, 0, len(headerBytes)+1+len(data))
payload = append(payload, headerBytes...)
payload = append(payload, '\n')
payload = append(payload, data...)
if totalLen > pool.SizeLarge {
regularPayload, err := EncodeDataPayload(header, data)
return regularPayload, nil, err
}
return payload, nil
poolBuffer = pool.GetBuffer(totalLen)
payload = (*poolBuffer)[:totalLen]
flags := uint8(header.Type) & 0x07
if header.IsLast {
flags |= 0x08
}
payload[0] = flags
binary.BigEndian.PutUint16(payload[1:3], uint16(streamIDLen))
binary.BigEndian.PutUint16(payload[3:5], uint16(requestIDLen))
offset := binaryHeaderMinSize
copy(payload[offset:], header.StreamID)
offset += streamIDLen
copy(payload[offset:], header.RequestID)
offset += requestIDLen
copy(payload[offset:], data)
return payload, poolBuffer, nil
}
// EncodeDataPayloadV2 encodes using binary format (optimized)
// Format: BINARY_HEADER + DATA
func EncodeDataPayloadV2(header DataHeader, data []byte) ([]byte, error) {
// Convert to binary header
var h2 DataHeaderV2
h2.FromDataHeader(header)
// Encode header to binary
headerBytes := h2.MarshalBinary()
// Combine: binary header + data
payload := make([]byte, 0, len(headerBytes)+len(data))
payload = append(payload, headerBytes...)
payload = append(payload, data...)
return payload, nil
}
// DecodeDataPayload decodes a frame payload into header and data
// Auto-detects protocol version
// DecodeDataPayload decodes a frame payload into header and data.
func DecodeDataPayload(payload []byte) (DataHeader, []byte, error) {
if len(payload) == 0 {
return DataHeader{}, nil, errors.New("empty payload")
}
// Try to detect version:
// - V1 (JSON): starts with '{'
// - V2 (Binary): first byte is flags (0x00-0x1F typically)
if payload[0] == '{' {
// V1: JSON format
return DecodeDataPayloadV1(payload)
}
// V2: Binary format
return DecodeDataPayloadV2(payload)
}
// DecodeDataPayloadV1 decodes the legacy JSON payload layout:
// a JSON-encoded header, a single '\n' separator, then the raw data bytes.
// The returned data slice aliases the input payload (no copy).
func DecodeDataPayloadV1(payload []byte) (DataHeader, []byte, error) {
	// Locate the first newline, which splits header from data.
	sep := -1
	for i := 0; i < len(payload); i++ {
		if payload[i] == '\n' {
			sep = i
			break
		}
	}
	if sep < 0 {
		return DataHeader{}, nil, errors.New("invalid v1 payload: no newline separator")
	}
	// Parse everything before the separator as the JSON header.
	var header DataHeader
	if err := json.Unmarshal(payload[:sep], &header); err != nil {
		return DataHeader{}, nil, err
	}
	// Everything after the separator is the opaque data.
	return header, payload[sep+1:], nil
}
// DecodeDataPayloadV2 decodes binary format (optimized)
// Format: BINARY_HEADER + DATA
func DecodeDataPayloadV2(payload []byte) (DataHeader, []byte, error) {
if len(payload) < binaryHeaderMinSize {
return DataHeader{}, nil, errors.New("invalid v2 payload: too short")
return DataHeader{}, nil, errors.New("invalid payload: too short")
}
// Decode binary header
var h2 DataHeaderV2
if err := h2.UnmarshalBinary(payload); err != nil {
var header DataHeader
if err := header.UnmarshalBinary(payload); err != nil {
return DataHeader{}, nil, err
}
// Extract data (after header)
headerSize := h2.Size()
headerSize := header.Size()
if len(payload) < headerSize {
return DataHeader{}, nil, errors.New("invalid v2 payload: data missing")
return DataHeader{}, nil, errors.New("invalid payload: data missing")
}
data := payload[headerSize:]
// Convert to DataHeader
header := h2.ToDataHeader()
return header, data, nil
}
// GetPayloadHeaderSize returns the size of the header in the payload
// This is useful for pre-allocating buffers
func GetPayloadHeaderSize(header DataHeader) int {
var h2 DataHeaderV2
h2.FromDataHeader(header)
return h2.Size()
return header.Size()
}

View File

@@ -25,7 +25,9 @@ type FrameWriter struct {
}
func NewFrameWriter(conn io.Writer) *FrameWriter {
return NewFrameWriterWithConfig(conn, 128, 2*time.Millisecond, 1024)
// Larger queue size for better burst handling across all load scenarios
// With adaptive buffer pool, memory pressure is well controlled
return NewFrameWriterWithConfig(conn, 128, 2*time.Millisecond, 2048)
}
func NewFrameWriterWithConfig(conn io.Writer, maxBatch int, maxBatchWait time.Duration, queueSize int) *FrameWriter {
@@ -126,6 +128,8 @@ func (w *FrameWriter) flushBatchLocked() {
for _, frame := range w.batch {
_ = WriteFrame(w.conn, frame)
// Release pooled buffer after writing
frame.Release()
}
w.batch = w.batch[:0]