mirror of
https://github.com/Gouryella/drip.git
synced 2026-02-27 14:50:52 +00:00
feat(tunnel): switch to yamux stream proxying and connection pooling
- Introduce pooled tunnel sessions (TunnelID/DataConnect) on client/server - Proxy HTTP/HTTPS via raw HTTP over yamux streams; pipe TCP streams directly - Move UI/stats into internal/shared; refactor CLI tunnel helpers; drop msgpack/hpack legacy
This commit is contained in:
@@ -1,73 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AdaptiveBufferPool manages reusable byte buffers in two fixed sizes
// (1MB "large" and 32KB "medium"), backed by sync.Pool.
// This eliminates the massive memory allocation overhead seen in profiling.
//
// Buffers are handed out as *[]byte (pointer-to-slice) so the pool stores
// a pointer-shaped value and Put does not re-box the slice header.
type AdaptiveBufferPool struct {
	// Large buffers for streaming threshold (1MB)
	largePool *sync.Pool

	// Medium buffers for temporary reads (32KB)
	mediumPool *sync.Pool
}

const (
	// LargeBufferSize is 1MB for streaming threshold
	LargeBufferSize = 1 * 1024 * 1024

	// MediumBufferSize is 32KB for temporary reads
	MediumBufferSize = 32 * 1024
)

// NewAdaptiveBufferPool creates a new adaptive buffer pool. Buffers are
// allocated lazily, on the first Get that finds its pool empty.
func NewAdaptiveBufferPool() *AdaptiveBufferPool {
	return &AdaptiveBufferPool{
		largePool:  newSizedBufferPool(LargeBufferSize),
		mediumPool: newSizedBufferPool(MediumBufferSize),
	}
}

// newSizedBufferPool builds a sync.Pool that allocates []byte buffers of
// the given size when empty. Shared by both tiers of the adaptive pool.
func newSizedBufferPool(size int) *sync.Pool {
	return &sync.Pool{
		New: func() interface{} {
			buf := make([]byte, size)
			return &buf
		},
	}
}

// GetLarge returns a large buffer (1MB) from the pool.
// The returned buffer should be returned via PutLarge when done.
func (p *AdaptiveBufferPool) GetLarge() *[]byte {
	return p.largePool.Get().(*[]byte)
}

// PutLarge returns a large buffer to the pool for reuse.
//
// Nil buffers and buffers whose capacity is not exactly LargeBufferSize
// are dropped rather than pooled: previously a foreign or truncated
// buffer could poison the pool, making a later GetLarge return a buffer
// shorter than callers expect.
func (p *AdaptiveBufferPool) PutLarge(buf *[]byte) {
	if buf == nil || cap(*buf) != LargeBufferSize {
		return
	}
	// Reset to full capacity to allow reuse
	*buf = (*buf)[:cap(*buf)]
	p.largePool.Put(buf)
}

// GetMedium returns a medium buffer (32KB) from the pool.
// The returned buffer should be returned via PutMedium when done.
func (p *AdaptiveBufferPool) GetMedium() *[]byte {
	return p.mediumPool.Get().(*[]byte)
}

// PutMedium returns a medium buffer to the pool for reuse.
//
// Nil buffers and buffers whose capacity is not exactly MediumBufferSize
// are dropped rather than pooled (see PutLarge for rationale).
func (p *AdaptiveBufferPool) PutMedium(buf *[]byte) {
	if buf == nil || cap(*buf) != MediumBufferSize {
		return
	}
	// Reset to full capacity to allow reuse
	*buf = (*buf)[:cap(*buf)]
	p.mediumPool.Put(buf)
}
|
||||
@@ -1,86 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// HeaderPool manages a pool of http.Header objects for reuse.
type HeaderPool struct {
	pool sync.Pool
}

// NewHeaderPool creates a new header pool. Pooled headers are created
// with a small initial capacity (12 entries) so typical requests fit
// without rehashing.
func NewHeaderPool() *HeaderPool {
	return &HeaderPool{
		pool: sync.Pool{
			New: func() interface{} {
				return make(http.Header, 12)
			},
		},
	}
}

// Get retrieves a header from the pool. The header is cleared before it
// is returned, so callers always see an empty map (with warm bucket
// storage if it was used before).
func (p *HeaderPool) Get() http.Header {
	h := p.pool.Get().(http.Header)
	for k := range h {
		delete(h, k)
	}
	return h
}

// Put returns a header to the pool. Nil headers are ignored. Entries are
// not cleared here; Get clears before handing the header out.
func (p *HeaderPool) Put(h http.Header) {
	if h == nil {
		return
	}
	p.pool.Put(h)
}

// Clone copies src into dst, reusing dst's map buckets. The value slices
// themselves are freshly allocated with exact capacity, so dst never
// aliases src. This is more efficient than creating a new header from
// scratch.
func (p *HeaderPool) Clone(dst, src http.Header) {
	// Clear dst first so stale keys from a previous use cannot leak.
	for k := range dst {
		delete(dst, k)
	}

	// Copy all headers from src to dst, allocating each value slice with
	// exact capacity to avoid over-allocation.
	for k, vv := range src {
		dst[k] = make([]string, len(vv))
		copy(dst[k], vv)
	}
}

// CloneWithExtra clones src into dst and adds/overwrites one extra
// header. This is optimized for the common pattern of cloning + adding
// a Host header.
func (p *HeaderPool) CloneWithExtra(dst, src http.Header, extraKey, extraValue string) {
	// Delegate to Clone rather than duplicating its clear+copy loops, so
	// the two code paths cannot drift apart.
	p.Clone(dst, src)

	// Set extra header (overwrite if it exists)
	dst.Set(extraKey, extraValue)
}

// globalHeaderPool is a package-level pool for convenience.
var globalHeaderPool = NewHeaderPool()

// GetHeader retrieves a cleared header from the global pool.
func GetHeader() http.Header {
	return globalHeaderPool.Get()
}

// PutHeader returns a header to the global pool.
func PutHeader(h http.Header) {
	globalHeaderPool.Put(h)
}
|
||||
Reference in New Issue
Block a user