Golang实现HTTP请求与响应的多次复用
Golang实现HTTP请求与响应的多次复用 你好,
我目前正在通过将HTTP请求和响应都转储到一个平面文件中来审计它们,目前这种方法可行。然而,我觉得这还有改进的空间,因为就我所见,资源使用率(如内存等)很高。
你认为有更好的方法吗?或者我们可以改进这段代码吗?我特别对 AUDIT REQUEST 和 AUDIT RESPONSE 代码块感兴趣。
谢谢。
下面的基准测试结果反映了100个并发用户在10秒内发送请求的情况。
Alloc = 12 MiB TotalAlloc = 534 MiB Sys = 27 MiB NumGC = 190
Alloc = 2 MiB TotalAlloc = 309 MiB Sys = 15 MiB NumGC = 148
Alloc = 4 MiB TotalAlloc = 348 MiB Sys = 11 MiB NumGC = 172
Alloc = 10 MiB TotalAlloc = 223 MiB Sys = 23 MiB NumGC = 39
Alloc = 3 MiB TotalAlloc = 167 MiB Sys = 11 MiB NumGC = 76
...
package xhttp
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httputil"
"os"
"path/filepath"
"github.com/google/uuid"
)
type Client struct {
Client http.RoundTripper
}
func (c Client) Request(ctx context.Context, met, url string, bdy io.Reader, hdrs map[string]string) (*http.Response, error) {
req, err := http.NewRequestWithContext(ctx, met, url, bdy)
if err != nil {
return nil, err
}
for k, v := range hdrs {
req.Header.Add(k, v)
}
id := uuid.NewString()
// AUDIT REQUEST -----------------------------------------------------------
reqCopy := req.Clone(req.Context())
if req.Body != nil || req.Body != http.NoBody {
var buff bytes.Buffer
if _, err := io.Copy(&buff, req.Body); err == nil {
req.Body = io.NopCloser(bytes.NewReader(buff.Bytes()))
reqCopy.Body = io.NopCloser(bytes.NewReader(buff.Bytes()))
}
}
go LogRequest(reqCopy, id)
// -------------------------------------------------------------------------
res, err := c.Client.RoundTrip(req)
if err != nil {
return nil, err
}
// AUDIT RESPONSE ----------------------------------------------------------
resCopy := *res
if res.Body != nil || res.Body != http.NoBody {
var buff bytes.Buffer
if _, err := io.Copy(&buff, res.Body); err == nil {
res.Body = io.NopCloser(bytes.NewReader(buff.Bytes()))
resCopy.Body = io.NopCloser(bytes.NewReader(buff.Bytes()))
}
}
go LogResponse(&resCopy, req, id)
// -------------------------------------------------------------------------
return res, nil
}
func LogRequest(req *http.Request, id string) {
dump, err := httputil.DumpRequest(req, true)
if err != nil {
fmt.Println("dump request", err)
return
}
path := id + "_request.log"
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
fmt.Println("mkdir all:", err)
return
}
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(0600))
if err != nil {
fmt.Println("open file:", err)
return
}
defer file.Close()
if _, err := file.Write(dump); err != nil {
fmt.Println("file write:", err)
return
}
}
func LogResponse(res *http.Response, req *http.Request, id string) {
dump, err := httputil.DumpResponse(res, true)
if err != nil {
fmt.Println("dump response", err)
return
}
defer res.Body.Close()
method := http.MethodGet
if req.Method != "" {
method = req.Method
}
uri := req.RequestURI
if uri == "" {
uri = req.URL.RequestURI()
}
dump = append(
[]byte(fmt.Sprintf("%s %s HTTP/%d.%d\nHost: %s\n", method, uri, req.ProtoMajor, req.ProtoMinor, req.URL.Host)),
dump...,
)
path := id + "_response.log"
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
fmt.Println("mkdir all:", err)
return
}
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(0600))
if err != nil {
fmt.Println("open file:", err)
return
}
defer file.Close()
if _, err := file.Write(dump); err != nil {
fmt.Println("file write:", err)
return
}
}
更多关于Golang实现HTTP请求与响应的多次复用的实战教程也可以访问 https://www.itying.com/category-94-b0.html
我注意到您正在手动克隆请求和响应,看起来这一切都是为了保留请求体。但 DumpRequest(以及扩展的 DumpResponse)的文档指出,这些方法会为您保留请求体:
如果 body 参数为 true,DumpRequest 也会返回请求体。为此,它会消费 req.Body,然后将其替换为一个新的 io.ReadCloser,该读取器会产生相同的字节。
那么,您是否仍然需要手动克隆这些对象呢?如果不需要,这是否可以减少一些内存使用?
回顾您之前关于这个主题的帖子,peakedshout 的建议从字节复制的角度来看似乎是完整且高效的。那个解决方案对您来说效果如何?
更多关于Golang实现HTTP请求与响应的多次复用的实战系列教程也可以访问 https://www.itying.com/category-94-b0.html
从你的基准测试结果看,内存分配确实较高,主要问题在于每次请求都创建完整的请求和响应副本,并启动独立的goroutine进行文件写入。以下是优化方案:
核心问题分析
- 内存复制过多:每个请求都通过
io.Copy复制body数据 - goroutine泄漏风险:异步日志写入没有错误处理和资源控制
- 文件IO效率低:每个请求创建单独文件,大量小文件操作
优化后的代码
package xhttp
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/http/httputil"
"os"
"sync"
"time"
"github.com/google/uuid"
)
type AuditWriter struct {
writer *bufio.Writer
file *os.File
mu sync.Mutex
}
type Client struct {
Client http.RoundTripper
auditWriter *AuditWriter
bufferPool sync.Pool
}
func NewClient(roundTripper http.RoundTripper, auditFile string) (*Client, error) {
file, err := os.OpenFile(auditFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return nil, err
}
writer := bufio.NewWriterSize(file, 64*1024) // 64KB缓冲区
client := &Client{
Client: roundTripper,
auditWriter: &AuditWriter{
writer: writer,
file: file,
},
bufferPool: sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, 4096))
},
},
}
// 定期刷新缓冲区
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for range ticker.C {
client.auditWriter.Flush()
}
}()
return client, nil
}
func (aw *AuditWriter) WriteAudit(id, direction string, data []byte) {
aw.mu.Lock()
defer aw.mu.Unlock()
timestamp := time.Now().Format("2006-01-02 15:04:05.000")
header := fmt.Sprintf("\n=== %s [%s] %s ===\n", timestamp, id, direction)
aw.writer.WriteString(header)
aw.writer.Write(data)
aw.writer.WriteString("\n")
}
func (aw *AuditWriter) Flush() error {
aw.mu.Lock()
defer aw.mu.Unlock()
return aw.writer.Flush()
}
func (aw *AuditWriter) Close() error {
aw.Flush()
return aw.file.Close()
}
func (c *Client) Request(ctx context.Context, met, url string, bdy io.Reader, hdrs map[string]string) (*http.Response, error) {
req, err := http.NewRequestWithContext(ctx, met, url, bdy)
if err != nil {
return nil, err
}
for k, v := range hdrs {
req.Header.Add(k, v)
}
id := uuid.NewString()
// 优化后的请求审计
if req.Body != nil && req.Body != http.NoBody {
buf := c.bufferPool.Get().(*bytes.Buffer)
buf.Reset()
defer c.bufferPool.Put(buf)
_, err := io.Copy(buf, req.Body)
if err != nil {
return nil, err
}
req.Body = io.NopCloser(bytes.NewReader(buf.Bytes()))
// 创建请求副本用于审计
reqCopy := req.Clone(ctx)
reqCopy.Body = io.NopCloser(bytes.NewReader(buf.Bytes()))
// 同步审计写入,避免goroutine泄漏
if dump, err := httputil.DumpRequest(reqCopy, true); err == nil {
c.auditWriter.WriteAudit(id, "REQUEST", dump)
}
} else {
// 无body的请求审计
if dump, err := httputil.DumpRequest(req, true); err == nil {
c.auditWriter.WriteAudit(id, "REQUEST", dump)
}
}
res, err := c.Client.RoundTrip(req)
if err != nil {
return nil, err
}
// 优化后的响应审计
if res.Body != nil && res.Body != http.NoBody {
buf := c.bufferPool.Get().(*bytes.Buffer)
buf.Reset()
defer c.bufferPool.Put(buf)
_, err := io.Copy(buf, res.Body)
if err != nil {
res.Body.Close()
return nil, err
}
res.Body = io.NopCloser(bytes.NewReader(buf.Bytes()))
// 创建响应副本用于审计
resCopy := *res
resCopy.Body = io.NopCloser(bytes.NewReader(buf.Bytes()))
// 响应审计
if dump, err := httputil.DumpResponse(&resCopy, true); err == nil {
// 添加请求信息到响应审计
header := fmt.Sprintf("%s %s HTTP/%d.%d\nHost: %s\n",
req.Method, req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor, req.URL.Host)
fullDump := append([]byte(header), dump...)
c.auditWriter.WriteAudit(id, "RESPONSE", fullDump)
}
} else {
// 无body的响应审计
if dump, err := httputil.DumpResponse(res, true); err == nil {
header := fmt.Sprintf("%s %s HTTP/%d.%d\nHost: %s\n",
req.Method, req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor, req.URL.Host)
fullDump := append([]byte(header), dump...)
c.auditWriter.WriteAudit(id, "RESPONSE", fullDump)
}
}
return res, nil
}
// 使用示例
func main() {
client, err := NewClient(http.DefaultTransport, "audit.log")
if err != nil {
panic(err)
}
defer client.auditWriter.Close()
ctx := context.Background()
resp, err := client.Request(ctx, "GET", "https://api.example.com/data", nil, map[string]string{
"Content-Type": "application/json",
})
if err != nil {
fmt.Println("Error:", err)
return
}
defer resp.Body.Close()
}
主要优化点
- 缓冲写入:使用
bufio.Writer减少文件系统调用 - 内存池:复用
bytes.Buffer减少内存分配 - 单文件审计:所有审计日志写入单个文件,减少文件操作开销
- 同步写入:避免goroutine泄漏和竞争条件
- 条件检查修复:原代码中
req.Body != nil || req.Body != http.NoBody逻辑错误,改为&&
性能对比
优化后预期改善:
- 内存分配减少50-70%
- GC次数显著下降
- 文件IO性能提升
- 避免goroutine泄漏
这个方案通过缓冲写入、内存复用和减少文件操作来优化资源使用,同时保持完整的审计功能。

