@@ -18,6 +18,8 @@ type compressedReader struct {
 	buf      packetReader
 	bytesBuf []byte
 	mc       *mysqlConn
+	br       *bytes.Reader
+	zr       io.ReadCloser
 }
 
 type compressedWriter struct {
@@ -48,12 +50,8 @@ func (cr *compressedReader) readNext(need int) ([]byte, error) {
 		}
 	}
 
-	data := make([]byte, need)
-
-	copy(data, cr.bytesBuf[:len(data)])
-
-	cr.bytesBuf = cr.bytesBuf[len(data):]
-
+	data := cr.bytesBuf[:need]
+	cr.bytesBuf = cr.bytesBuf[need:]
 
 	return data, nil
 }
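
The readNext change above drops the allocate-and-copy in favour of plain re-slicing. A minimal standalone sketch of what that slicing does (not driver code; the variable names just mirror the hunk):

package main

import "fmt"

func main() {
	bytesBuf := []byte{1, 2, 3, 4, 5} // stand-in for cr.bytesBuf
	need := 2

	data := bytesBuf[:need]    // zero-copy view of the first `need` bytes
	bytesBuf = bytesBuf[need:] // what remains for the next readNext call

	fmt.Println(data)     // [1 2]
	fmt.Println(bytesBuf) // [3 4 5]
}

The returned slice is a view into the same backing array rather than a fresh copy, which is what removes the per-call allocation.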
@@ -88,27 +86,43 @@ func (cr *compressedReader) uncompressPacket() error {
 	}
 
 	// write comprData to a bytes.buffer, then read it using zlib into data
-	var b bytes.Buffer
-	b.Write(comprData)
-	r, err := zlib.NewReader(&b)
+	if cr.br == nil {
+		cr.br = bytes.NewReader(comprData)
+	} else {
+		cr.br.Reset(comprData)
+	}
+
+	resetter, ok := cr.zr.(zlib.Resetter)
 
-	if r != nil {
-		defer r.Close()
+	if ok {
+		err := resetter.Reset(cr.br, []byte{})
+		if err != nil {
+			return err
+		}
+	} else {
+		cr.zr, err = zlib.NewReader(cr.br)
+		if err != nil {
+			return err
+		}
 	}
 
-	if err != nil {
-		return err
+	defer cr.zr.Close()
+
+	// use existing capacity in bytesBuf if possible
+	offset := len(cr.bytesBuf)
+	if cap(cr.bytesBuf)-offset < uncompressedLength {
+		old := cr.bytesBuf
+		cr.bytesBuf = make([]byte, offset, offset+uncompressedLength)
+		copy(cr.bytesBuf, old)
 	}
 
-	data := make([]byte, uncompressedLength)
+	data := cr.bytesBuf[offset : offset+uncompressedLength]
+
 	lenRead := 0
 
 	// http://grokbase.com/t/gg/golang-nuts/146y9ppn6b/go-nuts-stream-compression-with-compress-flate
 	for lenRead < uncompressedLength {
-
-		tmp := data[lenRead:]
-
-		n, err := r.Read(tmp)
+		n, err := cr.zr.Read(data[lenRead:])
 		lenRead += n
 
 		if err == io.EOF {
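
The hunk above replaces the per-packet bytes.Buffer and zlib.NewReader allocation with a cached bytes.Reader and a reusable zlib reader via zlib.Resetter. A standalone sketch of the same pattern (reusableInflater and inflate are illustrative names, not part of the driver):

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
)

type reusableInflater struct {
	br *bytes.Reader
	zr io.ReadCloser
}

// inflate decompresses one zlib-compressed payload, reusing the bytes.Reader
// and the zlib reader across calls instead of allocating new ones each time.
func (ri *reusableInflater) inflate(compressed []byte) ([]byte, error) {
	if ri.br == nil {
		ri.br = bytes.NewReader(compressed)
	} else {
		ri.br.Reset(compressed)
	}

	if resetter, ok := ri.zr.(zlib.Resetter); ok {
		// Rewind the existing decompressor onto the new payload; a nil dict
		// means no preset dictionary.
		if err := resetter.Reset(ri.br, nil); err != nil {
			return nil, err
		}
	} else {
		zr, err := zlib.NewReader(ri.br)
		if err != nil {
			return nil, err
		}
		ri.zr = zr
	}

	return io.ReadAll(ri.zr)
}

func main() {
	var b bytes.Buffer
	zw := zlib.NewWriter(&b)
	zw.Write([]byte("hello, compressed world"))
	zw.Close()

	var ri reusableInflater
	for i := 0; i < 2; i++ {
		out, err := ri.inflate(b.Bytes())
		if err != nil {
			panic(err)
		}
		fmt.Println(string(out)) // printed twice; the second pass reuses both readers
	}
}

The type assertion only fails on the first call, when no zlib reader exists yet and one has to be created; every later payload just resets the existing decompressor state.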
@@ -152,7 +166,15 @@ func (cw *compressedWriter) Write(data []byte) (int, error) {
 		return 0, err
 	}
 
-	err = cw.writeComprPacketToNetwork(b.Bytes(), lenSmall)
+	// if compression expands the payload, do not compress
+	useData := b.Bytes()
+
+	if len(useData) > len(dataSmall) {
+		useData = dataSmall
+		lenSmall = 0
+	}
+
+	err = cw.writeComprPacketToNetwork(useData, lenSmall)
 	if err != nil {
 		return 0, err
 	}
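
Both this hunk and the matching one further down add the same guard: when the zlib output is larger than the input, the raw bytes are sent instead and lenSmall is zeroed. In the MySQL compressed protocol the third header field carries the length of the payload before compression, and a value of 0 marks the payload as uncompressed, which is presumably what writeComprPacketToNetwork encodes from lenSmall. A rough sketch of the same decision (choosePayload is an illustrative helper, not in the PR):

package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
)

// choosePayload compresses raw and keeps the result only if it is actually
// smaller; otherwise it returns the raw bytes with uncompressedLen = 0,
// signalling "not compressed" in the compressed-packet header.
func choosePayload(raw []byte) (payload []byte, uncompressedLen int) {
	var b bytes.Buffer
	zw := zlib.NewWriter(&b)
	zw.Write(raw)
	zw.Close()

	if b.Len() > len(raw) {
		// Compression expanded the data (typical for short or incompressible
		// payloads): send it as-is.
		return raw, 0
	}
	return b.Bytes(), len(raw)
}

func main() {
	payload, n := choosePayload([]byte("hi"))
	fmt.Println(len(payload), n) // the 2-byte input stays uncompressed, n == 0
}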
@@ -163,7 +185,7 @@ func (cw *compressedWriter) Write(data []byte) (int, error) {
 
 	lenSmall := len(data)
 
-	// do not compress if packet is too small
+	// do not attempt compression if packet is too small
 	if lenSmall < minCompressLength {
 		err := cw.writeComprPacketToNetwork(data, 0)
 		if err != nil {
@@ -183,7 +205,15 @@ func (cw *compressedWriter) Write(data []byte) (int, error) {
 		return 0, err
 	}
 
-	err = cw.writeComprPacketToNetwork(b.Bytes(), lenSmall)
+	// if compression expands the payload, do not compress
+	useData := b.Bytes()
+
+	if len(useData) > len(data) {
+		useData = data
+		lenSmall = 0
+	}
+
+	err = cw.writeComprPacketToNetwork(useData, lenSmall)
 
 	if err != nil {
 		return 0, err