// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gob

// TODO(rsc): When garbage collector changes, revisit
// the allocations in this file that use unsafe.Pointer.

import (
	"bytes"
	"errors"
	"io"
	"math"
	"reflect"
	"unsafe"
)

var (
	errBadUint = errors.New("gob: encoded unsigned integer out of range")
	errBadType = errors.New("gob: unknown type id or corrupted data")
	errRange   = errors.New("gob: bad data: field numbers out of bounds")
)

// decoderState is the execution state of an instance of the decoder. A new state
// is created for nested objects.
type decoderState struct {
	dec *Decoder
	// The buffer is stored with an extra indirection because it may be replaced
	// if we load a type during decode (when reading an interface value).
	b        *bytes.Buffer
	fieldnum int // the last field number read.
	buf      []byte
	next     *decoderState // for free list
}

// We pass the bytes.Buffer separately for easier testing of the infrastructure
// without requiring a full Decoder.
func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState {
	d := dec.freeList
	if d == nil {
		d = new(decoderState)
		d.dec = dec
		d.buf = make([]byte, uint64Size)
	} else {
		dec.freeList = d.next
	}
	d.b = buf
	return d
}

func (dec *Decoder) freeDecoderState(d *decoderState) {
	d.next = dec.freeList
	dec.freeList = d
}

func overflow(name string) error {
	return errors.New(`value for "` + name + `" out of range`)
}

// decodeUintReader reads an encoded unsigned integer from an io.Reader.
// Used only by the Decoder to read the message length.
func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err error) {
	width = 1
	_, err = r.Read(buf[0:width])
	if err != nil {
		return
	}
	b := buf[0]
	if b <= 0x7f {
		return uint64(b), width, nil
	}
	n := -int(int8(b))
	if n > uint64Size {
		err = errBadUint
		return
	}
	width, err = io.ReadFull(r, buf[0:n])
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return
	}
	// Could check that the high byte is zero but it's not worth it.
	for _, b := range buf[0:width] {
		x = x<<8 | uint64(b)
	}
	width++ // +1 for length byte
	return
}
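
// To make the byte handling above concrete: an unsigned integer below 0x80 is
// carried as a single byte holding the value itself, while larger values are
// carried as a byte holding the negated byte count followed by the value in
// big-endian order. For example, 256 arrives as FE 01 00: 0xFE is -2 as an
// int8, so two value bytes follow, and 0x01<<8 | 0x00 reassembles to 256.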

// decodeUint reads an encoded unsigned integer from state.b.
// Does not check for overflow.
func (state *decoderState) decodeUint() (x uint64) {
	b, err := state.b.ReadByte()
	if err != nil {
		error_(err)
	}
	if b <= 0x7f {
		return uint64(b)
	}
	n := -int(int8(b))
	if n > uint64Size {
		error_(errBadUint)
	}
	width, err := state.b.Read(state.buf[0:n])
	if err != nil {
		error_(err)
	}
	// Don't need to check error; it's safe to loop regardless.
	// Could check that the high byte is zero but it's not worth it.
	for _, b := range state.buf[0:width] {
		x = x<<8 | uint64(b)
	}
	return x
}

// decodeInt reads an encoded signed integer from state.b.
// Does not check for overflow.
func (state *decoderState) decodeInt() int64 {
	x := state.decodeUint()
	if x&1 != 0 {
		return ^int64(x >> 1)
	}
	return int64(x >> 1)
}
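
// To make decodeInt concrete: signed integers travel in the unsigned form
// above with the sign folded into the low bit. Bit 0 says whether to
// complement the remaining bits, so 3 is carried as 6 (binary 110) and -3 as
// 5 (binary 101), which decodes as ^(5>>1) == ^2 == -3.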

// decOp is the signature of a decoding operator for a given type.
type decOp func(i *decInstr, state *decoderState, p unsafe.Pointer)

// The 'instructions' of the decoding machine
type decInstr struct {
	op     decOp
	field  int     // field number of the wire type
	indir  int     // how many pointer indirections to reach the value in the struct
	offset uintptr // offset in the structure of the field to encode
	ovfl   error   // error message for overflow/underflow (for arrays, of the elements)
}

// Since the encoder writes no zeros, if we arrive at a decoder we have
// a value to extract and store. The field number has already been read
// (it's how we knew to call this decoder).
// Each decoder is responsible for handling any indirections associated
// with the data structure. If any pointer so reached is nil, allocation must
// be done.

// Walk the pointer hierarchy, allocating if we find a nil. Stop one before the end.
func decIndirect(p unsafe.Pointer, indir int) unsafe.Pointer {
	for ; indir > 1; indir-- {
		if *(*unsafe.Pointer)(p) == nil {
			// Allocation required
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(unsafe.Pointer))
		}
		p = *(*unsafe.Pointer)(p)
	}
	return p
}
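
// As an illustration of the indirection bookkeeping: for a destination of
// type **int, indir is 2. decIndirect follows (allocating if necessary) the
// outer pointer and stops one level short, so the type-specific decoder
// receives a pointer to the final *int and performs the last allocation
// itself.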

// ignoreUint discards a uint value with no destination.
func ignoreUint(i *decInstr, state *decoderState, p unsafe.Pointer) {
	state.decodeUint()
}

// ignoreTwoUints discards a uint value with no destination. It's used to skip
// complex values.
func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) {
	state.decodeUint()
	state.decodeUint()
}

// decBool decodes a uint and stores it as a boolean through p.
func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool))
		}
		p = *(*unsafe.Pointer)(p)
	}
	*(*bool)(p) = state.decodeUint() != 0
}

// decInt8 decodes an integer and stores it as an int8 through p.
func decInt8(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8))
		}
		p = *(*unsafe.Pointer)(p)
	}
	v := state.decodeInt()
	if v < math.MinInt8 || math.MaxInt8 < v {
		error_(i.ovfl)
	} else {
		*(*int8)(p) = int8(v)
	}
}

// decUint8 decodes an unsigned integer and stores it as a uint8 through p.
func decUint8(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8))
		}
		p = *(*unsafe.Pointer)(p)
	}
	v := state.decodeUint()
	if math.MaxUint8 < v {
		error_(i.ovfl)
	} else {
		*(*uint8)(p) = uint8(v)
	}
}

// decInt16 decodes an integer and stores it as an int16 through p.
func decInt16(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16))
		}
		p = *(*unsafe.Pointer)(p)
	}
	v := state.decodeInt()
	if v < math.MinInt16 || math.MaxInt16 < v {
		error_(i.ovfl)
	} else {
		*(*int16)(p) = int16(v)
	}
}

// decUint16 decodes an unsigned integer and stores it as a uint16 through p.
func decUint16(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16))
		}
		p = *(*unsafe.Pointer)(p)
	}
	v := state.decodeUint()
	if math.MaxUint16 < v {
		error_(i.ovfl)
	} else {
		*(*uint16)(p) = uint16(v)
	}
}

// decInt32 decodes an integer and stores it as an int32 through p.
func decInt32(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32))
		}
		p = *(*unsafe.Pointer)(p)
	}
	v := state.decodeInt()
	if v < math.MinInt32 || math.MaxInt32 < v {
		error_(i.ovfl)
	} else {
		*(*int32)(p) = int32(v)
	}
}

// decUint32 decodes an unsigned integer and stores it as a uint32 through p.
func decUint32(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32))
		}
		p = *(*unsafe.Pointer)(p)
	}
	v := state.decodeUint()
	if math.MaxUint32 < v {
		error_(i.ovfl)
	} else {
		*(*uint32)(p) = uint32(v)
	}
}

// decInt64 decodes an integer and stores it as an int64 through p.
func decInt64(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64))
		}
		p = *(*unsafe.Pointer)(p)
	}
	*(*int64)(p) = int64(state.decodeInt())
}

// decUint64 decodes an unsigned integer and stores it as a uint64 through p.
func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64))
		}
		p = *(*unsafe.Pointer)(p)
	}
	*(*uint64)(p) = uint64(state.decodeUint())
}

// Floating-point numbers are transmitted as uint64s holding the bits
// of the underlying representation. They are sent byte-reversed, with
// the exponent end coming out first, so integer floating point numbers
// (for example) transmit more compactly. This routine does the
// unswizzling.
func floatFromBits(u uint64) float64 {
	var v uint64
	for i := 0; i < 8; i++ {
		v <<= 8
		v |= u & 0xFF
		u >>= 8
	}
	return math.Float64frombits(v)
}
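
// A worked example of the byte reversal: 17.0 has IEEE-754 bits
// 0x4031000000000000. Reversed byte-for-byte that is 0x3140, so the sender
// needs only three bytes (FE 31 40) rather than nine, and floatFromBits
// restores the original bit pattern on this side.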

// storeFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and stores it through p. It's a helper function for float32 and complex64.
func storeFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
	v := floatFromBits(state.decodeUint())
	av := v
	if av < 0 {
		av = -av
	}
	// +Inf is OK in both 32- and 64-bit floats. Underflow is always OK.
	if math.MaxFloat32 < av && av <= math.MaxFloat64 {
		error_(i.ovfl)
	} else {
		*(*float32)(p) = float32(v)
	}
}

// decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and stores it through p.
func decFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32))
		}
		p = *(*unsafe.Pointer)(p)
	}
	storeFloat32(i, state, p)
}

// decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point
// number, and stores it through p.
func decFloat64(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64))
		}
		p = *(*unsafe.Pointer)(p)
	}
	*(*float64)(p) = floatFromBits(uint64(state.decodeUint()))
}

// decComplex64 decodes a pair of unsigned integers, treats them as a
// pair of floating point numbers, and stores them as a complex64 through p.
// The real part comes first.
func decComplex64(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64))
		}
		p = *(*unsafe.Pointer)(p)
	}
	storeFloat32(i, state, p)
	storeFloat32(i, state, unsafe.Pointer(uintptr(p)+unsafe.Sizeof(float32(0))))
}

// decComplex128 decodes a pair of unsigned integers, treats them as a
// pair of floating point numbers, and stores them as a complex128 through p.
// The real part comes first.
func decComplex128(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128))
		}
		p = *(*unsafe.Pointer)(p)
	}
	real := floatFromBits(uint64(state.decodeUint()))
	imag := floatFromBits(uint64(state.decodeUint()))
	*(*complex128)(p) = complex(real, imag)
}

// decUint8Slice decodes a byte slice and stores through p a slice header
// describing the data.
// uint8 slices are encoded as an unsigned count followed by the raw bytes.
func decUint8Slice(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8))
		}
		p = *(*unsafe.Pointer)(p)
	}
	n := int(state.decodeUint())
	if n < 0 {
		errorf("negative length decoding []byte")
	}
	slice := (*[]uint8)(p)
	if cap(*slice) < n {
		*slice = make([]uint8, n)
	} else {
		*slice = (*slice)[0:n]
	}
	if _, err := state.b.Read(*slice); err != nil {
		errorf("error decoding []byte: %s", err)
	}
}

// decString decodes a byte array and stores through p a string header
// describing the data.
// Strings are encoded as an unsigned count followed by the raw bytes.
func decString(i *decInstr, state *decoderState, p unsafe.Pointer) {
	if i.indir > 0 {
		if *(*unsafe.Pointer)(p) == nil {
			*(*unsafe.Pointer)(p) = unsafe.Pointer(new(string))
		}
		p = *(*unsafe.Pointer)(p)
	}
	b := make([]byte, state.decodeUint())
	state.b.Read(b)
	// It would be a shame to do the obvious thing here,
	//	*(*string)(p) = string(b)
	// because we've already allocated the storage and this would
	// allocate again and copy. So we do this ugly hack, which is even
	// more unsafe than it looks as it depends on the memory
	// representation of a string matching the beginning of the memory
	// representation of a byte slice (a byte slice is longer).
	*(*string)(p) = *(*string)(unsafe.Pointer(&b))
}

// ignoreUint8Array skips over the data for a byte slice value with no destination.
func ignoreUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) {
	b := make([]byte, state.decodeUint())
	state.b.Read(b)
}

// Execution engine

// The decoder engine is an array of instructions indexed by field number of the
// incoming data. It is executed with random access according to field number.
type decEngine struct {
	instr    []decInstr
	numInstr int // the number of active instructions
}

// allocate makes sure storage is available for an object of underlying type rtyp
// that is indir levels of indirection through p.
func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr {
	if indir == 0 {
		return p
	}
	up := unsafe.Pointer(p)
	if indir > 1 {
		up = decIndirect(up, indir)
	}
	if *(*unsafe.Pointer)(up) == nil {
		// Allocate object.
		*(*unsafe.Pointer)(up) = unsafe.New(rtyp)
	}
	return *(*uintptr)(up)
}

// decodeSingle decodes a top-level value that is not a struct and stores it through p.
// Such values are preceded by a zero, making them have the memory layout of a
// struct field (although with an illegal field number).
func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, basep uintptr) (err error) {
	state := dec.newDecoderState(&dec.buf)
	state.fieldnum = singletonField
	delta := int(state.decodeUint())
	if delta != 0 {
		errorf("decode: corrupted data: non-zero delta for singleton")
	}
	instr := &engine.instr[singletonField]
	if instr.indir != ut.indir {
		return errors.New("gob: internal error: inconsistent indirection")
	}
	ptr := unsafe.Pointer(basep) // offset will be zero
	if instr.indir > 1 {
		ptr = decIndirect(ptr, instr.indir)
	}
	instr.op(instr, state, ptr)
	dec.freeDecoderState(state)
	return nil
}
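
// A concrete singleton: for a plain int holding 7, the value portion of the
// message is 00 0E. The leading zero is the pseudo field delta checked above,
// and 0x0E is 7 in the signed encoding (7<<1 == 14).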

// decodeStruct decodes a top-level struct and stores it through p.
// Indir is for the value, not the type. At the time of the call it may
// differ from ut.indir, which was computed when the engine was built.
// This state cannot arise for decodeSingle, which is called directly
// from the user's value, not from the innards of an engine.
func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) {
	p = allocate(ut.base, p, indir)
	state := dec.newDecoderState(&dec.buf)
	state.fieldnum = -1
	basep := p
	for state.b.Len() > 0 {
		delta := int(state.decodeUint())
		if delta < 0 {
			errorf("decode: corrupted data: negative delta")
		}
		if delta == 0 { // struct terminator is zero delta fieldnum
			break
		}
		fieldnum := state.fieldnum + delta
		if fieldnum >= len(engine.instr) {
			error_(errRange)
			break
		}
		instr := &engine.instr[fieldnum]
		p := unsafe.Pointer(basep + instr.offset)
		if instr.indir > 1 {
			p = decIndirect(p, instr.indir)
		}
		instr.op(instr, state, p)
		state.fieldnum = fieldnum
	}
	dec.freeDecoderState(state)
}
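
// To make the loop above concrete: field numbers arrive as deltas from the
// previously decoded field, and a delta of zero ends the struct. With
// fieldnum starting at -1, a struct whose fields 0 and 2 are non-zero arrives
// as delta 1, value, delta 2, value, delta 0.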

// ignoreStruct discards the data for a struct with no destination.
func (dec *Decoder) ignoreStruct(engine *decEngine) {
	state := dec.newDecoderState(&dec.buf)
	state.fieldnum = -1
	for state.b.Len() > 0 {
		delta := int(state.decodeUint())
		if delta < 0 {
			errorf("ignore decode: corrupted data: negative delta")
		}
		if delta == 0 { // struct terminator is zero delta fieldnum
			break
		}
		fieldnum := state.fieldnum + delta
		if fieldnum >= len(engine.instr) {
			error_(errRange)
		}
		instr := &engine.instr[fieldnum]
		instr.op(instr, state, unsafe.Pointer(nil))
		state.fieldnum = fieldnum
	}
	dec.freeDecoderState(state)
}

// ignoreSingle discards the data for a top-level non-struct value with no
// destination. It's used when calling Decode with a nil value.
func (dec *Decoder) ignoreSingle(engine *decEngine) {
	state := dec.newDecoderState(&dec.buf)
	state.fieldnum = singletonField
	delta := int(state.decodeUint())
	if delta != 0 {
		errorf("decode: corrupted data: non-zero delta for singleton")
	}
	instr := &engine.instr[singletonField]
	instr.op(instr, state, unsafe.Pointer(nil))
	dec.freeDecoderState(state)
}

// decodeArrayHelper does the work for decoding arrays and slices.
func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl error) {
	instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl}
	for i := 0; i < length; i++ {
		up := unsafe.Pointer(p)
		if elemIndir > 1 {
			up = decIndirect(up, elemIndir)
		}
		elemOp(instr, state, up)
		p += uintptr(elemWid)
	}
}

// decodeArray decodes an array and stores it through p, that is, p points to the zeroth element.
// The length is an unsigned integer preceding the elements. Even though the length is redundant
// (it's part of the type), it's a useful check and is included in the encoding.
func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl error) {
	if indir > 0 {
		p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect
	}
	if n := state.decodeUint(); n != uint64(length) {
		errorf("length mismatch in decodeArray")
	}
	dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl)
}

// decodeIntoValue is a helper for map decoding. Since maps are decoded using reflection,
// unlike the other items we can't use a pointer directly.
func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl error) reflect.Value {
	instr := &decInstr{op, 0, indir, 0, ovfl}
	up := unsafe.Pointer(unsafeAddr(v))
	if indir > 1 {
		up = decIndirect(up, indir)
	}
	op(instr, state, up)
	return v
}

// decodeMap decodes a map and stores its header through p.
// Maps are encoded as a length followed by key:value pairs.
// Because the internals of maps are not visible to us, we must
// use reflection rather than pointer magic.
func (dec *Decoder) decodeMap(mtyp reflect.Type, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl error) {
	if indir > 0 {
		p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect
	}
	up := unsafe.Pointer(p)
	if *(*unsafe.Pointer)(up) == nil { // maps are represented as a pointer in the runtime
		// Allocate map.
		*(*unsafe.Pointer)(up) = unsafe.Pointer(reflect.MakeMap(mtyp).Pointer())
	}
	// Maps cannot be accessed by moving addresses around the way
	// that slices etc. can. We must recover a full reflection value for
	// the iteration.
	v := reflect.ValueOf(unsafe.Unreflect(mtyp, unsafe.Pointer(p)))
	n := int(state.decodeUint())
	for i := 0; i < n; i++ {
		key := decodeIntoValue(state, keyOp, keyIndir, allocValue(mtyp.Key()), ovfl)
		elem := decodeIntoValue(state, elemOp, elemIndir, allocValue(mtyp.Elem()), ovfl)
		v.SetMapIndex(key, elem)
	}
}
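
// A small worked example for decodeMap: the data for map[string]int{"a": 1}
// is 01 01 61 02 -- one pair, then the key as a length-prefixed string
// (01 61) and the value as a signed integer (1<<1 == 2).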

// ignoreArrayHelper does the work for discarding arrays and slices.
func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) {
	instr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
	for i := 0; i < length; i++ {
		elemOp(instr, state, nil)
	}
}

// ignoreArray discards the data for an array value with no destination.
func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) {
	if n := state.decodeUint(); n != uint64(length) {
		errorf("length mismatch in ignoreArray")
	}
	dec.ignoreArrayHelper(state, elemOp, length)
}

// ignoreMap discards the data for a map value with no destination.
func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
	n := int(state.decodeUint())
	keyInstr := &decInstr{keyOp, 0, 0, 0, errors.New("no error")}
	elemInstr := &decInstr{elemOp, 0, 0, 0, errors.New("no error")}
	for i := 0; i < n; i++ {
		keyOp(keyInstr, state, nil)
		elemOp(elemInstr, state, nil)
	}
}

// decodeSlice decodes a slice and stores the slice header through p.
// Slices are encoded as an unsigned length followed by the elements.
func (dec *Decoder) decodeSlice(atyp reflect.Type, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl error) {
	n := int(uintptr(state.decodeUint()))
	if indir > 0 {
		up := unsafe.Pointer(p)
		if *(*unsafe.Pointer)(up) == nil {
			// Allocate the slice header.
			*(*unsafe.Pointer)(up) = unsafe.Pointer(new([]unsafe.Pointer))
		}
		p = *(*uintptr)(up)
	}
	// Allocate storage for the slice elements, that is, the underlying array,
	// if the existing slice does not have the capacity.
	// Always write a header at p.
	hdrp := (*reflect.SliceHeader)(unsafe.Pointer(p))
	if hdrp.Cap < n {
		hdrp.Data = uintptr(unsafe.NewArray(atyp.Elem(), n))
		hdrp.Cap = n
	}
	hdrp.Len = n
	dec.decodeArrayHelper(state, hdrp.Data, elemOp, elemWid, n, elemIndir, ovfl)
}

// ignoreSlice skips over the data for a slice value with no destination.
func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) {
	dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint()))
}
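
// A small worked example for decodeSlice: the data for []uint16{256, 1} is
// 02 FE 01 00 01 -- the element count followed by each element in the
// unsigned-integer encoding (256 as FE 01 00, 1 as a single byte).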

// setInterfaceValue sets an interface value to a concrete value,
// but first it checks that the assignment will succeed.
func setInterfaceValue(ivalue reflect.Value, value reflect.Value) {
	if !value.Type().AssignableTo(ivalue.Type()) {
		errorf("cannot assign value of type %s to %s", value.Type(), ivalue.Type())
	}
	ivalue.Set(value)
}

// decodeInterface decodes an interface value and stores it through p.
// Interfaces are encoded as the name of a concrete type followed by a value.
// If the name is empty, the value is nil and no value is sent.
func (dec *Decoder) decodeInterface(ityp reflect.Type, state *decoderState, p uintptr, indir int) {
	// Create a writable interface reflect.Value. We need one even for the nil case.
	ivalue := allocValue(ityp)
	// Read the name of the concrete type.
	nr := state.decodeUint()
	if nr > 1<<31 { // zero is permissible for anonymous types
		errorf("invalid type name length %d", nr)
	}
	b := make([]byte, nr)
	state.b.Read(b)
	name := string(b)
	if name == "" {
		// Copy the representation of the nil interface value to the target.
		// This is horribly unsafe and special.
		*(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
		return
	}
	// The concrete type must be registered.
	typ, ok := nameToConcreteType[name]
	if !ok {
		errorf("name not registered for interface: %q", name)
	}
	// Read the type id of the concrete value.
	concreteId := dec.decodeTypeSequence(true)
	if concreteId < 0 {
		error_(dec.err)
	}
	// Byte count of value is next; we don't care what it is (it's there
	// in case we want to ignore the value by skipping it completely).
	state.decodeUint()
	// Read the concrete value.
	value := allocValue(typ)
	dec.decodeValue(concreteId, value)
	if dec.err != nil {
		error_(dec.err)
	}
	// Allocate the destination interface value.
	if indir > 0 {
		p = allocate(ityp, p, 1) // All but the last level has been allocated by dec.Indirect
	}
	// Assign the concrete value to the interface.
	// Tread carefully; it might not satisfy the interface.
	setInterfaceValue(ivalue, value)
	// Copy the representation of the interface value to the target.
	// This is horribly unsafe and special.
	*(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.InterfaceData()
}
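
// For orientation, the wire layout handled above is: the length of the
// registered concrete-type name and the name itself, any pending type
// definitions followed by the concrete type id, a byte count for the value
// (so an ignoring decoder can skip it), and finally the value itself.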

// ignoreInterface discards the data for an interface value with no destination.
func (dec *Decoder) ignoreInterface(state *decoderState) {
	// Read the name of the concrete type.
	b := make([]byte, state.decodeUint())
	_, err := state.b.Read(b)
	if err != nil {
		error_(err)
	}
	id := dec.decodeTypeSequence(true)
	if id < 0 {
		error_(dec.err)
	}
	// At this point, the decoder buffer contains a delimited value. Just toss it.
	state.b.Next(int(state.decodeUint()))
}

// decodeGobDecoder decodes something implementing the GobDecoder interface.
// The data is encoded as a byte slice.
func (dec *Decoder) decodeGobDecoder(state *decoderState, v reflect.Value) {
	// Read the bytes for the value.
	b := make([]byte, state.decodeUint())
	_, err := state.b.Read(b)
	if err != nil {
		error_(err)
	}
	// We know it's a GobDecoder, so just call the method directly.
	err = v.Interface().(GobDecoder).GobDecode(b)
	if err != nil {
		error_(err)
	}
}

// ignoreGobDecoder discards the data for a GobDecoder value with no destination.
func (dec *Decoder) ignoreGobDecoder(state *decoderState) {
	// Read the bytes for the value.
	b := make([]byte, state.decodeUint())
	_, err := state.b.Read(b)
	if err != nil {
		error_(err)
	}
}

// Indexed by Go types.
var decOpTable = [...]decOp{
	reflect.Bool:       decBool,
	reflect.Int8:       decInt8,
	reflect.Int16:      decInt16,
	reflect.Int32:      decInt32,
	reflect.Int64:      decInt64,
	reflect.Uint8:      decUint8,
	reflect.Uint16:     decUint16,
	reflect.Uint32:     decUint32,
	reflect.Uint64:     decUint64,
	reflect.Float32:    decFloat32,
	reflect.Float64:    decFloat64,
	reflect.Complex64:  decComplex64,
	reflect.Complex128: decComplex128,
	reflect.String:     decString,
}

// Indexed by gob types. tComplex will be added during type.init().
var decIgnoreOpMap = map[typeId]decOp{
	tBool:    ignoreUint,
	tInt:     ignoreUint,
	tUint:    ignoreUint,
	tFloat:   ignoreUint,
	tBytes:   ignoreUint8Array,
	tString:  ignoreUint8Array,
	tComplex: ignoreTwoUints,
}

// decOpFor returns the decoding op for the base type under rt and
// the indirection count to reach it.
func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) {
	ut := userType(rt)
	// If the type implements GobDecoder, we handle it without further processing.
	if ut.isGobDecoder {
		return dec.gobDecodeOpFor(ut)
	}
	// If this type is already in progress, it's a recursive type (e.g. map[string]*T).
	// Return the pointer to the op we're already building.
	if opPtr := inProgress[rt]; opPtr != nil {
		return opPtr, ut.indir
	}
	typ := ut.base
	indir := ut.indir
	var op decOp
	k := typ.Kind()
	if int(k) < len(decOpTable) {
		op = decOpTable[k]
	}
	if op == nil {
		inProgress[rt] = &op
		// Special cases
		switch t := typ; t.Kind() {
		case reflect.Array:
			name = "element of " + name
			elemId := dec.wireType[wireId].ArrayT.Elem
			elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
			ovfl := overflow(name)
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl)
			}

		case reflect.Map:
			name = "element of " + name
			keyId := dec.wireType[wireId].MapT.Key
			elemId := dec.wireType[wireId].MapT.Elem
			keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress)
			elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
			ovfl := overflow(name)
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				up := unsafe.Pointer(p)
				state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl)
			}

		case reflect.Slice:
			name = "element of " + name
			if t.Elem().Kind() == reflect.Uint8 {
				op = decUint8Slice
				break
			}
			var elemId typeId
			if tt, ok := builtinIdToType[wireId]; ok {
				elemId = tt.(*sliceType).Elem
			} else {
				elemId = dec.wireType[wireId].SliceT.Elem
			}
			elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress)
			ovfl := overflow(name)
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl)
			}

		case reflect.Struct:
			// Generate a closure that calls out to the engine for the nested type.
			enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ))
			if err != nil {
				error_(err)
			}
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				// indirect through enginePtr to delay evaluation for recursive structs.
				dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir)
			}
		case reflect.Interface:
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.decodeInterface(t, state, uintptr(p), i.indir)
			}
		}
	}
	if op == nil {
		errorf("decode can't handle type %s", rt)
	}
	return &op, indir
}

// decIgnoreOpFor returns the decoding op for a field that has no destination.
func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp {
	op, ok := decIgnoreOpMap[wireId]
	if !ok {
		if wireId == tInterface {
			// Special case because it's a method: the ignored item might
			// define types and we need to record their state in the decoder.
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.ignoreInterface(state)
			}
			return op
		}
		// Special cases
		wire := dec.wireType[wireId]
		switch {
		case wire == nil:
			errorf("bad data: undefined type %s", wireId.string())
		case wire.ArrayT != nil:
			elemId := wire.ArrayT.Elem
			elemOp := dec.decIgnoreOpFor(elemId)
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len)
			}

		case wire.MapT != nil:
			keyId := dec.wireType[wireId].MapT.Key
			elemId := dec.wireType[wireId].MapT.Elem
			keyOp := dec.decIgnoreOpFor(keyId)
			elemOp := dec.decIgnoreOpFor(elemId)
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.ignoreMap(state, keyOp, elemOp)
			}

		case wire.SliceT != nil:
			elemId := wire.SliceT.Elem
			elemOp := dec.decIgnoreOpFor(elemId)
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.ignoreSlice(state, elemOp)
			}

		case wire.StructT != nil:
			// Generate a closure that calls out to the engine for the nested type.
			enginePtr, err := dec.getIgnoreEnginePtr(wireId)
			if err != nil {
				error_(err)
			}
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				// indirect through enginePtr to delay evaluation for recursive structs
				state.dec.ignoreStruct(*enginePtr)
			}

		case wire.GobEncoderT != nil:
			op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
				state.dec.ignoreGobDecoder(state)
			}
		}
	}
	if op == nil {
		errorf("bad data: ignore can't handle type %s", wireId.string())
	}
	return op
}

// gobDecodeOpFor returns the op for a type that is known to implement
// GobDecoder.
func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) {
	rcvrType := ut.user
	if ut.decIndir == -1 {
		rcvrType = reflect.PtrTo(rcvrType)
	} else if ut.decIndir > 0 {
		for i := int8(0); i < ut.decIndir; i++ {
			rcvrType = rcvrType.Elem()
		}
	}
	var op decOp
	op = func(i *decInstr, state *decoderState, p unsafe.Pointer) {
		// Caller has gotten us to within one indirection of our value.
		if i.indir > 0 {
			if *(*unsafe.Pointer)(p) == nil {
				*(*unsafe.Pointer)(p) = unsafe.New(ut.base)
			}
		}
		// Now p is a pointer to the base type. Do we need to climb out to
		// get to the receiver type?
		var v reflect.Value
		if ut.decIndir == -1 {
			v = reflect.ValueOf(unsafe.Unreflect(rcvrType, unsafe.Pointer(&p)))
		} else {
			v = reflect.ValueOf(unsafe.Unreflect(rcvrType, p))
		}
		state.dec.decodeGobDecoder(state, v)
	}
	return &op, int(ut.indir)
}

// compatibleType asks: Are these two gob Types compatible?
// Answers the question for basic types, arrays, maps and slices, plus
// GobEncoder/Decoder pairs.
// Structs are considered ok; fields will be checked later.
func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool {
	if rhs, ok := inProgress[fr]; ok {
		return rhs == fw
	}
	inProgress[fr] = fw
	ut := userType(fr)
	wire, ok := dec.wireType[fw]
	// If fr is a GobDecoder, the wire type must be GobEncoder.
	// And if fr is not a GobDecoder, the wire type must not be either.
	if ut.isGobDecoder != (ok && wire.GobEncoderT != nil) { // the parentheses look odd but are correct.
		return false
	}
	if ut.isGobDecoder { // This test trumps all others.
		return true
	}
	switch t := ut.base; t.Kind() {
	default:
		// chan, etc: cannot handle.
		return false
	case reflect.Bool:
		return fw == tBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return fw == tInt
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return fw == tUint
	case reflect.Float32, reflect.Float64:
		return fw == tFloat
	case reflect.Complex64, reflect.Complex128:
		return fw == tComplex
	case reflect.String:
		return fw == tString
	case reflect.Interface:
		return fw == tInterface
	case reflect.Array:
		if !ok || wire.ArrayT == nil {
			return false
		}
		array := wire.ArrayT
		return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress)
	case reflect.Map:
		if !ok || wire.MapT == nil {
			return false
		}
		MapType := wire.MapT
		return dec.compatibleType(t.Key(), MapType.Key, inProgress) && dec.compatibleType(t.Elem(), MapType.Elem, inProgress)
	case reflect.Slice:
		// Is it a slice of bytes?
		if t.Elem().Kind() == reflect.Uint8 {
			return fw == tBytes
		}
		// Extract and compare element types.
		var sw *sliceType
		if tt, ok := builtinIdToType[fw]; ok {
			sw, _ = tt.(*sliceType)
		} else if wire != nil {
			sw = wire.SliceT
		}
		elem := userType(t.Elem()).base
		return sw != nil && dec.compatibleType(elem, sw.Elem, inProgress)
	case reflect.Struct:
		return true
	}
	return true
}

// typeString returns a human-readable description of the type identified by remoteId.
func (dec *Decoder) typeString(remoteId typeId) string {
	if t := idToType[remoteId]; t != nil {
		// globally known type.
		return t.string()
	}
	return dec.wireType[remoteId].string()
}

// compileSingle compiles the decoder engine for a non-struct top-level value, including
// GobDecoders.
func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
	rt := ut.user
	engine = new(decEngine)
	engine.instr = make([]decInstr, 1) // one item
	name := rt.String()                // best we can do
	if !dec.compatibleType(rt, remoteId, make(map[reflect.Type]typeId)) {
		remoteType := dec.typeString(remoteId)
		// Common confusing case: local interface type, remote concrete type.
		if ut.base.Kind() == reflect.Interface && remoteId != tInterface {
			return nil, errors.New("gob: local interface type " + name + " can only be decoded from remote interface type; received concrete type " + remoteType)
		}
		return nil, errors.New("gob: decoding into local type " + name + ", received remote type " + remoteType)
	}
	op, indir := dec.decOpFor(remoteId, rt, name, make(map[reflect.Type]*decOp))
	ovfl := errors.New(`value for "` + name + `" out of range`)
	engine.instr[singletonField] = decInstr{*op, singletonField, indir, 0, ovfl}
	engine.numInstr = 1
	return
}

// compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded.
func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err error) {
	engine = new(decEngine)
	engine.instr = make([]decInstr, 1) // one item
	op := dec.decIgnoreOpFor(remoteId)
	ovfl := overflow(dec.typeString(remoteId))
	engine.instr[0] = decInstr{op, 0, 0, 0, ovfl}
	engine.numInstr = 1
	return
}

// compileDec compiles the decoder engine for a value. If the value is not a struct,
// it calls out to compileSingle.
func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err error) {
	rt := ut.base
	srt := rt
	if srt.Kind() != reflect.Struct ||
		ut.isGobDecoder {
		return dec.compileSingle(remoteId, ut)
	}
	var wireStruct *structType
	// Builtin types can come from global pool; the rest must be defined by the decoder.
	// Also we know we're decoding a struct now, so the client must have sent one.
	if t, ok := builtinIdToType[remoteId]; ok {
		wireStruct, _ = t.(*structType)
	} else {
		wire := dec.wireType[remoteId]
		if wire == nil {
			error_(errBadType)
		}
		wireStruct = wire.StructT
	}
	if wireStruct == nil {
		errorf("type mismatch in decoder: want struct type %s; got non-struct", rt)
	}
	engine = new(decEngine)
	engine.instr = make([]decInstr, len(wireStruct.Field))
	seen := make(map[reflect.Type]*decOp)
	// Loop over the fields of the wire type.
	for fieldnum := 0; fieldnum < len(wireStruct.Field); fieldnum++ {
		wireField := wireStruct.Field[fieldnum]
		if wireField.Name == "" {
			errorf("empty name for remote field of type %s", wireStruct.Name)
		}
		ovfl := overflow(wireField.Name)
		// Find the field of the local type with the same name.
		localField, present := srt.FieldByName(wireField.Name)
		// TODO(r): anonymous names
		if !present || !isExported(wireField.Name) {
			op := dec.decIgnoreOpFor(wireField.Id)
			engine.instr[fieldnum] = decInstr{op, fieldnum, 0, 0, ovfl}
			continue
		}
		if !dec.compatibleType(localField.Type, wireField.Id, make(map[reflect.Type]typeId)) {
			errorf("wrong type (%s) for received field %s.%s", localField.Type, wireStruct.Name, wireField.Name)
		}
		op, indir := dec.decOpFor(wireField.Id, localField.Type, localField.Name, seen)
		engine.instr[fieldnum] = decInstr{*op, fieldnum, indir, uintptr(localField.Offset), ovfl}
		engine.numInstr++
	}
	return
}
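
// As a concrete illustration of the loop above: if the sender's struct has
// fields A and B but the local struct declares only B, the compiled engine
// holds an ignore instruction for A (its data is read and discarded) and a
// real decode instruction, with offset and indirection, for B.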

// getDecEnginePtr returns the engine for the specified type.
func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err error) {
	rt := ut.base
	decoderMap, ok := dec.decoderCache[rt]
	if !ok {
		decoderMap = make(map[typeId]**decEngine)
		dec.decoderCache[rt] = decoderMap
	}
	if enginePtr, ok = decoderMap[remoteId]; !ok {
		// To handle recursive types, mark this engine as underway before compiling.
		enginePtr = new(*decEngine)
		decoderMap[remoteId] = enginePtr
		*enginePtr, err = dec.compileDec(remoteId, ut)
		if err != nil {
			delete(decoderMap, remoteId)
		}
	}
	return
}

// emptyStruct is the type we compile into when ignoring a struct value.
type emptyStruct struct{}

var emptyStructType = reflect.TypeOf(emptyStruct{})

// getIgnoreEnginePtr returns the engine for the specified type when the value is to be discarded.
func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err error) {
	var ok bool
	if enginePtr, ok = dec.ignorerCache[wireId]; !ok {
		// To handle recursive types, mark this engine as underway before compiling.
		enginePtr = new(*decEngine)
		dec.ignorerCache[wireId] = enginePtr
		wire := dec.wireType[wireId]
		if wire != nil && wire.StructT != nil {
			*enginePtr, err = dec.compileDec(wireId, userType(emptyStructType))
		} else {
			*enginePtr, err = dec.compileIgnoreSingle(wireId)
		}
		if err != nil {
			delete(dec.ignorerCache, wireId)
		}
	}
	return
}

// decodeValue decodes the data stream representing a value and stores it in val.
func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) {
	defer catchError(&dec.err)
	// If the value is nil, it means we should just ignore this item.
	if !val.IsValid() {
		dec.decodeIgnoredValue(wireId)
		return
	}
	// Dereference down to the underlying type.
	ut := userType(val.Type())
	base := ut.base
	var enginePtr **decEngine
	enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut)
	if dec.err != nil {
		return
	}
	engine := *enginePtr
	if st := base; st.Kind() == reflect.Struct && !ut.isGobDecoder {
		if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 {
			name := base.Name()
			errorf("type mismatch: no fields matched compiling decoder for %s", name)
		}
		dec.decodeStruct(engine, ut, uintptr(unsafeAddr(val)), ut.indir)
	} else {
		dec.decodeSingle(engine, ut, uintptr(unsafeAddr(val)))
	}
}

// decodeIgnoredValue decodes the data stream representing a value of the specified type and discards it.
func (dec *Decoder) decodeIgnoredValue(wireId typeId) {
	var enginePtr **decEngine
	enginePtr, dec.err = dec.getIgnoreEnginePtr(wireId)
	if dec.err != nil {
		return
	}
	wire := dec.wireType[wireId]
	if wire != nil && wire.StructT != nil {
		dec.ignoreStruct(*enginePtr)
	} else {
		dec.ignoreSingle(*enginePtr)
	}
}

func init() {
	var iop, uop decOp
	switch reflect.TypeOf(int(0)).Bits() {
	case 32:
		iop = decInt32
		uop = decUint32
	case 64:
		iop = decInt64
		uop = decUint64
	default:
		panic("gob: unknown size of int/uint")
	}
	decOpTable[reflect.Int] = iop
	decOpTable[reflect.Uint] = uop

	// Finally uintptr
	switch reflect.TypeOf(uintptr(0)).Bits() {
	case 32:
		uop = decUint32
	case 64:
		uop = decUint64
	default:
		panic("gob: unknown size of uintptr")
	}
	decOpTable[reflect.Uintptr] = uop
}

// Gob assumes it can call UnsafeAddr on any Value
// in order to get a pointer it can copy data from.
// Values that have just been created and do not point
// into existing structs or slices cannot be addressed,
// so simulate it by returning a pointer to a copy.
// Each call allocates once.
func unsafeAddr(v reflect.Value) uintptr {
	if v.CanAddr() {
		return v.UnsafeAddr()
	}
	x := reflect.New(v.Type()).Elem()
	x.Set(v)
	return x.UnsafeAddr()
}

// Gob depends on being able to take the address
// of zeroed Values it creates, so use this wrapper instead
// of the standard reflect.Zero.
// Each call allocates once.
func allocValue(t reflect.Type) reflect.Value {
	return reflect.New(t).Elem()
}