Mirror of https://github.com/go-gitea/gitea.git, synced 2025-10-30 06:38:37 -04:00
	Macaron 1.5 (#12596)
* update macaron to v1.5 of fork
* update macaron to v1.5 of fork
* test gzip PR
* add push method impl to context_tests
* use proper gzip commit

Co-authored-by: zeripath <art27@cantab.net>
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
Changed file: vendor/github.com/klauspost/compress/zstd/blockdec.go (77 lines changed, generated, vendored)
@@ -75,21 +75,29 @@ type blockDec struct {

	// Window size of the block.
	WindowSize uint64
	Type       blockType
	RLESize    uint32

	history     chan *history
	input       chan struct{}
	result      chan decodeOutput
	sequenceBuf []seq
	err         error
	decWG       sync.WaitGroup

	// Frame to use for singlethreaded decoding.
	// Should not be used by the decoder itself since parent may be another frame.
	localFrame *frameDec

	// Block is RLE, this is the size.
	RLESize uint32
	tmp     [4]byte

	Type blockType

	// Is this the last block of a frame?
	Last bool

	// Use less memory
	lowMem      bool
	history     chan *history
	input       chan struct{}
	result      chan decodeOutput
	sequenceBuf []seq
	tmp         [4]byte
	err         error
	decWG       sync.WaitGroup
	lowMem bool
}

func (b *blockDec) String() string {
@@ -127,25 +135,37 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
	b.Type = blockType((bh >> 1) & 3)
	// find size.
	cSize := int(bh >> 3)
	maxSize := maxBlockSize
	switch b.Type {
	case blockTypeReserved:
		return ErrReservedBlockType
	case blockTypeRLE:
		b.RLESize = uint32(cSize)
		if b.lowMem {
			maxSize = cSize
		}
		cSize = 1
	case blockTypeCompressed:
		if debug {
			println("Data size on stream:", cSize)
		}
		b.RLESize = 0
		maxSize = maxCompressedBlockSize
		if windowSize < maxCompressedBlockSize && b.lowMem {
			maxSize = int(windowSize)
		}
		if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
			if debug {
				printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
			}
			return ErrCompressedSizeTooBig
		}
	default:
	case blockTypeRaw:
		b.RLESize = 0
		// We do not need a destination for raw blocks.
		maxSize = -1
	default:
		panic("Invalid block type")
	}

	// Read block data.
@@ -156,8 +176,8 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
			b.dataStorage = make([]byte, 0, maxBlockSize)
		}
	}
	if cap(b.dst) <= maxBlockSize {
		b.dst = make([]byte, 0, maxBlockSize+1)
	if cap(b.dst) <= maxSize {
		b.dst = make([]byte, 0, maxSize+1)
	}
	var err error
	b.data, err = br.readBig(cSize, b.dataStorage)
@@ -445,26 +465,22 @@ func (b *blockDec) decodeCompressed(hist *history) error {
		if huff == nil {
			huff = &huff0.Scratch{}
		}
		huff.Out = b.literalBuf[:0]
		huff, literals, err = huff0.ReadTable(literals, huff)
		if err != nil {
			println("reading huffman table:", err)
			return err
		}
		// Use our out buffer.
		huff.Out = b.literalBuf[:0]
		huff.MaxDecodedSize = litRegenSize
		if fourStreams {
			literals, err = huff.Decompress4X(literals, litRegenSize)
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
			literals, err = huff.Decompress1X(literals)
			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
		}
		if err != nil {
			println("decoding compressed literals:", err)
			return err
		}
		// Make sure we don't leak our literals buffer
		huff.Out = nil
		if len(literals) != litRegenSize {
			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
		}
@@ -615,15 +631,12 @@ func (b *blockDec) decodeCompressed(hist *history) error {
		var err error
		// Use our out buffer.
		huff = hist.huffTree
		huff.Out = b.literalBuf[:0]
		huff.MaxDecodedSize = litRegenSize
		if fourStreams {
			literals, err = huff.Decompress4X(literals, litRegenSize)
			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
		} else {
			literals, err = huff.Decompress1X(literals)
			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
		}
		// Make sure we don't leak our literals buffer
		huff.Out = nil
		if err != nil {
			println("decompressing literals:", err)
			return err
@@ -633,12 +646,13 @@ func (b *blockDec) decodeCompressed(hist *history) error {
		}
	} else {
		if hist.huffTree != nil && huff != nil {
			huffDecoderPool.Put(hist.huffTree)
			if hist.dict == nil || hist.dict.litDec != hist.huffTree {
				huffDecoderPool.Put(hist.huffTree)
			}
			hist.huffTree = nil
		}
	}
	if huff != nil {
		huff.Out = nil
		hist.huffTree = huff
	}
	if debug {
@@ -671,12 +685,21 @@ func (b *blockDec) decodeCompressed(hist *history) error {
	//   If only recent offsets were not transferred, this would be an obvious win.
	// 	 Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.

	hbytes := hist.b
	if len(hbytes) > hist.windowSize {
		hbytes = hbytes[len(hbytes)-hist.windowSize:]
		// We do not need history any more.
		if hist.dict != nil {
			hist.dict.content = nil
		}
	}

	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
		println("initializing sequences:", err)
		return err
	}

	err = seqs.decode(nSeqs, br, hist.b)
	err = seqs.decode(nSeqs, br, hbytes)
	if err != nil {
		return err
	}
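Note on the hunks above: they thread the decoder's lowMem flag into block buffer sizing (maxSize) and move literal decoding from the stateful huff.Decompress4X/1X calls onto the stateless huff.Decoder() API with an explicit destination buffer. For orientation only, below is a minimal round-trip sketch of how this vendored package is normally driven from Go; it is not part of the commit, and it assumes the public klauspost/compress/zstd API, where WithDecoderLowmem is the option that ultimately sets the lowMem field consulted above.

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Illustrative payload; any byte slice works.
	payload := bytes.Repeat([]byte("gitea vendored zstd example "), 64)

	// Compress with default encoder settings (EncodeAll needs no io.Writer).
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(payload, nil)
	enc.Close()

	// Decompress with low-memory mode enabled; this option is what ends up
	// setting the blockDec.lowMem flag checked in the hunks above.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderLowmem(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(out, payload), "decoded bytes:", len(out))
}

With lowMem set, the block decoder caps its scratch buffers at the RLE or window size instead of always reserving maxBlockSize, trading occasional reallocation for a smaller steady-state footprint.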