mirror of https://github.com/v2fly/v2ray-core.git synced 2024-06-09 09:20:45 +00:00

update to IETF QUIC draft-29 (#287)

* update to IETF QUIC draft-29
This commit is contained in:
Kslr 2020-10-09 10:04:35 +08:00 committed by GitHub
parent 8e8127b2be
commit a44e1ede5b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
179 changed files with 206 additions and 35687 deletions

external/README.md vendored

@@ -1,7 +0,0 @@
# Note
The `external` dir in the project exists for historical reasons. We will migrate to use go mod to maintain 3rd party libraries.
For now, all modules under external are used by "quic-go" to support the QUIC protocol, and cannot be migrated without a breaking change.
The plan is to remove the whole `external` dir once `quic-go` has proven mature enough in production.


@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 cheekybits
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,2 +0,0 @@
// Package generic contains the generic marker types.
package generic


@@ -1,13 +0,0 @@
package generic
// Type is the placeholder type that indicates a generic value.
// When genny is executed, variables of this type will be replaced with
// references to the specific types.
// var GenericType generic.Type
type Type interface{}
// Number is the placeholder type that indicates a generic numerical value.
// When genny is executed, variables of this type will be replaced with
// references to the specific types.
// var GenericType generic.Number
type Number float64
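As a hedged illustration of how these placeholders are consumed (the queue type, file names, and the vendored import path below are assumptions made for the sketch, not part of this change): a source file is written against generic.Type, and running genny substitutes the concrete types listed on the go:generate line.

package queue

import "v2ray.com/core/external/github.com/cheekybits/genny/generic"

//go:generate genny -in=$GOFILE -out=queue_gen.go gen "Item=string,int"

// Item is the genny placeholder; code generation replaces it with string and int.
type Item generic.Type

// ItemQueue is a FIFO container of Item values.
type ItemQueue struct {
	items []Item
}

// Push appends v to the back of the queue.
func (q *ItemQueue) Push(v Item) { q.items = append(q.items, v) }

// Pop removes and returns the element at the front of the queue.
func (q *ItemQueue) Pop() Item {
	v := q.items[0]
	q.items = q.items[1:]
	return v
}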


@@ -1,57 +0,0 @@
Copyright (c) 2017 Cloudflare. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Cloudflare nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
========================================================================
The x64 field arithmetic implementation was derived from the Microsoft Research
SIDH implementation, <https://github.com/Microsoft/PQCrypto-SIDH>, available
under the following license:
========================================================================
MIT License
Copyright (c) Microsoft Corporation. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,66 +0,0 @@
// +build noasm !amd64
package internal
// Uint128 represents a 128-bit value as high (H) and low (L) 64-bit halves.
type Uint128 struct {
H, L uint64
}
// Adds two 64-bit digits in constant time.
// Returns the result and the carry (1 or 0).
func Addc64(cin, a, b uint64) (ret, cout uint64) {
t := a + cin
ret = b + t
cout = ((a & b) | ((a | b) & (^ret))) >> 63
return
}
// Subtracts two 64-bit digits in constant time.
// Returns the result and the borrow (1 or 0).
func Subc64(bIn, a, b uint64) (ret, bOut uint64) {
var tmp1 = a - b
// Set bOut if bIn!=0 and tmp1==0 in constant time
bOut = bIn & (1 ^ ((tmp1 | uint64(0-tmp1)) >> 63))
// Constant time check if x<y
bOut |= (a ^ ((a ^ b) | (uint64(a-b) ^ b))) >> 63
ret = tmp1 - bIn
return
}
// Multiplies two 64-bit digits in constant time.
func Mul64(a, b uint64) (res Uint128) {
var al, bl, ah, bh, albl, albh, ahbl, ahbh uint64
var res1, res2, res3 uint64
var carry, maskL, maskH, temp uint64
maskL = (^maskL) >> 32
maskH = ^maskL
al = a & maskL
ah = a >> 32
bl = b & maskL
bh = b >> 32
albl = al * bl
albh = al * bh
ahbl = ah * bl
ahbh = ah * bh
res.L = albl & maskL
res1 = albl >> 32
res2 = ahbl & maskL
res3 = albh & maskL
temp = res1 + res2 + res3
carry = temp >> 32
res.L ^= temp << 32
res1 = ahbl >> 32
res2 = albh >> 32
res3 = ahbh & maskL
temp = res1 + res2 + res3 + carry
res.H = temp & maskL
carry = temp & maskH
res.H ^= (ahbh & maskH) + carry
return
}
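As a usage sketch (the add512 helper below is illustrative, not part of the library; it mirrors the fp503AddLazy routine that appears later in this change), the carry returned by Addc64 is threaded through a loop to add multi-limb integers in constant time:

// add512 computes z = x + y over 8 little-endian 64-bit limbs,
// propagating the carry via Addc64 and returning the final carry.
func add512(z, x, y *[8]uint64) uint64 {
	var carry uint64
	for i := 0; i < len(z); i++ {
		z[i], carry = Addc64(carry, x[i], y[i])
	}
	return carry
}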


@@ -1,440 +0,0 @@
package internal
type CurveOperations struct {
Params *SidhParams
}
// Computes the j-invariant of a curve y^2 = x^3 + (A/C)x^2 + x with A,C in F_(p^2). The result
// is written to the jBytes buffer, encoded in little-endian format. The caller-provided
// jBytes buffer has to be big enough to hold the j-invariant value. In the case
// of SIDH, the buffer size must be at least the size of the shared secret.
// Implementation corresponds to Algorithm 9 from SIKE.
func (c *CurveOperations) Jinvariant(cparams *ProjectiveCurveParameters, jBytes []byte) {
var j, t0, t1 Fp2Element
op := c.Params.Op
op.Square(&j, &cparams.A) // j = A^2
op.Square(&t1, &cparams.C) // t1 = C^2
op.Add(&t0, &t1, &t1) // t0 = t1 + t1
op.Sub(&t0, &j, &t0) // t0 = j - t0
op.Sub(&t0, &t0, &t1) // t0 = t0 - t1
op.Sub(&j, &t0, &t1) // j = t0 - t1
op.Square(&t1, &t1) // t1 = t1^2
op.Mul(&j, &j, &t1) // j = j * t1
op.Add(&t0, &t0, &t0) // t0 = t0 + t0
op.Add(&t0, &t0, &t0) // t0 = t0 + t0
op.Square(&t1, &t0) // t1 = t0^2
op.Mul(&t0, &t0, &t1) // t0 = t0 * t1
op.Add(&t0, &t0, &t0) // t0 = t0 + t0
op.Add(&t0, &t0, &t0) // t0 = t0 + t0
op.Inv(&j, &j) // j = 1/j
op.Mul(&j, &t0, &j) // j = t0 * j
c.Fp2ToBytes(jBytes, &j)
}
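For reference (standard Montgomery-curve theory, not stated in the file itself): writing a = A/C, the quantity computed above is

j(E_a) = 256 * (a^2 - 3)^3 / (a^2 - 4)

Algorithm 9 evaluates it projectively in (A:C), so only the single op.Inv at the end is required.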
// Given affine points x(P), x(Q) and x(Q-P) in the extension field F_{p^2}, the function
// recovers the projective coordinate A of the curve. This is Algorithm 10 from SIKE.
func (c *CurveOperations) RecoverCoordinateA(curve *ProjectiveCurveParameters, xp, xq, xr *Fp2Element) {
var t0, t1 Fp2Element
op := c.Params.Op
op.Add(&t1, xp, xq) // t1 = Xp + Xq
op.Mul(&t0, xp, xq) // t0 = Xp * Xq
op.Mul(&curve.A, xr, &t1) // A = X(q-p) * t1
op.Add(&curve.A, &curve.A, &t0) // A = A + t0
op.Mul(&t0, &t0, xr) // t0 = t0 * X(q-p)
op.Sub(&curve.A, &curve.A, &c.Params.OneFp2) // A = A - 1
op.Add(&t0, &t0, &t0) // t0 = t0 + t0
op.Add(&t1, &t1, xr) // t1 = t1 + X(q-p)
op.Add(&t0, &t0, &t0) // t0 = t0 + t0
op.Square(&curve.A, &curve.A) // A = A^2
op.Inv(&t0, &t0) // t0 = 1/t0
op.Mul(&curve.A, &curve.A, &t0) // A = A * t0
op.Sub(&curve.A, &curve.A, &t1) // A = A - t1
}
// Computes equivalence (A:C) ~ (A+2C : A-2C)
func (c *CurveOperations) CalcCurveParamsEquiv3(cparams *ProjectiveCurveParameters) CurveCoefficientsEquiv {
var coef CurveCoefficientsEquiv
var c2 Fp2Element
var op = c.Params.Op
op.Add(&c2, &cparams.C, &cparams.C)
// A24p = A+2*C
op.Add(&coef.A, &cparams.A, &c2)
// A24m = A-2*C
op.Sub(&coef.C, &cparams.A, &c2)
return coef
}
// Computes equivalence (A:C) ~ (A+2C : 4C)
func (c *CurveOperations) CalcCurveParamsEquiv4(cparams *ProjectiveCurveParameters) CurveCoefficientsEquiv {
var coefEq CurveCoefficientsEquiv
var op = c.Params.Op
op.Add(&coefEq.C, &cparams.C, &cparams.C)
// A24p = A+2C
op.Add(&coefEq.A, &cparams.A, &coefEq.C)
// C24 = 4*C
op.Add(&coefEq.C, &coefEq.C, &coefEq.C)
return coefEq
}
// Helper function for the right-to-left ladder (ScalarMul3Pt). Returns (A+2C)/4C.
func (c *CurveOperations) CalcAplus2Over4(cparams *ProjectiveCurveParameters) (ret Fp2Element) {
var tmp Fp2Element
var op = c.Params.Op
// 2C
op.Add(&tmp, &cparams.C, &cparams.C)
// A+2C
op.Add(&ret, &cparams.A, &tmp)
// 1/4C
op.Add(&tmp, &tmp, &tmp)
op.Inv(&tmp, &tmp)
// A+2C/4C
op.Mul(&ret, &ret, &tmp)
return
}
// Recovers (A:C) curve parameters from projectively equivalent (A+2C:A-2C).
func (c *CurveOperations) RecoverCurveCoefficients3(cparams *ProjectiveCurveParameters, coefEq *CurveCoefficientsEquiv) {
var op = c.Params.Op
op.Add(&cparams.A, &coefEq.A, &coefEq.C)
// cparams.A = 2*(A+2C+A-2C) = 4A
op.Add(&cparams.A, &cparams.A, &cparams.A)
// cparams.C = (A+2C-A+2C) = 4C
op.Sub(&cparams.C, &coefEq.A, &coefEq.C)
return
}
// Recovers (A:C) curve parameters from projectively equivalent (A+2C:4C).
func (c *CurveOperations) RecoverCurveCoefficients4(cparams *ProjectiveCurveParameters, coefEq *CurveCoefficientsEquiv) {
var op = c.Params.Op
// cparams.C = (4C)*1/2=2C
op.Mul(&cparams.C, &coefEq.C, &c.Params.HalfFp2)
// cparams.A = A+2C - 2C = A
op.Sub(&cparams.A, &coefEq.A, &cparams.C)
// cparams.C = 2C * 1/2 = C
op.Mul(&cparams.C, &cparams.C, &c.Params.HalfFp2)
return
}
// Combined coordinate doubling and differential addition. Takes projective points
// P, Q, Q-P and the (A+2C)/4C coefficient of curve E. Returns 2*P and P+Q calculated on E.
// Used only by the right-to-left ladder (ScalarMul3Pt). Corresponds to Algorithm 5 of SIKE.
func (c *CurveOperations) xDblAdd(P, Q, QmP *ProjectivePoint, a24 *Fp2Element) (dblP, PaQ ProjectivePoint) {
var t0, t1, t2 Fp2Element
var op = c.Params.Op
xQmP, zQmP := &QmP.X, &QmP.Z
xPaQ, zPaQ := &PaQ.X, &PaQ.Z
x2P, z2P := &dblP.X, &dblP.Z
xP, zP := &P.X, &P.Z
xQ, zQ := &Q.X, &Q.Z
op.Add(&t0, xP, zP) // t0 = Xp+Zp
op.Sub(&t1, xP, zP) // t1 = Xp-Zp
op.Square(x2P, &t0) // 2P.X = t0^2
op.Sub(&t2, xQ, zQ) // t2 = Xq-Zq
op.Add(xPaQ, xQ, zQ) // Xp+q = Xq+Zq
op.Mul(&t0, &t0, &t2) // t0 = t0 * t2
op.Mul(z2P, &t1, &t1) // 2P.Z = t1 * t1
op.Mul(&t1, &t1, xPaQ) // t1 = t1 * Xp+q
op.Sub(&t2, x2P, z2P) // t2 = 2P.X - 2P.Z
op.Mul(x2P, x2P, z2P) // 2P.X = 2P.X * 2P.Z
op.Mul(xPaQ, a24, &t2) // Xp+q = A24 * t2
op.Sub(zPaQ, &t0, &t1) // Zp+q = t0 - t1
op.Add(z2P, xPaQ, z2P) // 2P.Z = Xp+q + 2P.Z
op.Add(xPaQ, &t0, &t1) // Xp+q = t0 + t1
op.Mul(z2P, z2P, &t2) // 2P.Z = 2P.Z * t2
op.Square(zPaQ, zPaQ) // Zp+q = Zp+q ^ 2
op.Square(xPaQ, xPaQ) // Xp+q = Xp+q ^ 2
op.Mul(zPaQ, xQmP, zPaQ) // Zp+q = Xq-p * Zp+q
op.Mul(xPaQ, zQmP, xPaQ) // Xp+q = Zq-p * Xp+q
return
}
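Reading the comments above back out, the routine implements the usual Montgomery doubling and differential addition (a restatement for readability, with a24 = (A+2C)/4C):

X(2P)  = (Xp+Zp)^2 * (Xp-Zp)^2
Z(2P)  = [(Xp+Zp)^2 - (Xp-Zp)^2] * [(Xp-Zp)^2 + a24*((Xp+Zp)^2 - (Xp-Zp)^2)]
X(P+Q) = Z(Q-P) * [(Xp+Zp)*(Xq-Zq) + (Xp-Zp)*(Xq+Zq)]^2
Z(P+Q) = X(Q-P) * [(Xp+Zp)*(Xq-Zq) - (Xp-Zp)*(Xq+Zq)]^2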
// Given the curve parameters, xP = x(P), computes xP = x([2^k]P)
// Safe to overlap xP, x2P.
func (c *CurveOperations) Pow2k(xP *ProjectivePoint, params *CurveCoefficientsEquiv, k uint32) {
var t0, t1 Fp2Element
var op = c.Params.Op
x, z := &xP.X, &xP.Z
for i := uint32(0); i < k; i++ {
op.Sub(&t0, x, z) // t0 = Xp - Zp
op.Add(&t1, x, z) // t1 = Xp + Zp
op.Square(&t0, &t0) // t0 = t0 ^ 2
op.Square(&t1, &t1) // t1 = t1 ^ 2
op.Mul(z, &params.C, &t0) // Z2p = C24 * t0
op.Mul(x, z, &t1) // X2p = Z2p * t1
op.Sub(&t1, &t1, &t0) // t1 = t1 - t0
op.Mul(&t0, &params.A, &t1) // t0 = A24+ * t1
op.Add(z, z, &t0) // Z2p = Z2p + t0
op.Mul(z, z, &t1) // Zp = Z2p * t1
}
}
// Given the curve parameters, xP = x(P), and k >= 0, compute xP = x([3^k]P).
//
// Safe to overlap xP, xR.
func (c *CurveOperations) Pow3k(xP *ProjectivePoint, params *CurveCoefficientsEquiv, k uint32) {
var t0, t1, t2, t3, t4, t5, t6 Fp2Element
var op = c.Params.Op
x, z := &xP.X, &xP.Z
for i := uint32(0); i < k; i++ {
op.Sub(&t0, x, z) // t0 = Xp - Zp
op.Square(&t2, &t0) // t2 = t0^2
op.Add(&t1, x, z) // t1 = Xp + Zp
op.Square(&t3, &t1) // t3 = t1^2
op.Add(&t4, &t1, &t0) // t4 = t1 + t0
op.Sub(&t0, &t1, &t0) // t0 = t1 - t0
op.Square(&t1, &t4) // t1 = t4^2
op.Sub(&t1, &t1, &t3) // t1 = t1 - t3
op.Sub(&t1, &t1, &t2) // t1 = t1 - t2
op.Mul(&t5, &t3, &params.A) // t5 = t3 * A24+
op.Mul(&t3, &t3, &t5) // t3 = t5 * t3
op.Mul(&t6, &t2, &params.C) // t6 = t2 * A24-
op.Mul(&t2, &t2, &t6) // t2 = t2 * t6
op.Sub(&t3, &t2, &t3) // t3 = t2 - t3
op.Sub(&t2, &t5, &t6) // t2 = t5 - t6
op.Mul(&t1, &t2, &t1) // t1 = t2 * t1
op.Add(&t2, &t3, &t1) // t2 = t3 + t1
op.Square(&t2, &t2) // t2 = t2^2
op.Mul(x, &t2, &t4) // X3p = t2 * t4
op.Sub(&t1, &t3, &t1) // t1 = t3 - t1
op.Square(&t1, &t1) // t1 = t1^2
op.Mul(z, &t1, &t0) // Z3p = t1 * t0
}
}
// Set (y1, y2, y3) = (1/x1, 1/x2, 1/x3).
//
// All xi, yi must be distinct.
func (c *CurveOperations) Fp2Batch3Inv(x1, x2, x3, y1, y2, y3 *Fp2Element) {
var x1x2, t Fp2Element
var op = c.Params.Op
op.Mul(&x1x2, x1, x2) // x1*x2
op.Mul(&t, &x1x2, x3) // 1/(x1*x2*x3)
op.Inv(&t, &t)
op.Mul(y1, &t, x2) // 1/x1
op.Mul(y1, y1, x3)
op.Mul(y2, &t, x1) // 1/x2
op.Mul(y2, y2, x3)
op.Mul(y3, &t, &x1x2) // 1/x3
}
// ScalarMul3Pt is a right-to-left point multiplication that given the
// x-coordinate of P, Q and P-Q calculates the x-coordinate of R=Q+[scalar]P.
// nbits must be smaller or equal to len(scalar).
func (c *CurveOperations) ScalarMul3Pt(cparams *ProjectiveCurveParameters, P, Q, PmQ *ProjectivePoint, nbits uint, scalar []uint8) ProjectivePoint {
var R0, R2, R1 ProjectivePoint
var op = c.Params.Op
aPlus2Over4 := c.CalcAplus2Over4(cparams)
R1 = *P
R2 = *PmQ
R0 = *Q
// Iterate over the bits of the scalar, bottom to top
prevBit := uint8(0)
for i := uint(0); i < nbits; i++ {
bit := (scalar[i>>3] >> (i & 7) & 1)
swap := prevBit ^ bit
prevBit = bit
op.CondSwap(&R1.X, &R1.Z, &R2.X, &R2.Z, swap)
R0, R2 = c.xDblAdd(&R0, &R2, &R1, &aPlus2Over4)
}
op.CondSwap(&R1.X, &R1.Z, &R2.X, &R2.Z, prevBit)
return R1
}
// Convert the input to wire format.
//
// The output byte slice must be at least 2*bytelen(p) bytes long.
func (c *CurveOperations) Fp2ToBytes(output []byte, fp2 *Fp2Element) {
if len(output) < 2*c.Params.Bytelen {
panic("output byte slice too short")
}
var a Fp2Element
c.Params.Op.FromMontgomery(fp2, &a)
// convert to bytes in little endian form
for i := 0; i < c.Params.Bytelen; i++ {
// write byte i of each coordinate, where i = j*8 + k
j := i / 8
k := uint64(i % 8)
output[i] = byte(a.A[j] >> (8 * k))
output[i+c.Params.Bytelen] = byte(a.B[j] >> (8 * k))
}
}
// Read 2*bytelen(p) bytes into the given ExtensionFieldElement.
//
// It is an error to call this function if the input byte slice is less than 2*bytelen(p) bytes long.
func (c *CurveOperations) Fp2FromBytes(fp2 *Fp2Element, input []byte) {
if len(input) < 2*c.Params.Bytelen {
panic("input byte slice too short")
}
for i := 0; i < c.Params.Bytelen; i++ {
j := i / 8
k := uint64(i % 8)
fp2.A[j] |= uint64(input[i]) << (8 * k)
fp2.B[j] |= uint64(input[i+c.Params.Bytelen]) << (8 * k)
}
c.Params.Op.ToMontgomery(fp2)
}
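A small worked example of the limb indexing used by Fp2ToBytes and Fp2FromBytes (values chosen purely for illustration): byte i = 13 lives in limb j = 13/8 = 1 at bit offset 8*k = 8*(13%8) = 40, so Fp2ToBytes emits output[13] = byte(a.A[1] >> 40) and output[13+Bytelen] = byte(a.B[1] >> 40).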
/* -------------------------------------------------------------------------
Mechanisms used for isogeny calculations
-------------------------------------------------------------------------*/
// Constructs isogeny3 objects
func Newisogeny3(op FieldOps) Isogeny {
return &isogeny3{Field: op}
}
// Constructs isogeny4 objects
func Newisogeny4(op FieldOps) Isogeny {
return &isogeny4{isogeny3: isogeny3{Field: op}}
}
// Given a three-torsion point p = x(PB) on the curve E_(A:C), construct the
// three-isogeny phi : E_(A:C) -> E_(A:C)/<P_3> = E_(A':C').
//
// Input: (XP_3: ZP_3), where P_3 has exact order 3 on E_A/C
// Output: * Curve coordinates (A' + 2C', A' - 2C') corresponding to E_A'/C' = E_A/C/<P3>
// * Isogeny phi with constants in F_p^2
func (phi *isogeny3) GenerateCurve(p *ProjectivePoint) CurveCoefficientsEquiv {
var t0, t1, t2, t3, t4 Fp2Element
var coefEq CurveCoefficientsEquiv
var K1, K2 = &phi.K1, &phi.K2
op := phi.Field
op.Sub(K1, &p.X, &p.Z) // K1 = XP3 - ZP3
op.Square(&t0, K1) // t0 = K1^2
op.Add(K2, &p.X, &p.Z) // K2 = XP3 + ZP3
op.Square(&t1, K2) // t1 = K2^2
op.Add(&t2, &t0, &t1) // t2 = t0 + t1
op.Add(&t3, K1, K2) // t3 = K1 + K2
op.Square(&t3, &t3) // t3 = t3^2
op.Sub(&t3, &t3, &t2) // t3 = t3 - t2
op.Add(&t2, &t1, &t3) // t2 = t1 + t3
op.Add(&t3, &t3, &t0) // t3 = t3 + t0
op.Add(&t4, &t3, &t0) // t4 = t3 + t0
op.Add(&t4, &t4, &t4) // t4 = t4 + t4
op.Add(&t4, &t1, &t4) // t4 = t1 + t4
op.Mul(&coefEq.C, &t2, &t4) // A24m = t2 * t4
op.Add(&t4, &t1, &t2) // t4 = t1 + t2
op.Add(&t4, &t4, &t4) // t4 = t4 + t4
op.Add(&t4, &t0, &t4) // t4 = t0 + t4
op.Mul(&t4, &t3, &t4) // t4 = t3 * t4
op.Sub(&t0, &t4, &coefEq.C) // t0 = t4 - A24m
op.Add(&coefEq.A, &coefEq.C, &t0) // A24p = A24m + t0
return coefEq
}
// Given a 3-isogeny phi and a point pB = x(PB), compute x(QB), the x-coordinate
// of the image QB = phi(PB) of PB under phi : E_(A:C) -> E_(A':C').
//
// The output xQ = x(Q) is then a point on the curve E_(A':C'); the curve
// parameters are returned by the GenerateCurve function used to construct phi.
func (phi *isogeny3) EvaluatePoint(p *ProjectivePoint) ProjectivePoint {
var t0, t1, t2 Fp2Element
var q ProjectivePoint
var K1, K2 = &phi.K1, &phi.K2
var px, pz = &p.X, &p.Z
op := phi.Field
op.Add(&t0, px, pz) // t0 = XQ + ZQ
op.Sub(&t1, px, pz) // t1 = XQ - ZQ
op.Mul(&t0, K1, &t0) // t0 = K1 * t0
op.Mul(&t1, K2, &t1) // t1 = K2 * t1
op.Add(&t2, &t0, &t1) // t2 = t0 + t1
op.Sub(&t0, &t1, &t0) // t0 = t1 - t0
op.Square(&t2, &t2) // t2 = t2 ^ 2
op.Square(&t0, &t0) // t0 = t0 ^ 2
op.Mul(&q.X, px, &t2) // XQ'= XQ * t2
op.Mul(&q.Z, pz, &t0) // ZQ'= ZQ * t0
return q
}
// Given a four-torsion point p = x(PB) on the curve E_(A:C), construct the
// four-isogeny phi : E_(A:C) -> E_(A:C)/<P_4> = E_(A':C').
//
// Input: (XP_4: ZP_4), where P_4 has exact order 4 on E_A/C
// Output: * Curve coordinates (A' + 2C', 4C') corresponding to E_A'/C' = E_A/C/<P4>
// * Isogeny phi with constants in F_p^2
func (phi *isogeny4) GenerateCurve(p *ProjectivePoint) CurveCoefficientsEquiv {
var coefEq CurveCoefficientsEquiv
var xp4, zp4 = &p.X, &p.Z
var K1, K2, K3 = &phi.K1, &phi.K2, &phi.K3
op := phi.Field
op.Sub(K2, xp4, zp4)
op.Add(K3, xp4, zp4)
op.Square(K1, zp4)
op.Add(K1, K1, K1)
op.Square(&coefEq.C, K1)
op.Add(K1, K1, K1)
op.Square(&coefEq.A, xp4)
op.Add(&coefEq.A, &coefEq.A, &coefEq.A)
op.Square(&coefEq.A, &coefEq.A)
return coefEq
}
// Given a 4-isogeny phi and a point xP = x(P), compute x(Q), the x-coordinate
// of the image Q = phi(P) of P under phi : E_(A:C) -> E_(A':C').
//
// Input: Isogeny returned by GenerateCurve and point q=(Qx,Qz) from E0_A/C
// Output: Corresponding point q from E1_A'/C', where E1 is 4-isogenous to E0
func (phi *isogeny4) EvaluatePoint(p *ProjectivePoint) ProjectivePoint {
var t0, t1 Fp2Element
var q = *p
var xq, zq = &q.X, &q.Z
var K1, K2, K3 = &phi.K1, &phi.K2, &phi.K3
op := phi.Field
op.Add(&t0, xq, zq)
op.Sub(&t1, xq, zq)
op.Mul(xq, &t0, K2)
op.Mul(zq, &t1, K3)
op.Mul(&t0, &t0, &t1)
op.Mul(&t0, &t0, K1)
op.Add(&t1, xq, zq)
op.Sub(zq, xq, zq)
op.Square(&t1, &t1)
op.Square(zq, zq)
op.Add(xq, &t0, &t1)
op.Sub(&t0, zq, &t0)
op.Mul(xq, xq, &t1)
op.Mul(zq, zq, &t0)
return q
}
/* -------------------------------------------------------------------------
Utils
-------------------------------------------------------------------------*/
func (point *ProjectivePoint) ToAffine(c *CurveOperations) *Fp2Element {
var affine_x Fp2Element
c.Params.Op.Inv(&affine_x, &point.Z)
c.Params.Op.Mul(&affine_x, &affine_x, &point.X)
return &affine_x
}
// Cleans data in fp
func (fp *Fp2Element) Zeroize() {
// Zeroizing in two separate loops lets the compiler
// use the fast runtime.memclr()
for i := range fp.A {
fp.A[i] = 0
}
for i := range fp.B {
fp.B[i] = 0
}
}


@@ -1,140 +0,0 @@
package internal
const (
FP_MAX_WORDS = 12 // Currently p751.NumWords
)
// Representation of an element of the base field F_p.
//
// No particular meaning is assigned to the representation -- it could represent
// an element in Montgomery form, or not. Tracking the meaning of the field
// element is left to higher types.
type FpElement [FP_MAX_WORDS]uint64
// Represents an intermediate product of two elements of the base field F_p.
type FpElementX2 [2 * FP_MAX_WORDS]uint64
// Represents an element of the extension field Fp^2 = Fp(i), where i^2 = -1
type Fp2Element struct {
A FpElement
B FpElement
}
type DomainParams struct {
// P, Q and R=P-Q base points
Affine_P, Affine_Q, Affine_R Fp2Element
// Size of a computation strategy for x-torsion group
IsogenyStrategy []uint32
// Max size of secret key for x-torsion group, in bits
SecretBitLen uint
// Max size of secret key for x-torsion group, in bytes
SecretByteLen uint
}
type SidhParams struct {
Id uint8
// Bytelen of P
Bytelen int
// The public key size, in bytes.
PublicKeySize int
// The shared secret size, in bytes.
SharedSecretSize uint
// 2- and 3-torsion group parameter definitions
A, B DomainParams
// Precomputed identity element in the Fp2 in Montgomery domain
OneFp2 Fp2Element
// Precomputed 1/2 in the Fp2 in Montgomery domain
HalfFp2 Fp2Element
// Length of SIKE secret message. Must be one of {24,32,40},
// depending on size of prime field used (see [SIKE], 1.4 and 5.1)
MsgLen uint
// Length of SIKE ephemeral KEM key (see [SIKE], 1.4 and 5.1)
KemSize uint
// Access to field arithmetic
Op FieldOps
}
// Interface for working with isogenies.
type Isogeny interface {
// Given a torsion point on a curve, computes the isogenous curve.
// Returns curve coefficients (A':C'), so that E_(A':C') = E_(A:C)/<P>,
// where P is the provided projective point. Also sets the isogeny constants
// that are needed for isogeny evaluation.
GenerateCurve(*ProjectivePoint) CurveCoefficientsEquiv
// Evaluates the isogeny at a caller-provided point. Requires the isogeny curve
// constants to have been computed earlier by GenerateCurve.
EvaluatePoint(*ProjectivePoint) ProjectivePoint
}
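A minimal usage sketch of this interface (the helper name applyThreeIsogeny is hypothetical; the constructors and types are the ones defined in this file set): the kernel point fixes the codomain curve, and further points are then pushed through the map.

// applyThreeIsogeny is an illustrative helper, not part of the library.
// Given field operations, a kernel point p3 of exact order 3 and another
// point q, it returns the codomain curve coefficients and the image of q.
func applyThreeIsogeny(op FieldOps, p3, q *ProjectivePoint) (CurveCoefficientsEquiv, ProjectivePoint) {
	phi := Newisogeny3(op)
	coefEq := phi.GenerateCurve(p3)     // E' = E/<p3>; also sets phi's constants
	return coefEq, phi.EvaluatePoint(q) // phi(q), a point on E'
}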
// Stores curve projective parameters equivalent to A/C. The meaning of the
// values depends on the context. When working with isogenies over a
// subgroup whose order is a power of:
// * three, then (A:C) ~ (A+2C:A-2C)
// * four, then (A:C) ~ (A+2C:4C)
// See Appendix A of SIKE for more details.
type CurveCoefficientsEquiv struct {
A Fp2Element
C Fp2Element
}
// A point on the projective line P^1(F_{p^2}).
//
// This represents a point on the Kummer line of a Montgomery curve. The
// curve is specified by a ProjectiveCurveParameters struct.
type ProjectivePoint struct {
X Fp2Element
Z Fp2Element
}
// A point on the projective line P^1(F_{p^2}).
//
// This is used to work projectively with the curve coefficients.
type ProjectiveCurveParameters struct {
A Fp2Element
C Fp2Element
}
// Stores Isogeny 3 curve constants
type isogeny3 struct {
Field FieldOps
K1 Fp2Element
K2 Fp2Element
}
// Stores Isogeny 4 curve constants
type isogeny4 struct {
isogeny3
K3 Fp2Element
}
type FieldOps interface {
// Set res = lhs + rhs.
//
// Allowed to overlap lhs or rhs with res.
Add(res, lhs, rhs *Fp2Element)
// Set res = lhs - rhs.
//
// Allowed to overlap lhs or rhs with res.
Sub(res, lhs, rhs *Fp2Element)
// Set res = lhs * rhs.
//
// Allowed to overlap lhs or rhs with res.
Mul(res, lhs, rhs *Fp2Element)
// Set res = x * x
//
// Allowed to overlap res with x.
Square(res, x *Fp2Element)
// Set res = 1/x
//
// Allowed to overlap res with x.
Inv(res, x *Fp2Element)
// If choice = 1u8, set (x,y) = (y,x). If choice = 0u8, set (x,y) = (x,y).
CondSwap(xPx, xPz, xQx, xQz *Fp2Element, choice uint8)
// Converts Fp2Element to Montgomery domain (x*R mod p)
ToMontgomery(x *Fp2Element)
// Converts 'x' from the Montgomery domain back to a plain Fp2Element
// and stores the result in 'a'; 'x' itself is not modified
FromMontgomery(x *Fp2Element, a *Fp2Element)
}


@@ -1,11 +0,0 @@
package utils
type x86 struct {
// Signals support for MULX which is in BMI2
HasBMI2 bool
// Signals support for ADX
HasADX bool
}
var X86 x86


@@ -1,29 +0,0 @@
// +build amd64,!noasm
// Sets capabilities flags for x86 according to information received from
// CPUID. It was written in accordance with
// "Intel® 64 and IA-32 Architectures Developer's Manual: Vol. 2A".
// https://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-vol-2a-manual.html
package utils
// Performs CPUID and returns the values of the registers.
//go:nosplit
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// Returns true if bit 'n' of 'bits' is set, otherwise false
func bitn(bits uint32, n uint8) bool {
return (bits>>n)&1 == 1
}
func init() {
// CPUID with EAX=0 returns the maximum input value (leaf) that can be requested
max, _, _, _ := cpuid(0, 0)
if max < 7 {
return
}
_, ebx, _, _ := cpuid(7, 0)
X86.HasBMI2 = bitn(ebx, 8)
X86.HasADX = bitn(ebx, 19)
}


@@ -1,13 +0,0 @@
// +build amd64,!noasm
#include "textflag.h"
TEXT ·cpuid(SB), NOSPLIT, $0-24
MOVL eaxArg+0(FP), AX
MOVL ecxArg+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET

File diff suppressed because it is too large.


@@ -1,802 +0,0 @@
// +build arm64,!noasm
#include "textflag.h"
TEXT ·fp503ConditionalSwap(SB), NOSPLIT, $0-17
MOVD x+0(FP), R0
MOVD y+8(FP), R1
MOVB choice+16(FP), R2
// Set flags
// If choice is not 0 or 1, this implementation will swap completely
CMP $0, R2
LDP 0(R0), (R3, R4)
LDP 0(R1), (R5, R6)
CSEL EQ, R3, R5, R7
CSEL EQ, R4, R6, R8
STP (R7, R8), 0(R0)
CSEL NE, R3, R5, R9
CSEL NE, R4, R6, R10
STP (R9, R10), 0(R1)
LDP 16(R0), (R3, R4)
LDP 16(R1), (R5, R6)
CSEL EQ, R3, R5, R7
CSEL EQ, R4, R6, R8
STP (R7, R8), 16(R0)
CSEL NE, R3, R5, R9
CSEL NE, R4, R6, R10
STP (R9, R10), 16(R1)
LDP 32(R0), (R3, R4)
LDP 32(R1), (R5, R6)
CSEL EQ, R3, R5, R7
CSEL EQ, R4, R6, R8
STP (R7, R8), 32(R0)
CSEL NE, R3, R5, R9
CSEL NE, R4, R6, R10
STP (R9, R10), 32(R1)
LDP 48(R0), (R3, R4)
LDP 48(R1), (R5, R6)
CSEL EQ, R3, R5, R7
CSEL EQ, R4, R6, R8
STP (R7, R8), 48(R0)
CSEL NE, R3, R5, R9
CSEL NE, R4, R6, R10
STP (R9, R10), 48(R1)
RET
TEXT ·fp503AddReduced(SB), NOSPLIT, $0-24
MOVD z+0(FP), R2
MOVD x+8(FP), R0
MOVD y+16(FP), R1
// Load first summand into R3-R10
// Add first summand and second summand and store result in R3-R10
LDP 0(R0), (R3, R4)
LDP 0(R1), (R11, R12)
LDP 16(R0), (R5, R6)
LDP 16(R1), (R13, R14)
ADDS R11, R3
ADCS R12, R4
ADCS R13, R5
ADCS R14, R6
LDP 32(R0), (R7, R8)
LDP 32(R1), (R11, R12)
LDP 48(R0), (R9, R10)
LDP 48(R1), (R13, R14)
ADCS R11, R7
ADCS R12, R8
ADCS R13, R9
ADC R14, R10
// Subtract 2 * p503 in R11-R17 from the result in R3-R10
LDP ·p503x2+0(SB), (R11, R12)
LDP ·p503x2+24(SB), (R13, R14)
SUBS R11, R3
SBCS R12, R4
LDP ·p503x2+40(SB), (R15, R16)
SBCS R12, R5
SBCS R13, R6
MOVD ·p503x2+56(SB), R17
SBCS R14, R7
SBCS R15, R8
SBCS R16, R9
SBCS R17, R10
SBC ZR, ZR, R19
// If x + y - 2 * p503 < 0, R19 is 1 and 2 * p503 should be added
AND R19, R11
AND R19, R12
AND R19, R13
AND R19, R14
AND R19, R15
AND R19, R16
AND R19, R17
ADDS R11, R3
ADCS R12, R4
STP (R3, R4), 0(R2)
ADCS R12, R5
ADCS R13, R6
STP (R5, R6), 16(R2)
ADCS R14, R7
ADCS R15, R8
STP (R7, R8), 32(R2)
ADCS R16, R9
ADC R17, R10
STP (R9, R10), 48(R2)
RET
TEXT ·fp503SubReduced(SB), NOSPLIT, $0-24
MOVD z+0(FP), R2
MOVD x+8(FP), R0
MOVD y+16(FP), R1
// Load x into R3-R10
// Subtract y from x and store result in R3-R10
LDP 0(R0), (R3, R4)
LDP 0(R1), (R11, R12)
LDP 16(R0), (R5, R6)
LDP 16(R1), (R13, R14)
SUBS R11, R3
SBCS R12, R4
SBCS R13, R5
SBCS R14, R6
LDP 32(R0), (R7, R8)
LDP 32(R1), (R11, R12)
LDP 48(R0), (R9, R10)
LDP 48(R1), (R13, R14)
SBCS R11, R7
SBCS R12, R8
SBCS R13, R9
SBCS R14, R10
SBC ZR, ZR, R19
// If x - y < 0, R19 is 1 and 2 * p503 should be added
LDP ·p503x2+0(SB), (R11, R12)
LDP ·p503x2+24(SB), (R13, R14)
AND R19, R11
AND R19, R12
LDP ·p503x2+40(SB), (R15, R16)
AND R19, R13
AND R19, R14
MOVD ·p503x2+56(SB), R17
AND R19, R15
AND R19, R16
AND R19, R17
ADDS R11, R3
ADCS R12, R4
STP (R3, R4), 0(R2)
ADCS R12, R5
ADCS R13, R6
STP (R5, R6), 16(R2)
ADCS R14, R7
ADCS R15, R8
STP (R7, R8), 32(R2)
ADCS R16, R9
ADC R17, R10
STP (R9, R10), 48(R2)
RET
TEXT ·fp503AddLazy(SB), NOSPLIT, $0-24
MOVD z+0(FP), R2
MOVD x+8(FP), R0
MOVD y+16(FP), R1
// Load first summand into R3-R10
// Add first summand and second summand and store result in R3-R10
LDP 0(R0), (R3, R4)
LDP 0(R1), (R11, R12)
LDP 16(R0), (R5, R6)
LDP 16(R1), (R13, R14)
ADDS R11, R3
ADCS R12, R4
STP (R3, R4), 0(R2)
ADCS R13, R5
ADCS R14, R6
STP (R5, R6), 16(R2)
LDP 32(R0), (R7, R8)
LDP 32(R1), (R11, R12)
LDP 48(R0), (R9, R10)
LDP 48(R1), (R13, R14)
ADCS R11, R7
ADCS R12, R8
STP (R7, R8), 32(R2)
ADCS R13, R9
ADC R14, R10
STP (R9, R10), 48(R2)
RET
TEXT ·fp503X2AddLazy(SB), NOSPLIT, $0-24
MOVD z+0(FP), R2
MOVD x+8(FP), R0
MOVD y+16(FP), R1
LDP 0(R0), (R3, R4)
LDP 0(R1), (R11, R12)
LDP 16(R0), (R5, R6)
LDP 16(R1), (R13, R14)
ADDS R11, R3
ADCS R12, R4
STP (R3, R4), 0(R2)
ADCS R13, R5
ADCS R14, R6
STP (R5, R6), 16(R2)
LDP 32(R0), (R7, R8)
LDP 32(R1), (R11, R12)
LDP 48(R0), (R9, R10)
LDP 48(R1), (R13, R14)
ADCS R11, R7
ADCS R12, R8
STP (R7, R8), 32(R2)
ADCS R13, R9
ADCS R14, R10
STP (R9, R10), 48(R2)
LDP 64(R0), (R3, R4)
LDP 64(R1), (R11, R12)
LDP 80(R0), (R5, R6)
LDP 80(R1), (R13, R14)
ADCS R11, R3
ADCS R12, R4
STP (R3, R4), 64(R2)
ADCS R13, R5
ADCS R14, R6
STP (R5, R6), 80(R2)
LDP 96(R0), (R7, R8)
LDP 96(R1), (R11, R12)
LDP 112(R0), (R9, R10)
LDP 112(R1), (R13, R14)
ADCS R11, R7
ADCS R12, R8
STP (R7, R8), 96(R2)
ADCS R13, R9
ADC R14, R10
STP (R9, R10), 112(R2)
RET
TEXT ·fp503X2SubLazy(SB), NOSPLIT, $0-24
MOVD z+0(FP), R2
MOVD x+8(FP), R0
MOVD y+16(FP), R1
LDP 0(R0), (R3, R4)
LDP 0(R1), (R11, R12)
LDP 16(R0), (R5, R6)
LDP 16(R1), (R13, R14)
SUBS R11, R3
SBCS R12, R4
STP (R3, R4), 0(R2)
SBCS R13, R5
SBCS R14, R6
STP (R5, R6), 16(R2)
LDP 32(R0), (R7, R8)
LDP 32(R1), (R11, R12)
LDP 48(R0), (R9, R10)
LDP 48(R1), (R13, R14)
SBCS R11, R7
SBCS R12, R8
STP (R7, R8), 32(R2)
SBCS R13, R9
SBCS R14, R10
STP (R9, R10), 48(R2)
LDP 64(R0), (R3, R4)
LDP 64(R1), (R11, R12)
LDP 80(R0), (R5, R6)
LDP 80(R1), (R13, R14)
SBCS R11, R3
SBCS R12, R4
SBCS R13, R5
SBCS R14, R6
LDP 96(R0), (R7, R8)
LDP 96(R1), (R11, R12)
LDP 112(R0), (R9, R10)
LDP 112(R1), (R13, R14)
SBCS R11, R7
SBCS R12, R8
SBCS R13, R9
SBCS R14, R10
SBC ZR, ZR, R15
// If x - y < 0, R15 is 1 and p503 should be added
LDP ·p503+16(SB), (R16, R17)
LDP ·p503+32(SB), (R19, R20)
AND R15, R16
AND R15, R17
LDP ·p503+48(SB), (R21, R22)
AND R15, R19
AND R15, R20
AND R15, R21
AND R15, R22
ADDS R16, R3
ADCS R16, R4
STP (R3, R4), 64(R2)
ADCS R16, R5
ADCS R17, R6
STP (R5, R6), 80(R2)
ADCS R19, R7
ADCS R20, R8
STP (R7, R8), 96(R2)
ADCS R21, R9
ADC R22, R10
STP (R9, R10), 112(R2)
RET
// Expects that X0*Y0 is already in Z0(low),Z3(high) and X0*Y1 in Z1(low),Z2(high)
// Z0 is not actually touched
// Result of (X0-X1) * (Y0-Y1) will be in Z0-Z3
// Inputs get overwritten, except for X1
#define mul128x128comba(X0, X1, Y0, Y1, Z0, Z1, Z2, Z3, T0) \
MUL X1, Y0, X0 \
UMULH X1, Y0, Y0 \
ADDS Z3, Z1 \
ADC ZR, Z2 \
\
MUL Y1, X1, T0 \
UMULH Y1, X1, Y1 \
ADDS X0, Z1 \
ADCS Y0, Z2 \
ADC ZR, ZR, Z3 \
\
ADDS T0, Z2 \
ADC Y1, Z3
// Expects that X points to (X0-X1)
// Result of (X0-X3) * (Y0-Y3) will be in Z0-Z7
// Inputs get overwritten, except X2-X3 and Y2-Y3
#define mul256x256karatsuba(X, X0, X1, X2, X3, Y0, Y1, Y2, Y3, Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7, T0, T1)\
ADDS X2, X0 \ // xH + xL, destroys xL
ADCS X3, X1 \
ADCS ZR, ZR, T0 \
\
ADDS Y2, Y0, Z6 \ // yH + yL
ADCS Y3, Y1, T1 \
ADC ZR, ZR, Z7 \
\
SUB T0, ZR, Z2 \
SUB Z7, ZR, Z3 \
AND Z7, T0 \ // combined carry
\
AND Z2, Z6, Z0 \ // masked(yH + yL)
AND Z2, T1, Z1 \
\
AND Z3, X0, Z4 \ // masked(xH + xL)
AND Z3, X1, Z5 \
\
MUL Z6, X0, Z2 \
MUL T1, X0, Z3 \
\
ADDS Z4, Z0 \
UMULH T1, X0, Z4 \
ADCS Z5, Z1 \
UMULH Z6, X0, Z5 \
ADC ZR, T0 \
\ // (xH + xL) * (yH + yL)
mul128x128comba(X0, X1, Z6, T1, Z2, Z3, Z4, Z5, Z7)\
\
LDP 0+X, (X0, X1) \
\
ADDS Z0, Z4 \
UMULH Y0, X0, Z7 \
UMULH Y1, X0, T1 \
ADCS Z1, Z5 \
MUL Y0, X0, Z0 \
MUL Y1, X0, Z1 \
ADC ZR, T0 \
\ // xL * yL
mul128x128comba(X0, X1, Y0, Y1, Z0, Z1, T1, Z7, Z6)\
\
MUL Y2, X2, X0 \
UMULH Y2, X2, Y0 \
SUBS Z0, Z2 \ // (xH + xL) * (yH + yL) - xL * yL
SBCS Z1, Z3 \
SBCS T1, Z4 \
MUL Y3, X2, X1 \
UMULH Y3, X2, Z6 \
SBCS Z7, Z5 \
SBCS ZR, T0 \
\ // xH * yH
mul128x128comba(X2, X3, Y2, Y3, X0, X1, Z6, Y0, Y1)\
\
SUBS X0, Z2 \ // (xH + xL) * (yH + yL) - xL * yL - xH * yH
SBCS X1, Z3 \
SBCS Z6, Z4 \
SBCS Y0, Z5 \
SBCS ZR, T0 \
\
ADDS T1, Z2 \ // (xH * yH) * 2^256 + ((xH + xL) * (yH + yL) - xL * yL - xH * yH) * 2^128 + xL * yL
ADCS Z7, Z3 \
ADCS X0, Z4 \
ADCS X1, Z5 \
ADCS T0, Z6 \
ADC Y0, ZR, Z7
// This implements two-level Karatsuba with a 128x128 Comba multiplier
// at the bottom
TEXT ·fp503Mul(SB), NOSPLIT, $0-24
MOVD z+0(FP), R2
MOVD x+8(FP), R0
MOVD y+16(FP), R1
// Load xL in R3-R6, xH in R7-R10
// (xH + xL) in R25-R29
LDP 0(R0), (R3, R4)
LDP 32(R0), (R7, R8)
ADDS R3, R7, R25
ADCS R4, R8, R26
LDP 16(R0), (R5, R6)
LDP 48(R0), (R9, R10)
ADCS R5, R9, R27
ADCS R6, R10, R29
ADC ZR, ZR, R7
// Load yL in R11-R14, yH in R15-19
// (yH + yL) in R11-R14, destroys yL
LDP 0(R1), (R11, R12)
LDP 32(R1), (R15, R16)
ADDS R15, R11
ADCS R16, R12
LDP 16(R1), (R13, R14)
LDP 48(R1), (R17, R19)
ADCS R17, R13
ADCS R19, R14
ADC ZR, ZR, R8
// Compute masks and combined carry
SUB R7, ZR, R9
SUB R8, ZR, R10
AND R8, R7
// masked(yH + yL)
AND R9, R11, R15
AND R9, R12, R16
AND R9, R13, R17
AND R9, R14, R19
// masked(xH + xL)
AND R10, R25, R20
AND R10, R26, R21
AND R10, R27, R22
AND R10, R29, R23
// masked(xH + xL) + masked(yH + yL) in R15-R19
ADDS R20, R15
ADCS R21, R16
ADCS R22, R17
ADCS R23, R19
ADC ZR, R7
// Use z as temporary storage
STP (R25, R26), 0(R2)
// (xH + xL) * (yH + yL)
mul256x256karatsuba(0(R2), R25, R26, R27, R29, R11, R12, R13, R14, R8, R9, R10, R20, R21, R22, R23, R24, R0, R1)
MOVD x+8(FP), R0
MOVD y+16(FP), R1
ADDS R21, R15
ADCS R22, R16
ADCS R23, R17
ADCS R24, R19
ADC ZR, R7
// Load yL in R11-R14
LDP 0(R1), (R11, R12)
LDP 16(R1), (R13, R14)
// xL * yL
mul256x256karatsuba(0(R0), R3, R4, R5, R6, R11, R12, R13, R14, R21, R22, R23, R24, R25, R26, R27, R29, R1, R2)
MOVD z+0(FP), R2
MOVD y+16(FP), R1
// (xH + xL) * (yH + yL) - xL * yL
SUBS R21, R8
SBCS R22, R9
STP (R21, R22), 0(R2)
SBCS R23, R10
SBCS R24, R20
STP (R23, R24), 16(R2)
SBCS R25, R15
SBCS R26, R16
SBCS R27, R17
SBCS R29, R19
SBC ZR, R7
// Load xH in R3-R6, yH in R11-R14
LDP 32(R0), (R3, R4)
LDP 48(R0), (R5, R6)
LDP 32(R1), (R11, R12)
LDP 48(R1), (R13, R14)
ADDS R25, R8
ADCS R26, R9
ADCS R27, R10
ADCS R29, R20
ADC ZR, ZR, R1
MOVD R20, 32(R2)
// xH * yH
mul256x256karatsuba(32(R0), R3, R4, R5, R6, R11, R12, R13, R14, R21, R22, R23, R24, R25, R26, R27, R29, R2, R20)
NEG R1, R1
MOVD z+0(FP), R2
MOVD 32(R2), R20
// (xH + xL) * (yH + yL) - xL * yL - xH * yH in R8-R10,R20,R15-R19
// Store lower half in z, that's done
SUBS R21, R8
SBCS R22, R9
STP (R8, R9), 32(R2)
SBCS R23, R10
SBCS R24, R20
STP (R10, R20), 48(R2)
SBCS R25, R15
SBCS R26, R16
SBCS R27, R17
SBCS R29, R19
SBC ZR, R7
// (xH * yH) * 2^512 + ((xH + xL) * (yH + yL) - xL * yL - xH * yH) * 2^256 + xL * yL
// Store remaining limbs in z
ADDS $1, R1
ADCS R21, R15
ADCS R22, R16
STP (R15, R16), 64(R2)
ADCS R23, R17
ADCS R24, R19
STP (R17, R19), 80(R2)
ADCS R7, R25
ADCS ZR, R26
STP (R25, R26), 96(R2)
ADCS ZR, R27
ADC ZR, R29
STP (R27, R29), 112(R2)
RET
// Expects that X0*Y0 is already in Z0(low),Z3(high) and X0*Y1 in Z1(low),Z2(high)
// Z0 is not actually touched
// Result of (X0-X1) * (Y0-Y3) will be in Z0-Z5
// Inputs remain intact
#define mul128x256comba(X0, X1, Y0, Y1, Y2, Y3, Z0, Z1, Z2, Z3, Z4, Z5, T0, T1, T2, T3)\
MUL X1, Y0, T0 \
UMULH X1, Y0, T1 \
ADDS Z3, Z1 \
ADC ZR, Z2 \
\
MUL X0, Y2, T2 \
UMULH X0, Y2, T3 \
ADDS T0, Z1 \
ADCS T1, Z2 \
ADC ZR, ZR, Z3 \
\
MUL X1, Y1, T0 \
UMULH X1, Y1, T1 \
ADDS T2, Z2 \
ADCS T3, Z3 \
ADC ZR, ZR, Z4 \
\
MUL X0, Y3, T2 \
UMULH X0, Y3, T3 \
ADDS T0, Z2 \
ADCS T1, Z3 \
ADC ZR, Z4 \
\
MUL X1, Y2, T0 \
UMULH X1, Y2, T1 \
ADDS T2, Z3 \
ADCS T3, Z4 \
ADC ZR, ZR, Z5 \
\
MUL X1, Y3, T2 \
UMULH X1, Y3, T3 \
ADDS T0, Z3 \
ADCS T1, Z4 \
ADC ZR, Z5 \
ADDS T2, Z4 \
ADC T3, Z5
// This implements the shifted 2^(B*w) Montgomery reduction from
// https://eprint.iacr.org/2016/986.pdf, Section 3.2, with
// B = 4, w = 64. Performance results were reported in
// https://eprint.iacr.org/2018/700.pdf Section 6.
TEXT ·fp503MontgomeryReduce(SB), NOSPLIT, $0-16
MOVD x+8(FP), R0
// Load x0-x1
LDP 0(R0), (R2, R3)
// Load the prime constant in R25-R29
LDP ·p503p1s8+32(SB), (R25, R26)
LDP ·p503p1s8+48(SB), (R27, R29)
// [x0,x1] * p503p1s8 to R4-R9
MUL R2, R25, R4 // x0 * p503p1s8[0]
UMULH R2, R25, R7
MUL R2, R26, R5 // x0 * p503p1s8[1]
UMULH R2, R26, R6
mul128x256comba(R2, R3, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13)
LDP 16(R0), (R3, R11) // x2
LDP 32(R0), (R12, R13)
LDP 48(R0), (R14, R15)
// Left-shift result in R4-R9 by 56 to R4-R10
ORR R9>>8, ZR, R10
LSL $56, R9
ORR R8>>8, R9
LSL $56, R8
ORR R7>>8, R8
LSL $56, R7
ORR R6>>8, R7
LSL $56, R6
ORR R5>>8, R6
LSL $56, R5
ORR R4>>8, R5
LSL $56, R4
ADDS R4, R11 // x3
ADCS R5, R12 // x4
ADCS R6, R13
ADCS R7, R14
ADCS R8, R15
LDP 64(R0), (R16, R17)
LDP 80(R0), (R19, R20)
MUL R3, R25, R4 // x2 * p503p1s8[0]
UMULH R3, R25, R7
ADCS R9, R16
ADCS R10, R17
ADCS ZR, R19
ADCS ZR, R20
LDP 96(R0), (R21, R22)
LDP 112(R0), (R23, R24)
MUL R3, R26, R5 // x2 * p503p1s8[1]
UMULH R3, R26, R6
ADCS ZR, R21
ADCS ZR, R22
ADCS ZR, R23
ADC ZR, R24
// [x2,x3] * p503p1s8 to R4-R9
mul128x256comba(R3, R11, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R0, R1, R2)
ORR R9>>8, ZR, R10
LSL $56, R9
ORR R8>>8, R9
LSL $56, R8
ORR R7>>8, R8
LSL $56, R7
ORR R6>>8, R7
LSL $56, R6
ORR R5>>8, R6
LSL $56, R5
ORR R4>>8, R5
LSL $56, R4
ADDS R4, R13 // x5
ADCS R5, R14 // x6
ADCS R6, R15
ADCS R7, R16
MUL R12, R25, R4 // x4 * p503p1s8[0]
UMULH R12, R25, R7
ADCS R8, R17
ADCS R9, R19
ADCS R10, R20
ADCS ZR, R21
MUL R12, R26, R5 // x4 * p503p1s8[1]
UMULH R12, R26, R6
ADCS ZR, R22
ADCS ZR, R23
ADC ZR, R24
// [x4,x5] * p503p1s8 to R4-R9
mul128x256comba(R12, R13, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R0, R1, R2)
ORR R9>>8, ZR, R10
LSL $56, R9
ORR R8>>8, R9
LSL $56, R8
ORR R7>>8, R8
LSL $56, R7
ORR R6>>8, R7
LSL $56, R6
ORR R5>>8, R6
LSL $56, R5
ORR R4>>8, R5
LSL $56, R4
ADDS R4, R15 // x7
ADCS R5, R16 // x8
ADCS R6, R17
ADCS R7, R19
MUL R14, R25, R4 // x6 * p503p1s8[0]
UMULH R14, R25, R7
ADCS R8, R20
ADCS R9, R21
ADCS R10, R22
MUL R14, R26, R5 // x6 * p503p1s8[1]
UMULH R14, R26, R6
ADCS ZR, R23
ADC ZR, R24
// [x6,x7] * p503p1s8 to R4-R9
mul128x256comba(R14, R15, R25, R26, R27, R29, R4, R5, R6, R7, R8, R9, R10, R0, R1, R2)
ORR R9>>8, ZR, R10
LSL $56, R9
ORR R8>>8, R9
LSL $56, R8
ORR R7>>8, R8
LSL $56, R7
ORR R6>>8, R7
LSL $56, R6
ORR R5>>8, R6
LSL $56, R5
ORR R4>>8, R5
LSL $56, R4
MOVD z+0(FP), R0
ADDS R4, R17
ADCS R5, R19
STP (R16, R17), 0(R0) // Store final result to z
ADCS R6, R20
ADCS R7, R21
STP (R19, R20), 16(R0)
ADCS R8, R22
ADCS R9, R23
STP (R21, R22), 32(R0)
ADC R10, R24
STP (R23, R24), 48(R0)
RET
TEXT ·fp503StrongReduce(SB), NOSPLIT, $0-8
MOVD x+0(FP), R0
// Keep x in R1-R8, p503 in R9-R14, subtract to R1-R8
LDP ·p503+16(SB), (R9, R10)
LDP 0(R0), (R1, R2)
LDP 16(R0), (R3, R4)
SUBS R9, R1
SBCS R9, R2
LDP 32(R0), (R5, R6)
LDP ·p503+32(SB), (R11, R12)
SBCS R9, R3
SBCS R10, R4
LDP 48(R0), (R7, R8)
LDP ·p503+48(SB), (R13, R14)
SBCS R11, R5
SBCS R12, R6
SBCS R13, R7
SBCS R14, R8
SBC ZR, ZR, R15
// Mask with the borrow and add p503
AND R15, R9
AND R15, R10
AND R15, R11
AND R15, R12
AND R15, R13
AND R15, R14
ADDS R9, R1
ADCS R9, R2
STP (R1, R2), 0(R0)
ADCS R9, R3
ADCS R10, R4
STP (R3, R4), 16(R0)
ADCS R11, R5
ADCS R12, R6
STP (R5, R6), 32(R0)
ADCS R13, R7
ADCS R14, R8
STP (R7, R8), 48(R0)
RET


@@ -1,46 +0,0 @@
// +build amd64,!noasm arm64,!noasm
package p503
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
// If choice = 0, leave x,y unchanged. If choice = 1, set x,y = y,x.
// If choice is neither 0 nor 1 then behaviour is undefined.
// This function executes in constant time.
//go:noescape
func fp503ConditionalSwap(x, y *FpElement, choice uint8)
// Compute z = x + y (mod p).
//go:noescape
func fp503AddReduced(z, x, y *FpElement)
// Compute z = x - y (mod p).
//go:noescape
func fp503SubReduced(z, x, y *FpElement)
// Compute z = x + y, without reducing mod p.
//go:noescape
func fp503AddLazy(z, x, y *FpElement)
// Compute z = x + y, without reducing mod p.
//go:noescape
func fp503X2AddLazy(z, x, y *FpElementX2)
// Compute z = x - y, without reducing mod p.
//go:noescape
func fp503X2SubLazy(z, x, y *FpElementX2)
// Reduce a field element in [0, 2*p) to one in [0,p).
//go:noescape
func fp503StrongReduce(x *FpElement)
// Computes z = x * y.
//go:noescape
func fp503Mul(z *FpElementX2, x, y *FpElement)
// Computes the Montgomery reduction z = x R^{-1} (mod 2*p). On return, the
// value of x may be changed. z=x is not allowed.
//go:noescape
func fp503MontgomeryReduce(z *FpElement, x *FpElementX2)


@@ -1,197 +0,0 @@
// +build noasm !amd64,!arm64
package p503
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/arith"
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
// Compute z = x + y (mod p).
func fp503AddReduced(z, x, y *FpElement) {
var carry uint64
// z=x+y % p503
for i := 0; i < NumWords; i++ {
z[i], carry = Addc64(carry, x[i], y[i])
}
// z = z - p503x2
carry = 0
for i := 0; i < NumWords; i++ {
z[i], carry = Subc64(carry, z[i], p503x2[i])
}
// if z<0 add p503x2 back
mask := uint64(0 - carry)
carry = 0
for i := 0; i < NumWords; i++ {
z[i], carry = Addc64(carry, z[i], p503x2[i]&mask)
}
}
// Compute z = x - y (mod p).
func fp503SubReduced(z, x, y *FpElement) {
var borrow uint64
// z = x - y
for i := 0; i < NumWords; i++ {
z[i], borrow = Subc64(borrow, x[i], y[i])
}
// if z<0 add p503x2 back
mask := uint64(0 - borrow)
borrow = 0
for i := 0; i < NumWords; i++ {
z[i], borrow = Addc64(borrow, z[i], p503x2[i]&mask)
}
}
// Conditionally swaps x and y in constant time: mask = 1 swaps the two
// values, mask = 0 leaves them unchanged.
// For details see "Hacker's Delight", 2.20
//
// Implementation doesn't actually depend on a prime field.
func fp503ConditionalSwap(x, y *FpElement, mask uint8) {
var tmp, mask64 uint64
mask64 = 0 - uint64(mask)
for i := 0; i < NumWords; i++ {
tmp = mask64 & (x[i] ^ y[i])
x[i] = tmp ^ x[i]
y[i] = tmp ^ y[i]
}
}
// Perform Montgomery reduction: set z = x R^{-1} (mod 2*p)
// with R=2^512. Destroys the input value.
func fp503MontgomeryReduce(z *FpElement, x *FpElementX2) {
var carry, t, u, v uint64
var uv Uint128
var count int
count = 3 // number of 0 digits in the least significant part of p503 + 1
for i := 0; i < NumWords; i++ {
for j := 0; j < i; j++ {
if j < (i - count + 1) {
uv = Mul64(z[j], p503p1[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
}
v, carry = Addc64(0, v, x[i])
u, carry = Addc64(carry, u, 0)
t += carry
z[i] = v
v = u
u = t
t = 0
}
for i := NumWords; i < 2*NumWords-1; i++ {
if count > 0 {
count--
}
for j := i - NumWords + 1; j < NumWords; j++ {
if j < (NumWords - count) {
uv = Mul64(z[j], p503p1[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
}
v, carry = Addc64(0, v, x[i])
u, carry = Addc64(carry, u, 0)
t += carry
z[i-NumWords] = v
v = u
u = t
t = 0
}
v, carry = Addc64(0, v, x[2*NumWords-1])
z[NumWords-1] = v
}
// Compute z = x * y.
func fp503Mul(z *FpElementX2, x, y *FpElement) {
var u, v, t uint64
var carry uint64
var uv Uint128
for i := uint64(0); i < NumWords; i++ {
for j := uint64(0); j <= i; j++ {
uv = Mul64(x[j], y[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
z[i] = v
v = u
u = t
t = 0
}
for i := NumWords; i < (2*NumWords)-1; i++ {
for j := i - NumWords + 1; j < NumWords; j++ {
uv = Mul64(x[j], y[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
z[i] = v
v = u
u = t
t = 0
}
z[2*NumWords-1] = v
}
// Compute z = x + y, without reducing mod p.
func fp503AddLazy(z, x, y *FpElement) {
var carry uint64
for i := 0; i < NumWords; i++ {
z[i], carry = Addc64(carry, x[i], y[i])
}
}
// Compute z = x + y, without reducing mod p.
func fp503X2AddLazy(z, x, y *FpElementX2) {
var carry uint64
for i := 0; i < 2*NumWords; i++ {
z[i], carry = Addc64(carry, x[i], y[i])
}
}
// Reduce a field element in [0, 2*p) to one in [0,p).
func fp503StrongReduce(x *FpElement) {
var borrow, mask uint64
for i := 0; i < NumWords; i++ {
x[i], borrow = Subc64(borrow, x[i], p503[i])
}
// Sets all bits if borrow = 1
mask = 0 - borrow
borrow = 0
for i := 0; i < NumWords; i++ {
x[i], borrow = Addc64(borrow, x[i], p503[i]&mask)
}
}
// Compute z = x - y, without reducing mod p.
func fp503X2SubLazy(z, x, y *FpElementX2) {
var borrow, mask uint64
for i := 0; i < 2*NumWords; i++ {
z[i], borrow = Subc64(borrow, x[i], y[i])
}
// Sets all bits if borrow = 1
mask = 0 - borrow
borrow = 0
for i := NumWords; i < 2*NumWords; i++ {
z[i], borrow = Addc64(borrow, z[i], p503[i-NumWords]&mask)
}
}


@@ -1,178 +0,0 @@
package p503
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
cpu "v2ray.com/core/external/github.com/cloudflare/sidh/internal/utils"
)
const (
// SIDH public key byte size
P503_PublicKeySize = 378
// SIDH shared secret byte size.
P503_SharedSecretSize = 126
// Max size of secret key for 2-torsion group, corresponds to 2^e2 - 1
P503_SecretBitLenA = 250
// Size of secret key for 3-torsion group, corresponds to log_2(3^e3) - 1
P503_SecretBitLenB = 252
// Size of a computation strategy for 2-torsion group
strategySizeA = 124
// Size of a computation strategy for 3-torsion group
strategySizeB = 158
// (503+7)/8 = ceil(503/8)
P503_Bytelen = 63
// Number of limbs for a field element
NumWords = 8
)
// CPU capabilities. These flags are referenced by the assembly code. According to
// https://github.com/golang/go/issues/28230, variables referenced from
// assembly must be in the same package.
// We declare them as variables rather than constants in order to facilitate testing.
var (
// Signals support for MULX which is in BMI2
HasBMI2 = cpu.X86.HasBMI2
// Signals support for ADX and BMI2
HasADXandBMI2 = cpu.X86.HasBMI2 && cpu.X86.HasADX
)
// The x-coordinate of PA
var P503_affine_PA = Fp2Element{
A: FpElement{
0xE7EF4AA786D855AF, 0xED5758F03EB34D3B, 0x09AE172535A86AA9, 0x237B9CC07D622723,
0xE3A284CBA4E7932D, 0x27481D9176C5E63F, 0x6A323FF55C6E71BF, 0x002ECC31A6FB8773,
},
B: FpElement{
0x64D02E4E90A620B8, 0xDAB8128537D4B9F1, 0x4BADF77B8A228F98, 0x0F5DBDF9D1FB7D1B,
0xBEC4DB288E1A0DCC, 0xE76A8665E80675DB, 0x6D6F252E12929463, 0x003188BD1463FACC,
},
}
// The x-coordinate of QA
var P503_affine_QA = Fp2Element{
A: FpElement{
0xB79D41025DE85D56, 0x0B867DA9DF169686, 0x740E5368021C827D, 0x20615D72157BF25C,
0xFF1590013C9B9F5B, 0xC884DCADE8C16CEA, 0xEBD05E53BF724E01, 0x0032FEF8FDA5748C,
},
B: FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
},
}
// The x-coordinate of RA = PA-QA
var P503_affine_RA = Fp2Element{
A: FpElement{
0x12E2E849AA0A8006, 0x41CF47008635A1E8, 0x9CD720A70798AED7, 0x42A820B42FCF04CF,
0x7BF9BAD32AAE88B1, 0xF619127A54090BBE, 0x1CB10D8F56408EAA, 0x001D6B54C3C0EDEB,
},
B: FpElement{
0x34DB54931CBAAC36, 0x420A18CB8DD5F0C4, 0x32008C1A48C0F44D, 0x3B3BA772B1CFD44D,
0xA74B058FDAF13515, 0x095FC9CA7EEC17B4, 0x448E829D28F120F8, 0x00261EC3ED16A489,
},
}
// The x-coordinate of PB
var P503_affine_PB = Fp2Element{
A: FpElement{
0x7EDE37F4FA0BC727, 0xF7F8EC5C8598941C, 0xD15519B516B5F5C8, 0xF6D5AC9B87A36282,
0x7B19F105B30E952E, 0x13BD8B2025B4EBEE, 0x7B96D27F4EC579A2, 0x00140850CAB7E5DE,
},
B: FpElement{
0x7764909DAE7B7B2D, 0x578ABB16284911AB, 0x76E2BFD146A6BF4D, 0x4824044B23AA02F0,
0x1105048912A321F3, 0xB8A2E482CF0F10C1, 0x42FF7D0BE2152085, 0x0018E599C5223352,
},
}
// The x-coordinate of QB
var P503_affine_QB = Fp2Element{
A: FpElement{
0x4256C520FB388820, 0x744FD7C3BAAF0A13, 0x4B6A2DDDB12CBCB8, 0xE46826E27F427DF8,
0xFE4A663CD505A61B, 0xD6B3A1BAF025C695, 0x7C3BB62B8FCC00BD, 0x003AFDDE4A35746C,
},
B: FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
},
}
// The x-coordinate of RB = PB - QB
var P503_affine_RB = Fp2Element{
A: FpElement{
0x75601CD1E6C0DFCB, 0x1A9007239B58F93E, 0xC1F1BE80C62107AC, 0x7F513B898F29FF08,
0xEA0BEDFF43E1F7B2, 0x2C6D94018CBAE6D0, 0x3A430D31BCD84672, 0x000D26892ECCFE83,
},
B: FpElement{
0x1119D62AEA3007A1, 0xE3702AA4E04BAE1B, 0x9AB96F7D59F990E7, 0xF58440E8B43319C0,
0xAF8134BEE1489775, 0xE7F7774E905192AA, 0xF54AE09308E98039, 0x001EF7A041A86112,
},
}
// 2-torsion group computation strategy
var P503_AliceIsogenyStrategy = [strategySizeA]uint32{
0x3D, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02,
0x01, 0x01, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01,
0x01, 0x10, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01,
0x01, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x1D, 0x10, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01,
0x01, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x0D, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x05, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01}
// 3-torsion group computation strategy
var P503_BobIsogenyStrategy = [strategySizeB]uint32{
0x47, 0x26, 0x15, 0x0D, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01,
0x02, 0x01, 0x01, 0x05, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x09,
0x05, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x01,
0x02, 0x01, 0x01, 0x11, 0x09, 0x05, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01,
0x04, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x08, 0x04, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01,
0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x21, 0x11, 0x09, 0x05, 0x03, 0x02, 0x01, 0x01,
0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x08, 0x04,
0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x10, 0x08,
0x04, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x08,
0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01}
// Used internally by this package
// -------------------------------
var p503 = FpElement{
0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xABFFFFFFFFFFFFFF,
0x13085BDA2211E7A0, 0x1B9BF6C87B7E7DAF, 0x6045C6BDDA77A4D0, 0x004066F541811E1E,
}
// 2*p503
var p503x2 = FpElement{
0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x57FFFFFFFFFFFFFF,
0x2610B7B44423CF41, 0x3737ED90F6FCFB5E, 0xC08B8D7BB4EF49A0, 0x0080CDEA83023C3C,
}
// p503 + 1
var p503p1 = FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0xAC00000000000000,
0x13085BDA2211E7A0, 0x1B9BF6C87B7E7DAF, 0x6045C6BDDA77A4D0, 0x004066F541811E1E,
}
// R^2=(2^512)^2 mod p
var p503R2 = FpElement{
0x5289A0CF641D011F, 0x9B88257189FED2B9, 0xA3B365D58DC8F17A, 0x5BC57AB6EFF168EC,
0x9E51998BD84D4423, 0xBF8999CBAC3B5695, 0x46E9127BCE14CDB6, 0x003F6CFCE8B81771,
}
// p503 + 1 left-shifted by 8, assuming little endianness
var p503p1s8 = FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x085BDA2211E7A0AC, 0x9BF6C87B7E7DAF13, 0x45C6BDDA77A4D01B, 0x4066F541811E1E60,
}
// 1*R mod p
var P503_OneFp2 = Fp2Element{
A: FpElement{
0x00000000000003F9, 0x0000000000000000, 0x0000000000000000, 0xB400000000000000,
0x63CB1A6EA6DED2B4, 0x51689D8D667EB37D, 0x8ACD77C71AB24142, 0x0026FBAEC60F5953},
}
// 1/2 * R mod p
var P503_HalfFp2 = Fp2Element{
A: FpElement{
0x00000000000001FC, 0x0000000000000000, 0x0000000000000000, 0xB000000000000000,
0x3B69BB2464785D2A, 0x36824A2AF0FE9896, 0xF5899F427A94F309, 0x0033B15203C83BB8},
}


@@ -1,249 +0,0 @@
package p503
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
type fp503Ops struct{}
func FieldOperations() FieldOps {
return &fp503Ops{}
}
func (fp503Ops) Add(dest, lhs, rhs *Fp2Element) {
fp503AddReduced(&dest.A, &lhs.A, &rhs.A)
fp503AddReduced(&dest.B, &lhs.B, &rhs.B)
}
func (fp503Ops) Sub(dest, lhs, rhs *Fp2Element) {
fp503SubReduced(&dest.A, &lhs.A, &rhs.A)
fp503SubReduced(&dest.B, &lhs.B, &rhs.B)
}
func (fp503Ops) Mul(dest, lhs, rhs *Fp2Element) {
// Let (a,b,c,d) = (lhs.a,lhs.b,rhs.a,rhs.b).
a := &lhs.A
b := &lhs.B
c := &rhs.A
d := &rhs.B
// We want to compute
//
// (a + bi)*(c + di) = (a*c - b*d) + (a*d + b*c)i
//
// Use Karatsuba's trick: note that
//
// (b - a)*(c - d) = (b*c + a*d) - a*c - b*d
//
// so (a*d + b*c) = (b-a)*(c-d) + a*c + b*d.
var ac, bd FpElementX2
fp503Mul(&ac, a, c) // = a*c*R*R
fp503Mul(&bd, b, d) // = b*d*R*R
var b_minus_a, c_minus_d FpElement
fp503SubReduced(&b_minus_a, b, a) // = (b-a)*R
fp503SubReduced(&c_minus_d, c, d) // = (c-d)*R
var ad_plus_bc FpElementX2
fp503Mul(&ad_plus_bc, &b_minus_a, &c_minus_d) // = (b-a)*(c-d)*R*R
fp503X2AddLazy(&ad_plus_bc, &ad_plus_bc, &ac) // = ((b-a)*(c-d) + a*c)*R*R
fp503X2AddLazy(&ad_plus_bc, &ad_plus_bc, &bd) // = ((b-a)*(c-d) + a*c + b*d)*R*R
fp503MontgomeryReduce(&dest.B, &ad_plus_bc) // = (a*d + b*c)*R mod p
var ac_minus_bd FpElementX2
fp503X2SubLazy(&ac_minus_bd, &ac, &bd) // = (a*c - b*d)*R*R
fp503MontgomeryReduce(&dest.A, &ac_minus_bd) // = (a*c - b*d)*R mod p
}
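A one-line expansion confirms the identity used above (plain algebra, added for readability):

(b-a)*(c-d) + a*c + b*d = (b*c - b*d - a*c + a*d) + a*c + b*d = a*d + b*c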
// Set dest = 1/x
//
// Allowed to overlap dest with x.
func (fp503Ops) Inv(dest, x *Fp2Element) {
a := &x.A
b := &x.B
// We want to compute
//
// 1 1 (a - bi) (a - bi)
// -------- = -------- -------- = -----------
// (a + bi) (a + bi) (a - bi) (a^2 + b^2)
//
// Letting c = 1/(a^2 + b^2), this is
//
// 1/(a+bi) = a*c - b*ci.
var asq_plus_bsq primeFieldElement
var asq, bsq FpElementX2
fp503Mul(&asq, a, a) // = a*a*R*R
fp503Mul(&bsq, b, b) // = b*b*R*R
fp503X2AddLazy(&asq, &asq, &bsq) // = (a^2 + b^2)*R*R
fp503MontgomeryReduce(&asq_plus_bsq.A, &asq) // = (a^2 + b^2)*R mod p
// Now asq_plus_bsq = a^2 + b^2
inv := asq_plus_bsq
inv.Mul(&asq_plus_bsq, &asq_plus_bsq)
inv.P34(&inv)
inv.Mul(&inv, &inv)
inv.Mul(&inv, &asq_plus_bsq)
var ac FpElementX2
fp503Mul(&ac, a, &inv.A)
fp503MontgomeryReduce(&dest.A, &ac)
var minus_b FpElement
fp503SubReduced(&minus_b, &minus_b, b)
var minus_bc FpElementX2
fp503Mul(&minus_bc, &minus_b, &inv.A)
fp503MontgomeryReduce(&dest.B, &minus_bc)
}
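The multiplication chain above inverts s = a^2 + b^2 via Fermat's little theorem; since p503 ≡ 3 (mod 4), (p-3)/4 is an integer, and (a sketch of the exponent bookkeeping, not spelled out in the source):

((s^2)^((p-3)/4))^2 * s = s^(p-3) * s = s^(p-2) = 1/s (mod p)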
func (fp503Ops) Square(dest, x *Fp2Element) {
a := &x.A
b := &x.B
// We want to compute
//
// (a + bi)*(a + bi) = (a^2 - b^2) + 2abi.
var a2, a_plus_b, a_minus_b FpElement
fp503AddReduced(&a2, a, a) // = a*R + a*R = 2*a*R
fp503AddReduced(&a_plus_b, a, b) // = a*R + b*R = (a+b)*R
fp503SubReduced(&a_minus_b, a, b) // = a*R - b*R = (a-b)*R
var asq_minus_bsq, ab2 FpElementX2
fp503Mul(&asq_minus_bsq, &a_plus_b, &a_minus_b) // = (a+b)*(a-b)*R*R = (a^2 - b^2)*R*R
fp503Mul(&ab2, &a2, b) // = 2*a*b*R*R
fp503MontgomeryReduce(&dest.A, &asq_minus_bsq) // = (a^2 - b^2)*R mod p
fp503MontgomeryReduce(&dest.B, &ab2) // = 2*a*b*R mod p
}
// If choice == 1, performs the following swap in constant time:
// xPx <-> xQx
// xPz <-> xQz
// Otherwise leaves xPx, xPz, xQx, xQz unchanged
func (fp503Ops) CondSwap(xPx, xPz, xQx, xQz *Fp2Element, choice uint8) {
fp503ConditionalSwap(&xPx.A, &xQx.A, choice)
fp503ConditionalSwap(&xPx.B, &xQx.B, choice)
fp503ConditionalSwap(&xPz.A, &xQz.A, choice)
fp503ConditionalSwap(&xPz.B, &xQz.B, choice)
}
// Converts values in x.A and x.B to Montgomery domain
// x.A = x.A * R mod p
// x.B = x.B * R mod p
// Performs v = v*R^2*R^(-1) mod p, for both x.A and x.B
func (fp503Ops) ToMontgomery(x *Fp2Element) {
var aRR FpElementX2
// convert to montgomery domain
fp503Mul(&aRR, &x.A, &p503R2) // = a*R*R
fp503MontgomeryReduce(&x.A, &aRR) // = a*R mod p
fp503Mul(&aRR, &x.B, &p503R2)
fp503MontgomeryReduce(&x.B, &aRR)
}
// Converts values in x.A and x.B from Montgomery domain
// a = x.A mod p
// b = x.B mod p
//
// After returning from the call x is not modified.
func (fp503Ops) FromMontgomery(x *Fp2Element, out *Fp2Element) {
var aR FpElementX2
// convert from montgomery domain
// TODO: make fpXXXMontgomeryReduce use stack instead of reusing aR
// so that we don't have do this copy here
copy(aR[:], x.A[:])
fp503MontgomeryReduce(&out.A, &aR) // = a mod p in [0, 2p)
fp503StrongReduce(&out.A) // = a mod p in [0, p)
for i := range aR {
aR[i] = 0
}
copy(aR[:], x.B[:])
fp503MontgomeryReduce(&out.B, &aR)
fp503StrongReduce(&out.B)
}
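ToMontgomery and FromMontgomery above are the usual enter/leave steps of Montgomery arithmetic: multiply by the precomputed R^2 and Montgomery-reduce to get aR mod p, and Montgomery-reduce once more to get a back. A small math/big sketch of the same bookkeeping (editor's illustration; the stand-in prime and R are made up, the real code uses the 503-bit prime and a power of two sized to its limb count):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(10007)                   // stand-in odd prime
	R := new(big.Int).Lsh(big.NewInt(1), 64) // stand-in R (a power of two > p)
	Rinv := new(big.Int).ModInverse(R, p)    // what one Montgomery reduction multiplies by
	R2 := new(big.Int).Mod(new(big.Int).Mul(R, R), p)

	a := big.NewInt(1234)

	// ToMontgomery: aR = a * R^2 * R^-1 mod p
	aR := new(big.Int).Mul(a, R2)
	aR.Mul(aR, Rinv).Mod(aR, p)
	fmt.Println(aR.Cmp(new(big.Int).Mod(new(big.Int).Mul(a, R), p)) == 0) // true

	// FromMontgomery: a = aR * R^-1 mod p
	back := new(big.Int).Mul(aR, Rinv)
	back.Mod(back, p)
	fmt.Println(back.Cmp(a) == 0) // true
}
```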
//------------------------------------------------------------------------------
// Prime Field
//------------------------------------------------------------------------------
// Represents an element of the prime field F_p.
type primeFieldElement struct {
// This field element is in Montgomery form, so that the value `A` is
// represented by `aR mod p`.
A FpElement
}
// Set dest = lhs * rhs.
//
// Allowed to overlap lhs or rhs with dest.
//
// Returns dest to allow chaining operations.
func (dest *primeFieldElement) Mul(lhs, rhs *primeFieldElement) *primeFieldElement {
a := &lhs.A // = a*R
b := &rhs.A // = b*R
var ab FpElementX2
fp503Mul(&ab, a, b) // = a*b*R*R
fp503MontgomeryReduce(&dest.A, &ab) // = a*b*R mod p
return dest
}
// Set dest = x^(2^k), for k >= 1, by repeated squarings.
//
// Allowed to overlap x with dest.
//
// Returns dest to allow chaining operations.
func (dest *primeFieldElement) Pow2k(x *primeFieldElement, k uint8) *primeFieldElement {
dest.Mul(x, x)
for i := uint8(1); i < k; i++ {
dest.Mul(dest, dest)
}
return dest
}
// Set dest = x^((p-3)/4). If x is square, this is 1/sqrt(x).
// Uses a variation of the sliding-window algorithm with a window size
// of 5, sliding from left to right over the exponent bits.
// See HAC 14.85 for a general description.
//
// Allowed to overlap x with dest.
//
// Returns dest to allow chaining operations.
func (dest *primeFieldElement) P34(x *primeFieldElement) *primeFieldElement {
// Sliding-window strategy computed with etc/scripts/sliding_window_start_calc.py
//
// This performs sum(powStrategy) + 1 squarings and len(lookup) + len(mulStrategy)
// multiplications.
powStrategy := []uint8{1, 12, 5, 5, 2, 7, 11, 3, 8, 4, 11, 4, 7, 5, 6, 3, 7, 5, 7, 2, 12, 5, 6, 4, 6, 8, 6, 4, 7, 5, 5, 8, 5, 8, 5, 5, 8, 9, 3, 6, 2, 10, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3}
mulStrategy := []uint8{0, 12, 11, 10, 0, 1, 8, 3, 7, 1, 8, 3, 6, 7, 14, 2, 14, 14, 9, 0, 13, 9, 15, 5, 12, 7, 13, 7, 15, 6, 7, 9, 0, 5, 7, 6, 8, 8, 3, 7, 0, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 3}
// Precompute lookup table of odd multiples of x for window
// size k=5.
lookup := [16]primeFieldElement{}
xx := &primeFieldElement{}
xx.Mul(x, x)
lookup[0] = *x
for i := 1; i < 16; i++ {
lookup[i].Mul(&lookup[i-1], xx)
}
// Now lookup = {x, x^3, x^5, ... }
// so that lookup[i] = x^{2*i + 1}
// so that lookup[k/2] = x^k, for odd k
*dest = lookup[mulStrategy[0]]
for i := uint8(1); i < uint8(len(powStrategy)); i++ {
dest.Pow2k(dest, powStrategy[i])
dest.Mul(dest, &lookup[mulStrategy[i]])
}
return dest
}
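The claim in the comment, that x^((p-3)/4) is 1/sqrt(x) for square x, relies on p = 3 (mod 4), which holds for the SIDH primes. A quick math/big check of that fact with a small stand-in prime (editor's sketch, not part of the package):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(10007) // stand-in prime with p % 4 == 3
	x := big.NewInt(1234)
	x.Mul(x, x).Mod(x, p) // force x to be a square mod p

	// inSqrt = x^((p-3)/4), sqrt = x^((p+1)/4); their product is
	// x^((p-1)/2), which is 1 for a square x.
	e34 := new(big.Int).Div(new(big.Int).Sub(p, big.NewInt(3)), big.NewInt(4))
	e14 := new(big.Int).Div(new(big.Int).Add(p, big.NewInt(1)), big.NewInt(4))
	inSqrt := new(big.Int).Exp(x, e34, p)
	sqrt := new(big.Int).Exp(x, e14, p)

	prod := new(big.Int).Mul(inSqrt, sqrt)
	fmt.Println(prod.Mod(prod, p)) // 1
}
```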

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,46 +0,0 @@
// +build amd64,!noasm arm64,!noasm
package p751
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
// If choice = 0, leave x,y unchanged. If choice = 1, set x,y = y,x.
// If choice is neither 0 nor 1 then behaviour is undefined.
// This function executes in constant time.
//go:noescape
func fp751ConditionalSwap(x, y *FpElement, choice uint8)
// Compute z = x + y (mod p).
//go:noescape
func fp751AddReduced(z, x, y *FpElement)
// Compute z = x - y (mod p).
//go:noescape
func fp751SubReduced(z, x, y *FpElement)
// Compute z = x + y, without reducing mod p.
//go:noescape
func fp751AddLazy(z, x, y *FpElement)
// Compute z = x + y, without reducing mod p.
//go:noescape
func fp751X2AddLazy(z, x, y *FpElementX2)
// Compute z = x - y, without reducing mod p.
//go:noescape
func fp751X2SubLazy(z, x, y *FpElementX2)
// Compute z = x * y.
//go:noescape
func fp751Mul(z *FpElementX2, x, y *FpElement)
// Compute Montgomery reduction: set z = x * R^{-1} (mod 2*p).
// It may destroy the input value.
//go:noescape
func fp751MontgomeryReduce(z *FpElement, x *FpElementX2)
// Reduce a field element in [0, 2*p) to one in [0,p).
//go:noescape
func fp751StrongReduce(x *FpElement)

View File

@ -1,196 +0,0 @@
// +build noasm !amd64,!arm64
package p751
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/arith"
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
// Compute z = x + y (mod p).
func fp751AddReduced(z, x, y *FpElement) {
var carry uint64
// z=x+y % p751
for i := 0; i < NumWords; i++ {
z[i], carry = Addc64(carry, x[i], y[i])
}
// z = z - p751x2
carry = 0
for i := 0; i < NumWords; i++ {
z[i], carry = Subc64(carry, z[i], p751x2[i])
}
// z = z + p751x2
mask := uint64(0 - carry)
carry = 0
for i := 0; i < NumWords; i++ {
z[i], carry = Addc64(carry, z[i], p751x2[i]&mask)
}
}
// Compute z = x - y (mod p).
func fp751SubReduced(z, x, y *FpElement) {
var borrow uint64
for i := 0; i < NumWords; i++ {
z[i], borrow = Subc64(borrow, x[i], y[i])
}
mask := uint64(0 - borrow)
borrow = 0
for i := 0; i < NumWords; i++ {
z[i], borrow = Addc64(borrow, z[i], p751x2[i]&mask)
}
}
// Conditionally swaps bits in x and y in constant time.
// mask indicates bits to be swapped (set bits are swapped).
// For details see "Hacker's Delight", 2.20.
//
// Implementation doesn't actually depend on a prime field.
func fp751ConditionalSwap(x, y *FpElement, mask uint8) {
var tmp, mask64 uint64
mask64 = 0 - uint64(mask)
for i := 0; i < len(x); i++ {
tmp = mask64 & (x[i] ^ y[i])
x[i] = tmp ^ x[i]
y[i] = tmp ^ y[i]
}
}
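The swap above is the branch-free XOR/mask idiom from Hacker's Delight: extend the single-bit choice into an all-zeros or all-ones mask, AND it with x^y, and XOR the result back into both operands. A self-contained sketch of the same idiom on single words (editor's illustration):

```go
package main

import "fmt"

// condSwap exchanges x and y when choice is 1 and leaves them alone when it
// is 0, without any data-dependent branch.
func condSwap(x, y uint64, choice uint8) (uint64, uint64) {
	mask := uint64(0) - uint64(choice) // 0x000...0 or 0xfff...f
	t := mask & (x ^ y)
	return x ^ t, y ^ t
}

func main() {
	fmt.Println(condSwap(1, 2, 0)) // 1 2
	fmt.Println(condSwap(1, 2, 1)) // 2 1
}
```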
// Perform Montgomery reduction: set z = x R^{-1} (mod 2*p)
// with R=2^768. Destroys the input value.
func fp751MontgomeryReduce(z *FpElement, x *FpElementX2) {
var carry, t, u, v uint64
var uv Uint128
var count int
count = 5 // number of 0 digits in the least significant part of p751 + 1
for i := 0; i < NumWords; i++ {
for j := 0; j < i; j++ {
if j < (i - count + 1) {
uv = Mul64(z[j], p751p1[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
}
v, carry = Addc64(0, v, x[i])
u, carry = Addc64(carry, u, 0)
t += carry
z[i] = v
v = u
u = t
t = 0
}
for i := NumWords; i < 2*NumWords-1; i++ {
if count > 0 {
count--
}
for j := i - NumWords + 1; j < NumWords; j++ {
if j < (NumWords - count) {
uv = Mul64(z[j], p751p1[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
}
v, carry = Addc64(0, v, x[i])
u, carry = Addc64(carry, u, 0)
t += carry
z[i-NumWords] = v
v = u
u = t
t = 0
}
v, carry = Addc64(0, v, x[2*NumWords-1])
z[NumWords-1] = v
}
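The limb-by-limb loop above is product-scanning Montgomery reduction (it also exploits the run of zero words at the bottom of p751+1, via `count`). As a reference for what it computes, here is the textbook single-shot REDC written with math/big (editor's sketch with a toy prime; unlike the routine above, which reduces into [0, 2p), it finishes with a full reduction into [0, p)):

```go
package main

import (
	"fmt"
	"math/big"
)

// redc returns x * R^-1 mod p for 0 <= x < p*R, with R a power of two and p odd.
func redc(x, p, R *big.Int) *big.Int {
	pDash := new(big.Int).Sub(R, new(big.Int).ModInverse(p, R)) // -p^-1 mod R
	m := new(big.Int).Mul(x, pDash)
	m.Mod(m, R)
	t := new(big.Int).Add(x, new(big.Int).Mul(m, p)) // divisible by R by construction
	t.Div(t, R)
	if t.Cmp(p) >= 0 {
		t.Sub(t, p)
	}
	return t
}

func main() {
	p := big.NewInt(10007)                   // toy odd prime
	R := new(big.Int).Lsh(big.NewInt(1), 32) // toy R
	x := big.NewInt(123456789)

	want := new(big.Int).Mul(x, new(big.Int).ModInverse(R, p))
	want.Mod(want, p)
	fmt.Println(redc(x, p, R).Cmp(want) == 0) // true
}
```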
// Compute z = x * y.
func fp751Mul(z *FpElementX2, x, y *FpElement) {
var u, v, t uint64
var carry uint64
var uv Uint128
for i := uint64(0); i < NumWords; i++ {
for j := uint64(0); j <= i; j++ {
uv = Mul64(x[j], y[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
z[i] = v
v = u
u = t
t = 0
}
for i := NumWords; i < (2*NumWords)-1; i++ {
for j := i - NumWords + 1; j < NumWords; j++ {
uv = Mul64(x[j], y[i-j])
v, carry = Addc64(0, uv.L, v)
u, carry = Addc64(carry, uv.H, u)
t += carry
}
z[i] = v
v = u
u = t
t = 0
}
z[2*NumWords-1] = v
}
// Compute z = x + y, without reducing mod p.
func fp751AddLazy(z, x, y *FpElement) {
var carry uint64
for i := 0; i < NumWords; i++ {
z[i], carry = Addc64(carry, x[i], y[i])
}
}
// Compute z = x + y, without reducing mod p.
func fp751X2AddLazy(z, x, y *FpElementX2) {
var carry uint64
for i := 0; i < 2*NumWords; i++ {
z[i], carry = Addc64(carry, x[i], y[i])
}
}
// Reduce a field element in [0, 2*p) to one in [0,p).
func fp751StrongReduce(x *FpElement) {
var borrow, mask uint64
for i := 0; i < NumWords; i++ {
x[i], borrow = Subc64(borrow, x[i], p751[i])
}
// Sets all bits if borrow = 1
mask = 0 - borrow
borrow = 0
for i := 0; i < NumWords; i++ {
x[i], borrow = Addc64(borrow, x[i], p751[i]&mask)
}
}
// Compute z = x - y, without reducing mod p.
func fp751X2SubLazy(z, x, y *FpElementX2) {
var borrow, mask uint64
for i := 0; i < len(z); i++ {
z[i], borrow = Subc64(borrow, x[i], y[i])
}
// Sets all bits if borrow = 1
mask = 0 - borrow
borrow = 0
for i := NumWords; i < len(z); i++ {
z[i], borrow = Addc64(borrow, z[i], p751[i-NumWords]&mask)
}
}

View File

@ -1,227 +0,0 @@
package p751
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
cpu "v2ray.com/core/external/github.com/cloudflare/sidh/internal/utils"
)
const (
// SIDH public key byte size
P751_PublicKeySize = 564
// SIDH shared secret byte size.
P751_SharedSecretSize = 188
// Max size of secret key for 2-torsion group, corresponds to 2^e2
P751_SecretBitLenA = 372
// Size of secret key for 3-torsion group, corresponds to floor(log_2(3^e3))
P751_SecretBitLenB = 378
// P751 bytelen ceil(751/8)
P751_Bytelen = 94
// Size of a computation strategy for 2-torsion group
strategySizeA = 185
// Size of a computation strategy for 3-torsion group
strategySizeB = 238
// Number of 64-bit limbs used to store Fp element
NumWords = 12
)
// CPU Capabilities. These flags are referenced by assembly code. According to
// https://github.com/golang/go/issues/28230, variables referenced from the
// assembly must be in the same package.
// We declare them as variables, not constants, in order to facilitate testing.
var (
// Signals support for MULX which is in BMI2
HasBMI2 = cpu.X86.HasBMI2
// Signals support for ADX and BMI2
HasADXandBMI2 = cpu.X86.HasBMI2 && cpu.X86.HasADX
)
// The x-coordinate of PA
var P751_affine_PA = Fp2Element{
A: FpElement{
0xC2FC08CEAB50AD8B, 0x1D7D710F55E457B1, 0xE8738D92953DCD6E,
0xBAA7EBEE8A3418AA, 0xC9A288345F03F46F, 0xC8D18D167CFE2616,
0x02043761F6B1C045, 0xAA1975E13180E7E9, 0x9E13D3FDC6690DE6,
0x3A024640A3A3BB4F, 0x4E5AD44E6ACBBDAE, 0x0000544BEB561DAD,
},
B: FpElement{
0xE6CC41D21582E411, 0x07C2ECB7C5DF400A, 0xE8E34B521432AEC4,
0x50761E2AB085167D, 0x032CFBCAA6094B3C, 0x6C522F5FDF9DDD71,
0x1319217DC3A1887D, 0xDC4FB25803353A86, 0x362C8D7B63A6AB09,
0x39DCDFBCE47EA488, 0x4C27C99A2C28D409, 0x00003CB0075527C4,
},
}
// The x-coordinate of QA
var P751_affine_QA = Fp2Element{
A: FpElement{
0xD56FE52627914862, 0x1FAD60DC96B5BAEA, 0x01E137D0BF07AB91,
0x404D3E9252161964, 0x3C5385E4CD09A337, 0x4476426769E4AF73,
0x9790C6DB989DFE33, 0xE06E1C04D2AA8B5E, 0x38C08185EDEA73B9,
0xAA41F678A4396CA6, 0x92B9259B2229E9A0, 0x00002F9326818BE0,
},
B: FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
},
}
// The x-coordinate of RA = PA-QA
var P751_affine_RA = Fp2Element{
A: FpElement{
0x0BB84441DFFD19B3, 0x84B4DEA99B48C18E, 0x692DE648AD313805,
0xE6D72761B6DFAEE0, 0x223975C672C3058D, 0xA0FDE0C3CBA26FDC,
0xA5326132A922A3CA, 0xCA5E7F5D5EA96FA4, 0x127C7EFE33FFA8C6,
0x4749B1567E2A23C4, 0x2B7DF5B4AF413BFA, 0x0000656595B9623C,
},
B: FpElement{
0xED78C17F1EC71BE8, 0xF824D6DF753859B1, 0x33A10839B2A8529F,
0xFC03E9E25FDEA796, 0xC4708A8054DF1762, 0x4034F2EC034C6467,
0xABFB70FBF06ECC79, 0xDABE96636EC108B7, 0x49CBCFB090605FD3,
0x20B89711819A45A7, 0xFB8E1590B2B0F63E, 0x0000556A5F964AB2,
},
}
// The x-coordinate of PB
var P751_affine_PB = Fp2Element{
A: FpElement{
0xCFB6D71EF867AB0B, 0x4A5FDD76E9A45C76, 0x38B1EE69194B1F03,
0xF6E7B18A7761F3F0, 0xFCF01A486A52C84C, 0xCBE2F63F5AA75466,
0x6487BCE837B5E4D6, 0x7747F5A8C622E9B8, 0x4CBFE1E4EE6AEBBA,
0x8A8616A13FA91512, 0x53DB980E1579E0A5, 0x000058FEBFF3BE69,
},
B: FpElement{
0xA492034E7C075CC3, 0x677BAF00B04AA430, 0x3AAE0C9A755C94C8,
0x1DC4B064E9EBB08B, 0x3684EDD04E826C66, 0x9BAA6CB661F01B22,
0x20285A00AD2EFE35, 0xDCE95ABD0497065F, 0x16C7FBB3778E3794,
0x26B3AC29CEF25AAF, 0xFB3C28A31A30AC1D, 0x000046ED190624EE,
},
}
// The x-coordinate of QB
var P751_affine_QB = Fp2Element{
A: FpElement{
0xF1A8C9ED7B96C4AB, 0x299429DA5178486E, 0xEF4926F20CD5C2F4,
0x683B2E2858B4716A, 0xDDA2FBCC3CAC3EEB, 0xEC055F9F3A600460,
0xD5A5A17A58C3848B, 0x4652D836F42EAED5, 0x2F2E71ED78B3A3B3,
0xA771C057180ADD1D, 0xC780A5D2D835F512, 0x0000114EA3B55AC1,
},
B: FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
},
}
// The x-coordinate of RB = PB - QB
var P751_affine_RB = Fp2Element{
A: FpElement{
0x1C0D6733769D0F31, 0xF084C3086E2659D1, 0xE23D5DA27BCBD133,
0xF38EC9A8D5864025, 0x6426DC781B3B645B, 0x4B24E8E3C9FB03EE,
0x6432792F9D2CEA30, 0x7CC8E8B1AE76E857, 0x7F32BFB626BB8963,
0xB9F05995B48D7B74, 0x4D71200A7D67E042, 0x0000228457AF0637,
},
B: FpElement{
0x4AE37E7D8F72BD95, 0xDD2D504B3E993488, 0x5D14E7FA1ECB3C3E,
0x127610CEB75D6350, 0x255B4B4CAC446B11, 0x9EA12336C1F70CAF,
0x79FA68A2147BC2F8, 0x11E895CFDADBBC49, 0xE4B9D3C4D6356C18,
0x44B25856A67F951C, 0x5851541F61308D0B, 0x00002FFD994F7E4C,
},
}
// 2-torsion group computation strategy
var P751_AliceIsogenyStrategy = [strategySizeA]uint32{
0x50, 0x30, 0x1B, 0x0F, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02,
0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x07,
0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x03, 0x02, 0x01,
0x01, 0x01, 0x01, 0x0C, 0x07, 0x04, 0x02, 0x01, 0x01, 0x02,
0x01, 0x01, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x05, 0x03,
0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x15,
0x0C, 0x07, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x03,
0x02, 0x01, 0x01, 0x01, 0x01, 0x05, 0x03, 0x02, 0x01, 0x01,
0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x09, 0x05, 0x03, 0x02,
0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x04, 0x02,
0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x21, 0x14, 0x0C, 0x07,
0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x03, 0x02, 0x01,
0x01, 0x01, 0x01, 0x05, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
0x02, 0x01, 0x01, 0x01, 0x08, 0x05, 0x03, 0x02, 0x01, 0x01,
0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01,
0x02, 0x01, 0x01, 0x10, 0x08, 0x04, 0x02, 0x01, 0x01, 0x01,
0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02,
0x01, 0x01, 0x02, 0x01, 0x01}
// 3-torsion group computation strategy
var P751_BobIsogenyStrategy = [strategySizeB]uint32{
0x70, 0x3F, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02,
0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x08,
0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01,
0x01, 0x02, 0x01, 0x01, 0x10, 0x08, 0x04, 0x02, 0x01, 0x01,
0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02,
0x01, 0x01, 0x02, 0x01, 0x01, 0x1F, 0x10, 0x08, 0x04, 0x02,
0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02,
0x01, 0x01, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x0F, 0x08, 0x04,
0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01,
0x02, 0x01, 0x01, 0x07, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01,
0x01, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x31, 0x1F, 0x10,
0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04, 0x02,
0x01, 0x01, 0x02, 0x01, 0x01, 0x08, 0x04, 0x02, 0x01, 0x01,
0x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x0F, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x04,
0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x07, 0x04, 0x02, 0x01,
0x01, 0x02, 0x01, 0x01, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
0x15, 0x0C, 0x08, 0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01,
0x04, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01, 0x05, 0x03, 0x02,
0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x09, 0x05,
0x03, 0x02, 0x01, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x01,
0x04, 0x02, 0x01, 0x01, 0x01, 0x02, 0x01, 0x01}
// Used internally by this package. Not consts as Go doesn't allow arrays to be consts
// -------------------------------
// p751
var p751 = FpElement{
0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff,
0xffffffffffffffff, 0xffffffffffffffff, 0xeeafffffffffffff,
0xe3ec968549f878a8, 0xda959b1a13f7cc76, 0x084e9867d6ebe876,
0x8562b5045cb25748, 0x0e12909f97badc66, 0x00006fe5d541f71c}
// 2*p751
var p751x2 = FpElement{
0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF,
0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xDD5FFFFFFFFFFFFF,
0xC7D92D0A93F0F151, 0xB52B363427EF98ED, 0x109D30CFADD7D0ED,
0x0AC56A08B964AE90, 0x1C25213F2F75B8CD, 0x0000DFCBAA83EE38}
// p751 + 1
var p751p1 = FpElement{
0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0xeeb0000000000000,
0xe3ec968549f878a8, 0xda959b1a13f7cc76, 0x084e9867d6ebe876,
0x8562b5045cb25748, 0x0e12909f97badc66, 0x00006fe5d541f71c}
// R^2 = (2^768)^2 mod p
var p751R2 = FpElement{
2535603850726686808, 15780896088201250090, 6788776303855402382,
17585428585582356230, 5274503137951975249, 2266259624764636289,
11695651972693921304, 13072885652150159301, 4908312795585420432,
6229583484603254826, 488927695601805643, 72213483953973}
// 1*R mod p
var P751_OneFp2 = Fp2Element{
A: FpElement{
0x249ad, 0x0, 0x0, 0x0, 0x0, 0x8310000000000000, 0x5527b1e4375c6c66, 0x697797bf3f4f24d0, 0xc89db7b2ac5c4e2e, 0x4ca4b439d2076956, 0x10f7926c7512c7e9, 0x2d5b24bce5e2},
}
// 1/2 * R mod p
var P751_HalfFp2 = Fp2Element{
A: FpElement{
0x00000000000124D6, 0x0000000000000000, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000, 0xB8E0000000000000,
0x9C8A2434C0AA7287, 0xA206996CA9A378A3, 0x6876280D41A41B52,
0xE903B49F175CE04F, 0x0F8511860666D227, 0x00004EA07CFF6E7F},
}

View File

@ -1,254 +0,0 @@
package p751
import . "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
// 2*p751
var ()
//------------------------------------------------------------------------------
// Implementation of FieldOperations
//------------------------------------------------------------------------------
// Implements FieldOps
type fp751Ops struct{}
func FieldOperations() FieldOps {
return &fp751Ops{}
}
func (fp751Ops) Add(dest, lhs, rhs *Fp2Element) {
fp751AddReduced(&dest.A, &lhs.A, &rhs.A)
fp751AddReduced(&dest.B, &lhs.B, &rhs.B)
}
func (fp751Ops) Sub(dest, lhs, rhs *Fp2Element) {
fp751SubReduced(&dest.A, &lhs.A, &rhs.A)
fp751SubReduced(&dest.B, &lhs.B, &rhs.B)
}
func (fp751Ops) Mul(dest, lhs, rhs *Fp2Element) {
// Let (a,b,c,d) = (lhs.a,lhs.b,rhs.a,rhs.b).
a := &lhs.A
b := &lhs.B
c := &rhs.A
d := &rhs.B
// We want to compute
//
// (a + bi)*(c + di) = (a*c - b*d) + (a*d + b*c)i
//
// Use Karatsuba's trick: note that
//
// (b - a)*(c - d) = (b*c + a*d) - a*c - b*d
//
// so (a*d + b*c) = (b-a)*(c-d) + a*c + b*d.
var ac, bd FpElementX2
fp751Mul(&ac, a, c) // = a*c*R*R
fp751Mul(&bd, b, d) // = b*d*R*R
var b_minus_a, c_minus_d FpElement
fp751SubReduced(&b_minus_a, b, a) // = (b-a)*R
fp751SubReduced(&c_minus_d, c, d) // = (c-d)*R
var ad_plus_bc FpElementX2
fp751Mul(&ad_plus_bc, &b_minus_a, &c_minus_d) // = (b-a)*(c-d)*R*R
fp751X2AddLazy(&ad_plus_bc, &ad_plus_bc, &ac) // = ((b-a)*(c-d) + a*c)*R*R
fp751X2AddLazy(&ad_plus_bc, &ad_plus_bc, &bd) // = ((b-a)*(c-d) + a*c + b*d)*R*R
fp751MontgomeryReduce(&dest.B, &ad_plus_bc) // = (a*d + b*c)*R mod p
var ac_minus_bd FpElementX2
fp751X2SubLazy(&ac_minus_bd, &ac, &bd) // = (a*c - b*d)*R*R
fp751MontgomeryReduce(&dest.A, &ac_minus_bd) // = (a*c - b*d)*R mod p
}
func (fp751Ops) Square(dest, x *Fp2Element) {
a := &x.A
b := &x.B
// We want to compute
//
// (a + bi)*(a + bi) = (a^2 - b^2) + 2abi.
var a2, a_plus_b, a_minus_b FpElement
fp751AddReduced(&a2, a, a) // = a*R + a*R = 2*a*R
fp751AddReduced(&a_plus_b, a, b) // = a*R + b*R = (a+b)*R
fp751SubReduced(&a_minus_b, a, b) // = a*R - b*R = (a-b)*R
var asq_minus_bsq, ab2 FpElementX2
fp751Mul(&asq_minus_bsq, &a_plus_b, &a_minus_b) // = (a+b)*(a-b)*R*R = (a^2 - b^2)*R*R
fp751Mul(&ab2, &a2, b) // = 2*a*b*R*R
fp751MontgomeryReduce(&dest.A, &asq_minus_bsq) // = (a^2 - b^2)*R mod p
fp751MontgomeryReduce(&dest.B, &ab2) // = 2*a*b*R mod p
}
// Set dest = 1/x
//
// Allowed to overlap dest with x.
//
// Returns dest to allow chaining operations.
func (fp751Ops) Inv(dest, x *Fp2Element) {
a := &x.A
b := &x.B
// We want to compute
//
// 1 1 (a - bi) (a - bi)
// -------- = -------- -------- = -----------
// (a + bi) (a + bi) (a - bi) (a^2 + b^2)
//
// Letting c = 1/(a^2 + b^2), this is
//
// 1/(a+bi) = a*c - b*ci.
var asq_plus_bsq primeFieldElement
var asq, bsq FpElementX2
fp751Mul(&asq, a, a) // = a*a*R*R
fp751Mul(&bsq, b, b) // = b*b*R*R
fp751X2AddLazy(&asq, &asq, &bsq) // = (a^2 + b^2)*R*R
fp751MontgomeryReduce(&asq_plus_bsq.A, &asq) // = (a^2 + b^2)*R mod p
// Now asq_plus_bsq = a^2 + b^2
// Invert asq_plus_bsq
inv := asq_plus_bsq
inv.Mul(&asq_plus_bsq, &asq_plus_bsq)
inv.P34(&inv)
inv.Mul(&inv, &inv)
inv.Mul(&inv, &asq_plus_bsq)
var ac FpElementX2
fp751Mul(&ac, a, &inv.A)
fp751MontgomeryReduce(&dest.A, &ac)
var minus_b FpElement
fp751SubReduced(&minus_b, &minus_b, b)
var minus_bc FpElementX2
fp751Mul(&minus_bc, &minus_b, &inv.A)
fp751MontgomeryReduce(&dest.B, &minus_bc)
}
// If choice == 1, performs the following swap in constant time:
// xPx <-> xQx
// xPz <-> xQz
// Otherwise leaves xPx, xPz, xQx, xQz unchanged.
func (fp751Ops) CondSwap(xPx, xPz, xQx, xQz *Fp2Element, choice uint8) {
fp751ConditionalSwap(&xPx.A, &xQx.A, choice)
fp751ConditionalSwap(&xPx.B, &xQx.B, choice)
fp751ConditionalSwap(&xPz.A, &xQz.A, choice)
fp751ConditionalSwap(&xPz.B, &xQz.B, choice)
}
// Converts values in x.A and x.B to Montgomery domain
// x.A = x.A * R mod p
// x.B = x.B * R mod p
func (fp751Ops) ToMontgomery(x *Fp2Element) {
var aRR FpElementX2
// convert to montgomery domain
fp751Mul(&aRR, &x.A, &p751R2) // = a*R*R
fp751MontgomeryReduce(&x.A, &aRR) // = a*R mod p
fp751Mul(&aRR, &x.B, &p751R2)
fp751MontgomeryReduce(&x.B, &aRR)
}
// Converts values in x.A and x.B from Montgomery domain
// a = x.A mod p
// b = x.B mod p
//
// After returning from the call x is not modified.
func (fp751Ops) FromMontgomery(x *Fp2Element, out *Fp2Element) {
var aR FpElementX2
// convert from montgomery domain
copy(aR[:], x.A[:])
fp751MontgomeryReduce(&out.A, &aR) // = a mod p in [0, 2p)
fp751StrongReduce(&out.A) // = a mod p in [0, p)
for i := range aR {
aR[i] = 0
}
copy(aR[:], x.B[:])
fp751MontgomeryReduce(&out.B, &aR)
fp751StrongReduce(&out.B)
}
//------------------------------------------------------------------------------
// Prime Field
//------------------------------------------------------------------------------
// Represents an element of the prime field F_p in Montgomery domain
type primeFieldElement struct {
// The value `A` is represented by `aR mod p`.
A FpElement
}
// Set dest = lhs * rhs.
//
// Allowed to overlap lhs or rhs with dest.
//
// Returns dest to allow chaining operations.
func (dest *primeFieldElement) Mul(lhs, rhs *primeFieldElement) *primeFieldElement {
a := &lhs.A // = a*R
b := &rhs.A // = b*R
var ab FpElementX2
fp751Mul(&ab, a, b) // = a*b*R*R
fp751MontgomeryReduce(&dest.A, &ab) // = a*b*R mod p
return dest
}
// Set dest = x^(2^k), for k >= 1, by repeated squarings.
//
// Allowed to overlap x with dest.
//
// Returns dest to allow chaining operations.
func (dest *primeFieldElement) Pow2k(x *primeFieldElement, k uint8) *primeFieldElement {
dest.Mul(x, x)
for i := uint8(1); i < k; i++ {
dest.Mul(dest, dest)
}
return dest
}
// Set dest = x^((p-3)/4). If x is square, this is 1/sqrt(x).
//
// Allowed to overlap x with dest.
//
// Returns dest to allow chaining operations.
func (dest *primeFieldElement) P34(x *primeFieldElement) *primeFieldElement {
// Sliding-window strategy computed with Sage, awk, sed, and tr.
//
// This performs sum(powStrategy) = 744 squarings and len(mulStrategy)
// = 137 multiplications, in addition to 1 squaring and 15
// multiplications to build a lookup table.
//
// In total this is 745 squarings, 152 multiplications. Since squaring
// is not implemented for the prime field, this is 897 multiplications
// in total.
powStrategy := [137]uint8{5, 7, 6, 2, 10, 4, 6, 9, 8, 5, 9, 4, 7, 5, 5, 4, 8, 3, 9, 5, 5, 4, 10, 4, 6, 6, 6, 5, 8, 9, 3, 4, 9, 4, 5, 6, 6, 2, 9, 4, 5, 5, 5, 7, 7, 9, 4, 6, 4, 8, 5, 8, 6, 6, 2, 9, 7, 4, 8, 8, 8, 4, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2}
mulStrategy := [137]uint8{31, 23, 21, 1, 31, 7, 7, 7, 9, 9, 19, 15, 23, 23, 11, 7, 25, 5, 21, 17, 11, 5, 17, 7, 11, 9, 23, 9, 1, 19, 5, 3, 25, 15, 11, 29, 31, 1, 29, 11, 13, 9, 11, 27, 13, 19, 15, 31, 3, 29, 23, 31, 25, 11, 1, 21, 19, 15, 15, 21, 29, 13, 23, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 3}
initialMul := uint8(27)
// Build a lookup table of odd multiples of x.
lookup := [16]primeFieldElement{}
xx := &primeFieldElement{}
xx.Mul(x, x) // Set xx = x^2
lookup[0] = *x
for i := 1; i < 16; i++ {
lookup[i].Mul(&lookup[i-1], xx)
}
// Now lookup = {x, x^3, x^5, ... }
// so that lookup[i] = x^{2*i + 1}
// so that lookup[k/2] = x^k, for odd k
*dest = lookup[initialMul/2]
for i := uint8(0); i < 137; i++ {
dest.Pow2k(dest, powStrategy[i])
dest.Mul(dest, &lookup[mulStrategy[i]/2])
}
return dest
}

View File

@ -1,227 +0,0 @@
package sidh
import (
"errors"
"io"
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
// KeyVariant is a bit flag; keeping it as an unsigned integer makes it possible
// to combine and test variants with bitwise operations.
type KeyVariant uint
// IDs correspond to the bit length of the prime field characteristic.
// Currently FP_503 and FP_751 are the fields supported by this implementation.
const (
FP_503 uint8 = iota
FP_751
FP_964
maxPrimeFieldId
)
const (
// The first 2 bits identify the SIDH variant; the third bit indicates
// whether the key is a SIKE variant (set) or SIDH (not set)
// 001 - SIDH: corresponds to 2-torsion group
KeyVariant_SIDH_A KeyVariant = 1 << 0
// 010 - SIDH: corresponds to 3-torsion group
KeyVariant_SIDH_B = 1 << 1
// 110 - SIKE
KeyVariant_SIKE = 1<<2 | KeyVariant_SIDH_B
)
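The three constants encode the variants as the bit patterns 001, 010 and 110, so a SIKE key also carries the 3-torsion (SIDH_B) bit; this is what the `(v & KeyVariant_SIDH_A) == KeyVariant_SIDH_A` style tests further down rely on. A tiny standalone illustration (editor's sketch re-declaring the constants locally):

```go
package main

import "fmt"

const (
	KeyVariant_SIDH_A = 1 << 0                   // 001
	KeyVariant_SIDH_B = 1 << 1                   // 010
	KeyVariant_SIKE   = 1<<2 | KeyVariant_SIDH_B // 110
)

func main() {
	// A SIKE key is treated as a 3-torsion (B) key, never as a 2-torsion (A) key.
	fmt.Println(KeyVariant_SIKE&KeyVariant_SIDH_B == KeyVariant_SIDH_B) // true
	fmt.Println(KeyVariant_SIKE&KeyVariant_SIDH_A == KeyVariant_SIDH_A) // false
}
```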
// Base type for public and private key. Used mainly to carry domain
// parameters.
type key struct {
// Domain parameters of the algorithm to be used with a key
params *SidhParams
// Flag indicating whether the key corresponds to the 2-torsion group, the 3-torsion group, or SIKE
keyVariant KeyVariant
}
// Defines operations on public key
type PublicKey struct {
key
affine_xP Fp2Element
affine_xQ Fp2Element
affine_xQmP Fp2Element
}
// Defines operations on private key
type PrivateKey struct {
key
// Secret key
Scalar []byte
// Used only by KEM
S []byte
}
// Accessor to the domain parameters
func (key *key) Params() *SidhParams {
return key.params
}
// Accessor to key variant
func (key *key) Variant() KeyVariant {
return key.keyVariant
}
// NewPrivateKey initializes private key.
// Usage of this function guarantees that the object is correctly initialized.
func NewPrivateKey(id uint8, v KeyVariant) *PrivateKey {
prv := &PrivateKey{key: key{params: Params(id), keyVariant: v}}
if (v & KeyVariant_SIDH_A) == KeyVariant_SIDH_A {
prv.Scalar = make([]byte, prv.params.A.SecretByteLen)
} else {
prv.Scalar = make([]byte, prv.params.B.SecretByteLen)
}
if v == KeyVariant_SIKE {
prv.S = make([]byte, prv.params.MsgLen)
}
return prv
}
// NewPublicKey initializes public key.
// Usage of this function guarantees that the object is correctly initialized.
func NewPublicKey(id uint8, v KeyVariant) *PublicKey {
return &PublicKey{key: key{params: Params(id), keyVariant: v}}
}
// Import clears content of the public key currently stored in the structure
// and imports key stored in the byte string. Returns error in case byte string
// size is wrong. Doesn't perform any validation.
func (pub *PublicKey) Import(input []byte) error {
if len(input) != pub.Size() {
return errors.New("sidh: input too short")
}
op := CurveOperations{Params: pub.params}
ssSz := pub.params.SharedSecretSize
op.Fp2FromBytes(&pub.affine_xP, input[0:ssSz])
op.Fp2FromBytes(&pub.affine_xQ, input[ssSz:2*ssSz])
op.Fp2FromBytes(&pub.affine_xQmP, input[2*ssSz:3*ssSz])
return nil
}
// Export returns the currently stored key. If the structure hasn't been filled
// with key data, the returned byte string is filled with zeros.
func (pub *PublicKey) Export() []byte {
output := make([]byte, pub.params.PublicKeySize)
op := CurveOperations{Params: pub.params}
ssSz := pub.params.SharedSecretSize
op.Fp2ToBytes(output[0:ssSz], &pub.affine_xP)
op.Fp2ToBytes(output[ssSz:2*ssSz], &pub.affine_xQ)
op.Fp2ToBytes(output[2*ssSz:3*ssSz], &pub.affine_xQmP)
return output
}
// Size returns size of the public key in bytes
func (pub *PublicKey) Size() int {
return pub.params.PublicKeySize
}
// Export returns the currently stored key. If the structure hasn't been filled
// with key data, the returned byte string is filled with zeros.
func (prv *PrivateKey) Export() []byte {
ret := make([]byte, len(prv.Scalar)+len(prv.S))
copy(ret, prv.S)
copy(ret[len(prv.S):], prv.Scalar)
return ret
}
// Size returns size of the private key in bytes
func (prv *PrivateKey) Size() int {
tmp := len(prv.Scalar)
if prv.Variant() == KeyVariant_SIKE {
tmp += int(prv.params.MsgLen)
}
return tmp
}
// Import clears content of the private key currently stored in the structure
// and imports key from octet string. In case of SIKE, the random value 'S'
// must be prepended to the value of actual private key (see SIKE spec for details).
// Function doesn't import public key value to PrivateKey object.
func (prv *PrivateKey) Import(input []byte) error {
if len(input) != prv.Size() {
return errors.New("sidh: input too short")
}
copy(prv.S, input[:len(prv.S)])
copy(prv.Scalar, input[len(prv.S):])
return nil
}
// Generates a random private key for SIDH or SIKE. The generated value is
// formed as a little-endian integer from the key-space <2^(e2-1)..2^e2 - 1>
// for KeyVariant_SIDH_A or <2^(s-1)..2^s - 1>, where s = floor(log_2(3^e3)),
// for KeyVariant_SIDH_B.
//
// Returns error in case user provided RNG fails.
func (prv *PrivateKey) Generate(rand io.Reader) error {
var err error
var dp *DomainParams
if (prv.keyVariant & KeyVariant_SIDH_A) == KeyVariant_SIDH_A {
dp = &prv.params.A
} else {
dp = &prv.params.B
}
if prv.keyVariant == KeyVariant_SIKE && err == nil {
_, err = io.ReadFull(rand, prv.S)
}
// Private key generation takes advantage of the fact that the keyspace for a secret
// key is (0, 2^x - 1), for some positive value of 'x' (see SIKE, 1.3.8).
// It means that all bytes in the secret key, except the last one, can take any
// value between <0x00,0xFF>. The same goes for the last byte, except that generation
// needs to chop off some bits, to make sure the generated value is an element of
// the key-space.
_, err = io.ReadFull(rand, prv.Scalar)
if err != nil {
return err
}
prv.Scalar[len(prv.Scalar)-1] &= (1 << (dp.SecretBitLen % 8)) - 1
// Make sure scalar is SecretBitLen long. SIKE spec says that key
// space starts from 0, but I'm not comfortable with having low
// value scalars used for private keys. It is still secure as per
// table 5.1 in [SIKE].
prv.Scalar[len(prv.Scalar)-1] |= 1 << ((dp.SecretBitLen % 8) - 1)
return err
}
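The last two statements are the whole key-space trimming: the final scalar byte keeps only SecretBitLen%8 low bits, and the top kept bit is forced to 1. A worked example with P751's 2-torsion length of 372 bits (editor's sketch; the 0xAB input byte is an arbitrary stand-in for a random byte):

```go
package main

import "fmt"

func main() {
	secretBitLen := uint(372) // P751_SecretBitLenA; 372 % 8 == 4
	last := byte(0xAB)        // arbitrary random byte, 10101011

	last &= (1 << (secretBitLen % 8)) - 1 // keep the 4 low bits: 00001011
	last |= 1 << ((secretBitLen % 8) - 1) // force bit 3 to 1:    00001011
	fmt.Printf("%08b\n", last)            // 00001011
}
```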
// Generates public key.
//
// Constant time.
func (prv *PrivateKey) GeneratePublicKey() *PublicKey {
if (prv.keyVariant & KeyVariant_SIDH_A) == KeyVariant_SIDH_A {
return publicKeyGenA(prv)
}
return publicKeyGenB(prv)
}
// Computes a shared secret which is a j-invariant. Function requires that pub has
// a different KeyVariant than prv. Length of the returned output is 2*ceil(log_2(P)/8),
// where P is the prime defining the finite field.
//
// It's important to notice that each keypair must not be used more than once
// to calculate shared secret.
//
// Function may return error. This happens only in case provided input is invalid.
// Constant time for properly initialized private and public key.
func DeriveSecret(prv *PrivateKey, pub *PublicKey) ([]byte, error) {
if (pub == nil) || (prv == nil) {
return nil, errors.New("sidh: invalid arguments")
}
if (pub.keyVariant == prv.keyVariant) || (pub.params.Id != prv.params.Id) {
return nil, errors.New("sidh: public and private are incompatible")
}
if (prv.keyVariant & KeyVariant_SIDH_A) == KeyVariant_SIDH_A {
return deriveSecretA(prv, pub), nil
} else {
return deriveSecretB(prv, pub), nil
}
}
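A hedged end-to-end usage sketch of the API above: Alice uses the 2-torsion (A) variant, Bob the 3-torsion (B) variant, and DeriveSecret on the crossed key pairs must agree. The import path is assumed to be the `sidh` sub-package of this vendored tree (as in the upstream cloudflare/sidh layout); it is not shown in this diff.

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	// assumed location of this package inside the vendored tree
	"v2ray.com/core/external/github.com/cloudflare/sidh/sidh"
)

func main() {
	prvA := sidh.NewPrivateKey(sidh.FP_503, sidh.KeyVariant_SIDH_A)
	prvB := sidh.NewPrivateKey(sidh.FP_503, sidh.KeyVariant_SIDH_B)
	if err := prvA.Generate(rand.Reader); err != nil {
		panic(err)
	}
	if err := prvB.Generate(rand.Reader); err != nil {
		panic(err)
	}
	pubA := prvA.GeneratePublicKey()
	pubB := prvB.GeneratePublicKey()

	ssA, err := sidh.DeriveSecret(prvA, pubB)
	if err != nil {
		panic(err)
	}
	ssB, err := sidh.DeriveSecret(prvB, pubA)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(ssA, ssB)) // true
}
```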

View File

@ -1,82 +0,0 @@
package sidh
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
p503 "v2ray.com/core/external/github.com/cloudflare/sidh/p503"
p751 "v2ray.com/core/external/github.com/cloudflare/sidh/p751"
)
// Keeps mapping: SIDH prime field ID to domain parameters
var sidhParams = make(map[uint8]SidhParams)
// Params returns domain parameters corresponding to the finite field identified by
// the `id` provided by the caller. The function panics in case `id` wasn't registered earlier.
func Params(id uint8) *SidhParams {
if val, ok := sidhParams[id]; ok {
return &val
}
panic("sidh: SIDH Params ID unregistered")
}
func init() {
p503 := SidhParams{
Id: FP_503,
PublicKeySize: p503.P503_PublicKeySize,
SharedSecretSize: p503.P503_SharedSecretSize,
A: DomainParams{
Affine_P: p503.P503_affine_PA,
Affine_Q: p503.P503_affine_QA,
Affine_R: p503.P503_affine_RA,
SecretBitLen: p503.P503_SecretBitLenA,
SecretByteLen: uint((p503.P503_SecretBitLenA + 7) / 8),
IsogenyStrategy: p503.P503_AliceIsogenyStrategy[:],
},
B: DomainParams{
Affine_P: p503.P503_affine_PB,
Affine_Q: p503.P503_affine_QB,
Affine_R: p503.P503_affine_RB,
SecretBitLen: p503.P503_SecretBitLenB,
SecretByteLen: uint((p503.P503_SecretBitLenB + 7) / 8),
IsogenyStrategy: p503.P503_BobIsogenyStrategy[:],
},
OneFp2: p503.P503_OneFp2,
HalfFp2: p503.P503_HalfFp2,
MsgLen: 24,
// SIKEp503 provides 128 bits of classical security ([SIKE], 5.1)
KemSize: 16,
Bytelen: p503.P503_Bytelen,
Op: p503.FieldOperations(),
}
p751 := SidhParams{
Id: FP_751,
PublicKeySize: p751.P751_PublicKeySize,
SharedSecretSize: p751.P751_SharedSecretSize,
A: DomainParams{
Affine_P: p751.P751_affine_PA,
Affine_Q: p751.P751_affine_QA,
Affine_R: p751.P751_affine_RA,
IsogenyStrategy: p751.P751_AliceIsogenyStrategy[:],
SecretBitLen: p751.P751_SecretBitLenA,
SecretByteLen: uint((p751.P751_SecretBitLenA + 7) / 8),
},
B: DomainParams{
Affine_P: p751.P751_affine_PB,
Affine_Q: p751.P751_affine_QB,
Affine_R: p751.P751_affine_RB,
IsogenyStrategy: p751.P751_BobIsogenyStrategy[:],
SecretBitLen: p751.P751_SecretBitLenB,
SecretByteLen: uint((p751.P751_SecretBitLenB + 7) / 8),
},
OneFp2: p751.P751_OneFp2,
HalfFp2: p751.P751_HalfFp2,
MsgLen: 32,
// SIKEp751 provides 192 bits of classical security ([SIKE], 5.1)
KemSize: 24,
Bytelen: p751.P751_Bytelen,
Op: p751.FieldOperations(),
}
sidhParams[FP_503] = p503
sidhParams[FP_751] = p751
}

View File

@ -1,302 +0,0 @@
package sidh
import (
. "v2ray.com/core/external/github.com/cloudflare/sidh/internal/isogeny"
)
// -----------------------------------------------------------------------------
// Functions for traversing isogeny trees acoording to strategy. Key type 'A' is
//
// Traverses isogeny tree in order to compute xR, xP, xQ and xQmP needed
// for public key generation.
func traverseTreePublicKeyA(curve *ProjectiveCurveParameters, xR, phiP, phiQ, phiR *ProjectivePoint, pub *PublicKey) {
var points = make([]ProjectivePoint, 0, 8)
var indices = make([]int, 0, 8)
var i, sidx int
var op = CurveOperations{Params: pub.params}
cparam := op.CalcCurveParamsEquiv4(curve)
phi := Newisogeny4(op.Params.Op)
strategy := pub.params.A.IsogenyStrategy
stratSz := len(strategy)
for j := 1; j <= stratSz; j++ {
for i <= stratSz-j {
points = append(points, *xR)
indices = append(indices, i)
k := strategy[sidx]
sidx++
op.Pow2k(xR, &cparam, 2*k)
i += int(k)
}
cparam = phi.GenerateCurve(xR)
for k := 0; k < len(points); k++ {
points[k] = phi.EvaluatePoint(&points[k])
}
*phiP = phi.EvaluatePoint(phiP)
*phiQ = phi.EvaluatePoint(phiQ)
*phiR = phi.EvaluatePoint(phiR)
// pop xR from points
*xR, points = points[len(points)-1], points[:len(points)-1]
i, indices = int(indices[len(indices)-1]), indices[:len(indices)-1]
}
}
// Traverses isogeny tree in order to compute xR needed
// for shared secret computation.
func traverseTreeSharedKeyA(curve *ProjectiveCurveParameters, xR *ProjectivePoint, pub *PublicKey) {
var points = make([]ProjectivePoint, 0, 8)
var indices = make([]int, 0, 8)
var i, sidx int
var op = CurveOperations{Params: pub.params}
cparam := op.CalcCurveParamsEquiv4(curve)
phi := Newisogeny4(op.Params.Op)
strategy := pub.params.A.IsogenyStrategy
stratSz := len(strategy)
for j := 1; j <= stratSz; j++ {
for i <= stratSz-j {
points = append(points, *xR)
indices = append(indices, i)
k := strategy[sidx]
sidx++
op.Pow2k(xR, &cparam, 2*k)
i += int(k)
}
cparam = phi.GenerateCurve(xR)
for k := 0; k < len(points); k++ {
points[k] = phi.EvaluatePoint(&points[k])
}
// pop xR from points
*xR, points = points[len(points)-1], points[:len(points)-1]
i, indices = int(indices[len(indices)-1]), indices[:len(indices)-1]
}
}
// Traverses isogeny tree in order to compute xR, xP, xQ and xQmP needed
// for public key generation.
func traverseTreePublicKeyB(curve *ProjectiveCurveParameters, xR, phiP, phiQ, phiR *ProjectivePoint, pub *PublicKey) {
var points = make([]ProjectivePoint, 0, 8)
var indices = make([]int, 0, 8)
var i, sidx int
var op = CurveOperations{Params: pub.params}
cparam := op.CalcCurveParamsEquiv3(curve)
phi := Newisogeny3(op.Params.Op)
strategy := pub.params.B.IsogenyStrategy
stratSz := len(strategy)
for j := 1; j <= stratSz; j++ {
for i <= stratSz-j {
points = append(points, *xR)
indices = append(indices, i)
k := strategy[sidx]
sidx++
op.Pow3k(xR, &cparam, k)
i += int(k)
}
cparam = phi.GenerateCurve(xR)
for k := 0; k < len(points); k++ {
points[k] = phi.EvaluatePoint(&points[k])
}
*phiP = phi.EvaluatePoint(phiP)
*phiQ = phi.EvaluatePoint(phiQ)
*phiR = phi.EvaluatePoint(phiR)
// pop xR from points
*xR, points = points[len(points)-1], points[:len(points)-1]
i, indices = int(indices[len(indices)-1]), indices[:len(indices)-1]
}
}
// Traverses isogeny tree in order to compute xR needed
// for shared secret computation.
func traverseTreeSharedKeyB(curve *ProjectiveCurveParameters, xR *ProjectivePoint, pub *PublicKey) {
var points = make([]ProjectivePoint, 0, 8)
var indices = make([]int, 0, 8)
var i, sidx int
var op = CurveOperations{Params: pub.params}
cparam := op.CalcCurveParamsEquiv3(curve)
phi := Newisogeny3(op.Params.Op)
strategy := pub.params.B.IsogenyStrategy
stratSz := len(strategy)
for j := 1; j <= stratSz; j++ {
for i <= stratSz-j {
points = append(points, *xR)
indices = append(indices, i)
k := strategy[sidx]
sidx++
op.Pow3k(xR, &cparam, k)
i += int(k)
}
cparam = phi.GenerateCurve(xR)
for k := 0; k < len(points); k++ {
points[k] = phi.EvaluatePoint(&points[k])
}
// pop xR from points
*xR, points = points[len(points)-1], points[:len(points)-1]
i, indices = int(indices[len(indices)-1]), indices[:len(indices)-1]
}
}
// Generate a public key in the 2-torsion group
func publicKeyGenA(prv *PrivateKey) (pub *PublicKey) {
var xPA, xQA, xRA ProjectivePoint
var xPB, xQB, xRB, xR ProjectivePoint
var invZP, invZQ, invZR Fp2Element
var tmp ProjectiveCurveParameters
pub = NewPublicKey(prv.params.Id, KeyVariant_SIDH_A)
var op = CurveOperations{Params: pub.params}
var phi = Newisogeny4(op.Params.Op)
// Load points for A
xPA = ProjectivePoint{X: prv.params.A.Affine_P, Z: prv.params.OneFp2}
xQA = ProjectivePoint{X: prv.params.A.Affine_Q, Z: prv.params.OneFp2}
xRA = ProjectivePoint{X: prv.params.A.Affine_R, Z: prv.params.OneFp2}
// Load points for B
xRB = ProjectivePoint{X: prv.params.B.Affine_R, Z: prv.params.OneFp2}
xQB = ProjectivePoint{X: prv.params.B.Affine_Q, Z: prv.params.OneFp2}
xPB = ProjectivePoint{X: prv.params.B.Affine_P, Z: prv.params.OneFp2}
// Find isogeny kernel
tmp.C = pub.params.OneFp2
xR = op.ScalarMul3Pt(&tmp, &xPA, &xQA, &xRA, prv.params.A.SecretBitLen, prv.Scalar)
// Reset params object and traverse isogeny tree
tmp.C = pub.params.OneFp2
tmp.A.Zeroize()
traverseTreePublicKeyA(&tmp, &xR, &xPB, &xQB, &xRB, pub)
// Secret isogeny
phi.GenerateCurve(&xR)
xPA = phi.EvaluatePoint(&xPB)
xQA = phi.EvaluatePoint(&xQB)
xRA = phi.EvaluatePoint(&xRB)
op.Fp2Batch3Inv(&xPA.Z, &xQA.Z, &xRA.Z, &invZP, &invZQ, &invZR)
op.Params.Op.Mul(&pub.affine_xP, &xPA.X, &invZP)
op.Params.Op.Mul(&pub.affine_xQ, &xQA.X, &invZQ)
op.Params.Op.Mul(&pub.affine_xQmP, &xRA.X, &invZR)
return
}
// Generate a public key in the 3-torsion group
func publicKeyGenB(prv *PrivateKey) (pub *PublicKey) {
var xPB, xQB, xRB, xR ProjectivePoint
var xPA, xQA, xRA ProjectivePoint
var invZP, invZQ, invZR Fp2Element
var tmp ProjectiveCurveParameters
pub = NewPublicKey(prv.params.Id, prv.keyVariant)
var op = CurveOperations{Params: pub.params}
var phi = Newisogeny3(op.Params.Op)
// Load points for B
xRB = ProjectivePoint{X: prv.params.B.Affine_R, Z: prv.params.OneFp2}
xQB = ProjectivePoint{X: prv.params.B.Affine_Q, Z: prv.params.OneFp2}
xPB = ProjectivePoint{X: prv.params.B.Affine_P, Z: prv.params.OneFp2}
// Load points for A
xPA = ProjectivePoint{X: prv.params.A.Affine_P, Z: prv.params.OneFp2}
xQA = ProjectivePoint{X: prv.params.A.Affine_Q, Z: prv.params.OneFp2}
xRA = ProjectivePoint{X: prv.params.A.Affine_R, Z: prv.params.OneFp2}
tmp.C = pub.params.OneFp2
xR = op.ScalarMul3Pt(&tmp, &xPB, &xQB, &xRB, prv.params.B.SecretBitLen, prv.Scalar)
tmp.C = pub.params.OneFp2
tmp.A.Zeroize()
traverseTreePublicKeyB(&tmp, &xR, &xPA, &xQA, &xRA, pub)
phi.GenerateCurve(&xR)
xPB = phi.EvaluatePoint(&xPA)
xQB = phi.EvaluatePoint(&xQA)
xRB = phi.EvaluatePoint(&xRA)
op.Fp2Batch3Inv(&xPB.Z, &xQB.Z, &xRB.Z, &invZP, &invZQ, &invZR)
op.Params.Op.Mul(&pub.affine_xP, &xPB.X, &invZP)
op.Params.Op.Mul(&pub.affine_xQ, &xQB.X, &invZQ)
op.Params.Op.Mul(&pub.affine_xQmP, &xRB.X, &invZR)
return
}
// -----------------------------------------------------------------------------
// Key agreement functions
//
// Establishing shared keys in the 2-torsion group
func deriveSecretA(prv *PrivateKey, pub *PublicKey) []byte {
var sharedSecret = make([]byte, pub.params.SharedSecretSize)
var cparam ProjectiveCurveParameters
var xP, xQ, xQmP ProjectivePoint
var xR ProjectivePoint
var op = CurveOperations{Params: prv.params}
var phi = Newisogeny4(op.Params.Op)
// Recover curve coefficients
cparam.C = pub.params.OneFp2
op.RecoverCoordinateA(&cparam, &pub.affine_xP, &pub.affine_xQ, &pub.affine_xQmP)
// Find kernel of the morphism
xP = ProjectivePoint{X: pub.affine_xP, Z: pub.params.OneFp2}
xQ = ProjectivePoint{X: pub.affine_xQ, Z: pub.params.OneFp2}
xQmP = ProjectivePoint{X: pub.affine_xQmP, Z: pub.params.OneFp2}
xR = op.ScalarMul3Pt(&cparam, &xP, &xQ, &xQmP, pub.params.A.SecretBitLen, prv.Scalar)
// Traverse isogeny tree
traverseTreeSharedKeyA(&cparam, &xR, pub)
// Calculate j-invariant on the isogenous curve
c := phi.GenerateCurve(&xR)
op.RecoverCurveCoefficients4(&cparam, &c)
op.Jinvariant(&cparam, sharedSecret)
return sharedSecret
}
// Establishing shared keys in the 3-torsion group
func deriveSecretB(prv *PrivateKey, pub *PublicKey) []byte {
var sharedSecret = make([]byte, pub.params.SharedSecretSize)
var xP, xQ, xQmP ProjectivePoint
var xR ProjectivePoint
var cparam ProjectiveCurveParameters
var op = CurveOperations{Params: prv.params}
var phi = Newisogeny3(op.Params.Op)
// Recover curve coefficients
cparam.C = pub.params.OneFp2
op.RecoverCoordinateA(&cparam, &pub.affine_xP, &pub.affine_xQ, &pub.affine_xQmP)
// Find kernel of the morphism
xP = ProjectivePoint{X: pub.affine_xP, Z: pub.params.OneFp2}
xQ = ProjectivePoint{X: pub.affine_xQ, Z: pub.params.OneFp2}
xQmP = ProjectivePoint{X: pub.affine_xQmP, Z: pub.params.OneFp2}
xR = op.ScalarMul3Pt(&cparam, &xP, &xQ, &xQmP, pub.params.B.SecretBitLen, prv.Scalar)
// Traverse isogeny tree
traverseTreeSharedKeyB(&cparam, &xR, pub)
// Calculate j-invariant on the isogenous curve
c := phi.GenerateCurve(&xR)
op.RecoverCurveCoefficients3(&cparam, &c)
op.Jinvariant(&cparam, sharedSecret)
return sharedSecret
}

View File

@ -1,44 +0,0 @@
# Changelog
## v0.10.0 (2018-08-28)
- Add support for QUIC 44, drop support for QUIC 42.
## v0.9.0 (2018-08-15)
- Add a `quic.Config` option for the length of the connection ID (for IETF QUIC).
- Split Session.Close into one method for regular closing and one for closing with an error.
## v0.8.0 (2018-06-26)
- Add support for unidirectional streams (for IETF QUIC).
- Add a `quic.Config` option for the maximum number of incoming streams.
- Add support for QUIC 42 and 43.
- Add dial functions that use a context.
- Multiplex clients on a net.PacketConn, when using Dial(conn).
## v0.7.0 (2018-02-03)
- The lower boundary for packets included in ACKs is now derived, and the value sent in STOP_WAITING frames is ignored.
- Remove `DialNonFWSecure` and `DialAddrNonFWSecure`.
- Expose the `ConnectionState` in the `Session` (experimental API).
- Implement packet pacing.
## v0.6.0 (2017-12-12)
- Add support for QUIC 39, drop support for QUIC 35 - 37
- Added `quic.Config` options for maximal flow control windows
- Add a `quic.Config` option for QUIC versions
- Add a `quic.Config` option to request omission of the connection ID from a server
- Add a `quic.Config` option to configure the source address validation
- Add a `quic.Config` option to configure the handshake timeout
- Add a `quic.Config` option to configure the idle timeout
- Add a `quic.Config` option to configure keep-alive
- Rename the STK to Cookie
- Implement `net.Conn`-style deadlines for streams
- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details.
- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details.
- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper`
- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn`
- Drop support for Go 1.7 and 1.8.
- Various bugfixes

View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2016 the quic-go authors & Google, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,68 +0,0 @@
# A QUIC implementation in pure Go
<img src="docs/quic.png" width=303 height=124>
[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/lucas-clemente/quic-go)
[![Travis Build Status](https://img.shields.io/travis/lucas-clemente/quic-go/master.svg?style=flat-square&label=Travis+build)](https://travis-ci.org/lucas-clemente/quic-go)
[![CircleCI Build Status](https://img.shields.io/circleci/project/github/lucas-clemente/quic-go.svg?style=flat-square&label=CircleCI+build)](https://circleci.com/gh/lucas-clemente/quic-go)
[![Windows Build Status](https://img.shields.io/appveyor/ci/lucas-clemente/quic-go/master.svg?style=flat-square&label=windows+build)](https://ci.appveyor.com/project/lucas-clemente/quic-go/branch/master)
[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/)
quic-go is an implementation of the [QUIC](https://en.wikipedia.org/wiki/QUIC) protocol in Go. It roughly implements the [IETF QUIC draft](https://github.com/quicwg/base-drafts), although we don't fully support any of the draft versions at the moment.
## Version compatibility
Since quic-go is under active development, there's no guarantee that two builds of different commits are interoperable. The QUIC version used in the *master* branch is just a placeholder, and should not be considered stable.
If you want to use quic-go as a library in other projects, please consider using a [tagged release](https://github.com/lucas-clemente/quic-go/releases). These releases expose [experimental QUIC versions](https://github.com/quicwg/base-drafts/wiki/QUIC-Versions), which are guaranteed to be stable.
## Google QUIC
quic-go used to support both the QUIC versions supported by Google Chrome and QUIC as deployed on Google's servers, as well as IETF QUIC. Due to the divergence of the two protocols, we decided to not support both versions any more.
The *master* branch **only** supports IETF QUIC. For Google QUIC support, please refer to the [gquic branch](https://github.com/lucas-clemente/quic-go/tree/gquic).
## Guides
We currently support Go 1.9+.
Installing and updating dependencies:

    go get -t -u ./...

Running tests:

    go test ./...
### HTTP mapping
We're currently not implementing the HTTP mapping as described in the [HTTP over QUIC draft](https://quicwg.org/base-drafts/draft-ietf-quic-http.html). The HTTP mapping here is a leftover from Google QUIC.
### QUIC without HTTP/2
Take a look at [this echo example](example/echo/echo.go).
## Usage
### As a server
See the [example server](example/main.go). Starting a QUIC server is very similar to the standard library net/http in Go:
```go
http.Handle("/", http.FileServer(http.Dir(wwwDir)))
h2quic.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil)
```
### As a client
See the [example client](example/client/main.go). Use a `h2quic.RoundTripper` as a `Transport` in a `http.Client`.
```go
http.Client{
Transport: &h2quic.RoundTripper{},
}
```
## Contributing
We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.

View File

@ -1,56 +0,0 @@
package quic
import (
"sync"
"v2ray.com/core/common/bytespool"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
type packetBuffer struct {
Slice []byte
// refCount counts how many packets the Slice is used in.
// It doesn't support concurrent use.
// It is > 1 when used for coalesced packets.
refCount int
}
// Split increases the refCount.
// It must be called when a packet buffer is used for more than one packet,
// e.g. when splitting coalesced packets.
func (b *packetBuffer) Split() {
b.refCount++
}
// Release decreases the refCount.
// It should be called when processing the packet is finished.
// When the refCount reaches 0, the packet buffer is put back into the pool.
func (b *packetBuffer) Release() {
if cap(b.Slice) < 2048 {
return
}
b.refCount--
if b.refCount < 0 {
panic("negative packetBuffer refCount")
}
// only put the packetBuffer back if it's not used any more
if b.refCount == 0 {
buffer := b.Slice[0:cap(b.Slice)]
bufferPool.Put(buffer)
}
}
var bufferPool *sync.Pool
func getPacketBuffer() *packetBuffer {
buffer := bufferPool.Get().([]byte)
return &packetBuffer{
refCount: 1,
Slice: buffer[:protocol.MaxReceivePacketSize],
}
}
func init() {
bufferPool = bytespool.GetPool(int32(protocol.MaxReceivePacketSize))
}
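The Split/Release pair implements a small reference count on top of a byte pool, so a single UDP datagram holding several coalesced packets returns to the pool only after every packet has been processed. A self-contained re-sketch of that discipline with a plain sync.Pool (editor's illustration; the real code above uses v2ray's bytespool and skips pooling for small buffers):

```go
package main

import (
	"fmt"
	"sync"
)

type refBuffer struct {
	Slice    []byte
	refCount int
}

var pool = sync.Pool{New: func() interface{} { return make([]byte, 1452) }}

func getBuffer() *refBuffer { return &refBuffer{Slice: pool.Get().([]byte), refCount: 1} }

// Split adds a reference, e.g. for a second coalesced packet in the same datagram.
func (b *refBuffer) Split() { b.refCount++ }

// Release drops a reference and returns the slice to the pool on the last one.
func (b *refBuffer) Release() {
	b.refCount--
	if b.refCount == 0 {
		pool.Put(b.Slice[:cap(b.Slice)])
	}
}

func main() {
	buf := getBuffer()
	buf.Split()   // the datagram contained two coalesced packets
	buf.Release() // first packet done
	buf.Release() // second packet done; buffer goes back to the pool here
	fmt.Println("buffer released")
}
```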

View File

@ -1,443 +0,0 @@
package quic
import (
"context"
"crypto/tls"
"fmt"
"net"
"sync"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/handshake"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
type client struct {
mutex sync.Mutex
conn connection
// If the client is created with DialAddr, we create a packet conn.
// If it is started with Dial, we take a packet conn as a parameter.
createdPacketConn bool
packetHandlers packetHandlerManager
token []byte
versionNegotiated utils.AtomicBool // has the server accepted our version
receivedVersionNegotiationPacket bool
negotiatedVersions []protocol.VersionNumber // the list of versions from the version negotiation packet
tlsConf *tls.Config
config *Config
srcConnID protocol.ConnectionID
destConnID protocol.ConnectionID
origDestConnID protocol.ConnectionID // the destination conn ID used on the first Initial (before a Retry)
initialPacketNumber protocol.PacketNumber
initialVersion protocol.VersionNumber
version protocol.VersionNumber
handshakeChan chan struct{}
session quicSession
logger utils.Logger
}
var _ packetHandler = &client{}
var (
// make it possible to mock connection ID generation in the tests
generateConnectionID = protocol.GenerateConnectionID
generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
)
// DialAddr establishes a new QUIC connection to a server.
// It uses a new UDP connection and closes this connection when the QUIC session is closed.
// The hostname for SNI is taken from the given address.
func DialAddr(
addr string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
return DialAddrContext(context.Background(), addr, tlsConf, config)
}
// DialAddrContext establishes a new QUIC connection to a server using the provided context.
// See DialAddr for details.
func DialAddrContext(
ctx context.Context,
addr string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}
return dialContext(ctx, udpConn, udpAddr, addr, tlsConf, config, true)
}
// Dial establishes a new QUIC connection to a server using a net.PacketConn.
// The same PacketConn can be used for multiple calls to Dial and Listen,
// QUIC connection IDs are used for demultiplexing the different connections.
// The host parameter is used for SNI.
func Dial(
pconn net.PacketConn,
remoteAddr net.Addr,
host string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
return DialContext(context.Background(), pconn, remoteAddr, host, tlsConf, config)
}
// DialContext establishes a new QUIC connection to a server using a net.PacketConn using the provided context.
// See Dial for details.
func DialContext(
ctx context.Context,
pconn net.PacketConn,
remoteAddr net.Addr,
host string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
return dialContext(ctx, pconn, remoteAddr, host, tlsConf, config, false)
}
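A hedged client sketch using DialAddr as declared above. The root import path is assumed from this vendored layout, the address is a placeholder, and certificate verification is disabled only to keep the example short; passing a nil Config is fine because populateClientConfig fills in defaults.

```go
package main

import (
	"crypto/tls"
	"fmt"

	// assumed root path of the vendored quic-go package
	quic "v2ray.com/core/external/github.com/lucas-clemente/quic-go"
)

func main() {
	tlsConf := &tls.Config{InsecureSkipVerify: true} // demo only; verify certificates in real code
	sess, err := quic.DialAddr("example.com:4242", tlsConf, nil)
	if err != nil {
		panic(err)
	}
	_ = sess // open streams on the returned Session from here on
	fmt.Println("QUIC handshake complete")
}
```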
func dialContext(
ctx context.Context,
pconn net.PacketConn,
remoteAddr net.Addr,
host string,
tlsConf *tls.Config,
config *Config,
createdPacketConn bool,
) (Session, error) {
config = populateClientConfig(config, createdPacketConn)
packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDLength)
if err != nil {
return nil, err
}
c, err := newClient(pconn, remoteAddr, config, tlsConf, host, createdPacketConn)
if err != nil {
return nil, err
}
c.packetHandlers = packetHandlers
if err := c.dial(ctx); err != nil {
return nil, err
}
return c.session, nil
}
func newClient(
pconn net.PacketConn,
remoteAddr net.Addr,
config *Config,
tlsConf *tls.Config,
host string,
createdPacketConn bool,
) (*client, error) {
if tlsConf == nil {
tlsConf = &tls.Config{}
}
if tlsConf.ServerName == "" {
var err error
tlsConf.ServerName, _, err = net.SplitHostPort(host)
if err != nil {
return nil, err
}
}
// check that all versions are actually supported
if config != nil {
for _, v := range config.Versions {
if !protocol.IsValidVersion(v) {
return nil, fmt.Errorf("%s is not a valid QUIC version", v)
}
}
}
srcConnID, err := generateConnectionID(config.ConnectionIDLength)
if err != nil {
return nil, err
}
destConnID, err := generateConnectionIDForInitial()
if err != nil {
return nil, err
}
c := &client{
srcConnID: srcConnID,
destConnID: destConnID,
conn: &conn{pconn: pconn, currentAddr: remoteAddr},
createdPacketConn: createdPacketConn,
tlsConf: tlsConf,
config: config,
version: config.Versions[0],
handshakeChan: make(chan struct{}),
logger: utils.DefaultLogger.WithPrefix("client"),
}
return c, nil
}
// populateClientConfig populates fields in the quic.Config with their default values, if none are set.
// It may be called with a nil Config.
func populateClientConfig(config *Config, createdPacketConn bool) *Config {
if config == nil {
config = &Config{}
}
versions := config.Versions
if len(versions) == 0 {
versions = protocol.SupportedVersions
}
handshakeTimeout := protocol.DefaultHandshakeTimeout
if config.HandshakeTimeout != 0 {
handshakeTimeout = config.HandshakeTimeout
}
idleTimeout := protocol.DefaultIdleTimeout
if config.IdleTimeout != 0 {
idleTimeout = config.IdleTimeout
}
maxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow
if maxReceiveStreamFlowControlWindow == 0 {
maxReceiveStreamFlowControlWindow = protocol.DefaultMaxReceiveStreamFlowControlWindow
}
maxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow
if maxReceiveConnectionFlowControlWindow == 0 {
maxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow
}
maxIncomingStreams := config.MaxIncomingStreams
if maxIncomingStreams == 0 {
maxIncomingStreams = protocol.DefaultMaxIncomingStreams
} else if maxIncomingStreams < 0 {
maxIncomingStreams = 0
}
maxIncomingUniStreams := config.MaxIncomingUniStreams
if maxIncomingUniStreams == 0 {
maxIncomingUniStreams = protocol.DefaultMaxIncomingUniStreams
} else if maxIncomingUniStreams < 0 {
maxIncomingUniStreams = 0
}
connIDLen := config.ConnectionIDLength
if connIDLen == 0 && !createdPacketConn {
connIDLen = protocol.DefaultConnectionIDLength
}
return &Config{
Versions: versions,
HandshakeTimeout: handshakeTimeout,
IdleTimeout: idleTimeout,
ConnectionIDLength: connIDLen,
MaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow,
MaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow,
MaxIncomingStreams: maxIncomingStreams,
MaxIncomingUniStreams: maxIncomingUniStreams,
KeepAlive: config.KeepAlive,
}
}
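// Hypothetical in-package sketch, not part of the original file: the defaults
// that populateClientConfig fills in when called with a nil Config for a
// connection we did not create ourselves.
func examplePopulateClientConfig() {
	cfg := populateClientConfig(nil, false /* createdPacketConn */)
	_ = cfg.Versions                          // protocol.SupportedVersions
	_ = cfg.HandshakeTimeout                  // protocol.DefaultHandshakeTimeout
	_ = cfg.IdleTimeout                       // protocol.DefaultIdleTimeout
	_ = cfg.ConnectionIDLength                // protocol.DefaultConnectionIDLength
	_ = cfg.MaxReceiveStreamFlowControlWindow // protocol.DefaultMaxReceiveStreamFlowControlWindow
}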
func (c *client) dial(ctx context.Context) error {
c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.conn.LocalAddr(), c.conn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
if err := c.createNewTLSSession(c.version); err != nil {
return err
}
err := c.establishSecureConnection(ctx)
if err == errCloseForRecreating {
return c.dial(ctx)
}
return err
}
// establishSecureConnection runs the session and tries to establish a secure connection.
// It returns:
// - errCloseForRecreating when the server sends a Version Negotiation packet or performs a stateless Retry
// - any other error that might occur
// - nil once the handshake has completed and the connection is forward-secure
func (c *client) establishSecureConnection(ctx context.Context) error {
errorChan := make(chan error, 1)
go func() {
err := c.session.run() // returns as soon as the session is closed
if err != errCloseForRecreating && c.createdPacketConn {
c.conn.Close()
}
errorChan <- err
}()
select {
case <-ctx.Done():
// The session will send a PeerGoingAway error to the server.
c.session.Close()
return ctx.Err()
case err := <-errorChan:
return err
case <-c.handshakeChan:
// handshake successfully completed
return nil
}
}
func (c *client) handlePacket(p *receivedPacket) {
if p.hdr.IsVersionNegotiation() {
go c.handleVersionNegotiationPacket(p.hdr)
return
}
if p.hdr.Type == protocol.PacketTypeRetry {
go c.handleRetryPacket(p.hdr)
return
}
// this is the first packet we are receiving
// since it is not a Version Negotiation Packet, this means the server supports the suggested version
if !c.versionNegotiated.Get() {
c.versionNegotiated.Set(true)
}
c.session.handlePacket(p)
}
func (c *client) handleVersionNegotiationPacket(hdr *wire.Header) {
c.mutex.Lock()
defer c.mutex.Unlock()
// ignore delayed / duplicated version negotiation packets
if c.receivedVersionNegotiationPacket || c.versionNegotiated.Get() {
c.logger.Debugf("Received a delayed Version Negotiation packet.")
return
}
for _, v := range hdr.SupportedVersions {
if v == c.version {
// The Version Negotiation packet contains the version that we offered.
// This might be a packet sent by an attacker (or by a terribly broken server implementation).
return
}
}
c.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", hdr.SupportedVersions)
newVersion, ok := protocol.ChooseSupportedVersion(c.config.Versions, hdr.SupportedVersions)
if !ok {
c.session.destroy(qerr.InvalidVersion)
c.logger.Debugf("No compatible version found.")
return
}
c.receivedVersionNegotiationPacket = true
c.negotiatedVersions = hdr.SupportedVersions
// switch to negotiated version
c.initialVersion = c.version
c.version = newVersion
c.logger.Infof("Switching to QUIC version %s. New connection ID: %s", newVersion, c.destConnID)
c.initialPacketNumber = c.session.closeForRecreating()
}
func (c *client) handleRetryPacket(hdr *wire.Header) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.logger.Debugf("<- Received Retry")
(&wire.ExtendedHeader{Header: *hdr}).Log(c.logger)
if !hdr.OrigDestConnectionID.Equal(c.destConnID) {
c.logger.Debugf("Ignoring spoofed Retry. Original Destination Connection ID: %s, expected: %s", hdr.OrigDestConnectionID, c.destConnID)
return
}
if hdr.SrcConnectionID.Equal(c.destConnID) {
c.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.")
return
}
// If a token is already set, this means that we already received a Retry from the server.
// Ignore this Retry packet.
if len(c.token) > 0 {
c.logger.Debugf("Ignoring Retry, since a Retry was already received.")
return
}
c.origDestConnID = c.destConnID
c.destConnID = hdr.SrcConnectionID
c.token = hdr.Token
c.initialPacketNumber = c.session.closeForRecreating()
}
func (c *client) createNewTLSSession(version protocol.VersionNumber) error {
params := &handshake.TransportParameters{
InitialMaxStreamDataBidiRemote: protocol.InitialMaxStreamData,
InitialMaxStreamDataBidiLocal: protocol.InitialMaxStreamData,
InitialMaxStreamDataUni: protocol.InitialMaxStreamData,
InitialMaxData: protocol.InitialMaxData,
IdleTimeout: c.config.IdleTimeout,
MaxBidiStreams: uint64(c.config.MaxIncomingStreams),
MaxUniStreams: uint64(c.config.MaxIncomingUniStreams),
DisableMigration: true,
}
c.mutex.Lock()
defer c.mutex.Unlock()
runner := &runner{
onHandshakeCompleteImpl: func(_ Session) { close(c.handshakeChan) },
retireConnectionIDImpl: c.packetHandlers.Retire,
removeConnectionIDImpl: c.packetHandlers.Remove,
}
sess, err := newClientSession(
c.conn,
runner,
c.token,
c.origDestConnID,
c.destConnID,
c.srcConnID,
c.config,
c.tlsConf,
c.initialPacketNumber,
params,
c.initialVersion,
c.logger,
c.version,
)
if err != nil {
return err
}
c.session = sess
c.packetHandlers.Add(c.srcConnID, c)
return nil
}
func (c *client) Close() error {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.session == nil {
return nil
}
return c.session.Close()
}
func (c *client) destroy(e error) {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.session == nil {
return
}
c.session.destroy(e)
}
func (c *client) GetVersion() protocol.VersionNumber {
c.mutex.Lock()
v := c.version
c.mutex.Unlock()
return v
}
func (c *client) GetPerspective() protocol.Perspective {
return protocol.PerspectiveClient
}

View File

@ -1,54 +0,0 @@
package quic
import (
"net"
"sync"
)
type connection interface {
Write([]byte) error
Read([]byte) (int, net.Addr, error)
Close() error
LocalAddr() net.Addr
RemoteAddr() net.Addr
SetCurrentRemoteAddr(net.Addr)
}
type conn struct {
mutex sync.RWMutex
pconn net.PacketConn
currentAddr net.Addr
}
var _ connection = &conn{}
func (c *conn) Write(p []byte) error {
_, err := c.pconn.WriteTo(p, c.currentAddr)
return err
}
func (c *conn) Read(p []byte) (int, net.Addr, error) {
return c.pconn.ReadFrom(p)
}
func (c *conn) SetCurrentRemoteAddr(addr net.Addr) {
c.mutex.Lock()
c.currentAddr = addr
c.mutex.Unlock()
}
func (c *conn) LocalAddr() net.Addr {
return c.pconn.LocalAddr()
}
func (c *conn) RemoteAddr() net.Addr {
c.mutex.RLock()
addr := c.currentAddr
c.mutex.RUnlock()
return addr
}
func (c *conn) Close() error {
return c.pconn.Close()
}
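// Hypothetical in-package sketch, not part of the original file: conn pins a
// net.PacketConn to one remote address so the rest of the code can treat it
// like a connected socket. 203.0.113.1 is a documentation address.
func exampleConn() error {
	pconn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		return err
	}
	raddr, err := net.ResolveUDPAddr("udp", "203.0.113.1:443")
	if err != nil {
		return err
	}
	c := &conn{pconn: pconn, currentAddr: raddr}
	return c.Write([]byte("payload")) // sent to 203.0.113.1:443
}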

View File

@ -1,108 +0,0 @@
package quic
import (
"errors"
"fmt"
"io"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
type cryptoStream interface {
// for receiving data
HandleCryptoFrame(*wire.CryptoFrame) error
GetCryptoData() []byte
Finish() error
// for sending data
io.Writer
HasData() bool
PopCryptoFrame(protocol.ByteCount) *wire.CryptoFrame
}
type cryptoStreamImpl struct {
queue *frameSorter
msgBuf []byte
highestOffset protocol.ByteCount
finished bool
writeOffset protocol.ByteCount
writeBuf []byte
}
func newCryptoStream() cryptoStream {
return &cryptoStreamImpl{
queue: newFrameSorter(),
}
}
func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error {
highestOffset := f.Offset + protocol.ByteCount(len(f.Data))
if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset {
return fmt.Errorf("received invalid offset %d on crypto stream, maximum allowed %d", maxOffset, protocol.MaxCryptoStreamOffset)
}
if s.finished {
if highestOffset > s.highestOffset {
// reject crypto data received after this stream was already finished
return errors.New("received crypto data after change of encryption level")
}
// ignore data with a smaller offset than the highest received
// could e.g. be a retransmission
return nil
}
s.highestOffset = utils.MaxByteCount(s.highestOffset, highestOffset)
if err := s.queue.Push(f.Data, f.Offset, false); err != nil {
return err
}
for {
data, _ := s.queue.Pop()
if data == nil {
return nil
}
s.msgBuf = append(s.msgBuf, data...)
}
}
// GetCryptoData retrieves data that was received in CRYPTO frames
func (s *cryptoStreamImpl) GetCryptoData() []byte {
if len(s.msgBuf) < 4 {
return nil
}
msgLen := 4 + int(s.msgBuf[1])<<16 + int(s.msgBuf[2])<<8 + int(s.msgBuf[3])
if len(s.msgBuf) < msgLen {
return nil
}
msg := make([]byte, msgLen)
copy(msg, s.msgBuf[:msgLen])
s.msgBuf = s.msgBuf[msgLen:]
return msg
}
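// Worked example (hypothetical numbers, not part of the original file):
// CRYPTO data carries TLS handshake messages with a 4-byte header
// (1 byte type + 3 bytes length). For a ClientHello with a 512-byte body,
// msgBuf[1:4] is 0x00 0x02 0x00, so
//   msgLen = 4 + 0<<16 + 2<<8 + 0 = 516
// and GetCryptoData returns nil until all 516 bytes have been received.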
func (s *cryptoStreamImpl) Finish() error {
if s.queue.HasMoreData() {
return errors.New("encryption level changed, but crypto stream has more data to read")
}
s.finished = true
return nil
}
// Write writes data that should be sent out in CRYPTO frames
func (s *cryptoStreamImpl) Write(p []byte) (int, error) {
s.writeBuf = append(s.writeBuf, p...)
return len(p), nil
}
func (s *cryptoStreamImpl) HasData() bool {
return len(s.writeBuf) > 0
}
func (s *cryptoStreamImpl) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame {
f := &wire.CryptoFrame{Offset: s.writeOffset}
n := utils.MinByteCount(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf)))
f.Data = s.writeBuf[:n]
s.writeBuf = s.writeBuf[n:]
s.writeOffset += n
return f
}
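// Hypothetical in-package sketch, not part of the original file: outgoing
// handshake data is buffered with Write and later drained into CRYPTO frames
// that fit a given packet size (1200 bytes is just an illustrative budget).
func exampleCryptoStreamSend() []*wire.CryptoFrame {
	cs := newCryptoStream()
	_, _ = cs.Write(make([]byte, 3000)) // e.g. a serialized handshake message
	var frames []*wire.CryptoFrame
	for cs.HasData() {
		frames = append(frames, cs.PopCryptoFrame(1200))
	}
	return frames // consecutive frames with increasing Offset
}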

View File

@ -1,55 +0,0 @@
package quic
import (
"fmt"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
type cryptoDataHandler interface {
HandleMessage([]byte, protocol.EncryptionLevel) bool
}
type cryptoStreamManager struct {
cryptoHandler cryptoDataHandler
initialStream cryptoStream
handshakeStream cryptoStream
}
func newCryptoStreamManager(
cryptoHandler cryptoDataHandler,
initialStream cryptoStream,
handshakeStream cryptoStream,
) *cryptoStreamManager {
return &cryptoStreamManager{
cryptoHandler: cryptoHandler,
initialStream: initialStream,
handshakeStream: handshakeStream,
}
}
func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) (bool /* encryption level changed */, error) {
var str cryptoStream
switch encLevel {
case protocol.EncryptionInitial:
str = m.initialStream
case protocol.EncryptionHandshake:
str = m.handshakeStream
default:
return false, fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel)
}
if err := str.HandleCryptoFrame(frame); err != nil {
return false, err
}
for {
data := str.GetCryptoData()
if data == nil {
return false, nil
}
if encLevelFinished := m.cryptoHandler.HandleMessage(data, encLevel); encLevelFinished {
return true, str.Finish()
}
}
}

View File

@ -1,163 +0,0 @@
package quic
import (
"errors"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
type frameSorter struct {
queue map[protocol.ByteCount][]byte
readPos protocol.ByteCount
finalOffset protocol.ByteCount
gaps *utils.ByteIntervalList
}
var errDuplicateStreamData = errors.New("Duplicate Stream Data")
func newFrameSorter() *frameSorter {
s := frameSorter{
gaps: utils.NewByteIntervalList(),
queue: make(map[protocol.ByteCount][]byte),
finalOffset: protocol.MaxByteCount,
}
s.gaps.PushFront(utils.ByteInterval{Start: 0, End: protocol.MaxByteCount})
return &s
}
func (s *frameSorter) Push(data []byte, offset protocol.ByteCount, fin bool) error {
err := s.push(data, offset, fin)
if err == errDuplicateStreamData {
return nil
}
return err
}
func (s *frameSorter) push(data []byte, offset protocol.ByteCount, fin bool) error {
if fin {
s.finalOffset = offset + protocol.ByteCount(len(data))
}
if len(data) == 0 {
return nil
}
var wasCut bool
if oldData, ok := s.queue[offset]; ok {
if len(data) <= len(oldData) {
return errDuplicateStreamData
}
data = data[len(oldData):]
offset += protocol.ByteCount(len(oldData))
wasCut = true
}
start := offset
end := offset + protocol.ByteCount(len(data))
// skip all gaps that are before this stream frame
var gap *utils.ByteIntervalElement
for gap = s.gaps.Front(); gap != nil; gap = gap.Next() {
// the frame is a duplicate. Ignore it
if end <= gap.Value.Start {
return errDuplicateStreamData
}
if end > gap.Value.Start && start <= gap.Value.End {
break
}
}
if gap == nil {
return errors.New("StreamFrameSorter BUG: no gap found")
}
if start < gap.Value.Start {
add := gap.Value.Start - start
offset += add
start += add
data = data[add:]
wasCut = true
}
// find the highest gap whose Start lies before the end of the frame
endGap := gap
for end >= endGap.Value.End {
nextEndGap := endGap.Next()
if nextEndGap == nil {
return errors.New("StreamFrameSorter BUG: no end gap found")
}
if endGap != gap {
s.gaps.Remove(endGap)
}
if end <= nextEndGap.Value.Start {
break
}
// delete queued frames completely covered by the current frame
delete(s.queue, endGap.Value.End)
endGap = nextEndGap
}
if end > endGap.Value.End {
cutLen := end - endGap.Value.End
newLen := protocol.ByteCount(len(data)) - cutLen
end -= cutLen
data = data[:newLen]
wasCut = true
}
if start == gap.Value.Start {
if end >= gap.Value.End {
// the frame completely fills this gap
// delete the gap
s.gaps.Remove(gap)
}
if end < endGap.Value.End {
// the frame covers the beginning of the gap
// adjust the Start value to shrink the gap
endGap.Value.Start = end
}
} else if end == endGap.Value.End {
// the frame covers the end of the gap
// adjust the End value to shrink the gap
gap.Value.End = start
} else {
if gap == endGap {
// the frame lies within the current gap, splitting it into two
// insert a new gap and adjust the current one
intv := utils.ByteInterval{Start: end, End: gap.Value.End}
s.gaps.InsertAfter(intv, gap)
gap.Value.End = start
} else {
gap.Value.End = start
endGap.Value.Start = end
}
}
if s.gaps.Len() > protocol.MaxStreamFrameSorterGaps {
return errors.New("Too many gaps in received data")
}
if wasCut {
newData := make([]byte, len(data))
copy(newData, data)
data = newData
}
s.queue[offset] = data
return nil
}
func (s *frameSorter) Pop() ([]byte /* data */, bool /* fin */) {
data, ok := s.queue[s.readPos]
if !ok {
return nil, s.readPos >= s.finalOffset
}
delete(s.queue, s.readPos)
s.readPos += protocol.ByteCount(len(data))
return data, s.readPos >= s.finalOffset
}
// HasMoreData says if there is any more data queued at *any* offset.
func (s *frameSorter) HasMoreData() bool {
return len(s.queue) > 0
}
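// Hypothetical in-package sketch, not part of the original file: out-of-order
// data is queued and only released once the gap at the read position is filled.
func exampleFrameSorter() {
	s := newFrameSorter()
	_ = s.Push([]byte("world"), 5, true) // arrives first; FIN at offset 10
	if data, _ := s.Pop(); data != nil {
		panic("offset 0 is still missing, nothing should be popped")
	}
	_ = s.Push([]byte("hello"), 0, false) // fills the gap
	first, _ := s.Pop()                   // "hello"
	second, fin := s.Pop()                // "world", fin == true
	_, _, _ = first, second, fin
}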

View File

@ -1,109 +0,0 @@
package quic
import (
"sync"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
type framer interface {
QueueControlFrame(wire.Frame)
AppendControlFrames([]wire.Frame, protocol.ByteCount) ([]wire.Frame, protocol.ByteCount)
AddActiveStream(protocol.StreamID)
AppendStreamFrames([]wire.Frame, protocol.ByteCount) []wire.Frame
}
type framerI struct {
mutex sync.Mutex
streamGetter streamGetter
version protocol.VersionNumber
activeStreams map[protocol.StreamID]struct{}
streamQueue []protocol.StreamID
controlFrameMutex sync.Mutex
controlFrames []wire.Frame
}
var _ framer = &framerI{}
func newFramer(
streamGetter streamGetter,
v protocol.VersionNumber,
) framer {
return &framerI{
streamGetter: streamGetter,
activeStreams: make(map[protocol.StreamID]struct{}),
version: v,
}
}
func (f *framerI) QueueControlFrame(frame wire.Frame) {
f.controlFrameMutex.Lock()
f.controlFrames = append(f.controlFrames, frame)
f.controlFrameMutex.Unlock()
}
func (f *framerI) AppendControlFrames(frames []wire.Frame, maxLen protocol.ByteCount) ([]wire.Frame, protocol.ByteCount) {
var length protocol.ByteCount
f.controlFrameMutex.Lock()
for len(f.controlFrames) > 0 {
frame := f.controlFrames[len(f.controlFrames)-1]
frameLen := frame.Length(f.version)
if length+frameLen > maxLen {
break
}
frames = append(frames, frame)
length += frameLen
f.controlFrames = f.controlFrames[:len(f.controlFrames)-1]
}
f.controlFrameMutex.Unlock()
return frames, length
}
func (f *framerI) AddActiveStream(id protocol.StreamID) {
f.mutex.Lock()
if _, ok := f.activeStreams[id]; !ok {
f.streamQueue = append(f.streamQueue, id)
f.activeStreams[id] = struct{}{}
}
f.mutex.Unlock()
}
func (f *framerI) AppendStreamFrames(frames []wire.Frame, maxLen protocol.ByteCount) []wire.Frame {
var length protocol.ByteCount
f.mutex.Lock()
// pop STREAM frames, until less than MinStreamFrameSize bytes are left in the packet
numActiveStreams := len(f.streamQueue)
for i := 0; i < numActiveStreams; i++ {
if maxLen-length < protocol.MinStreamFrameSize {
break
}
id := f.streamQueue[0]
f.streamQueue = f.streamQueue[1:]
// This should never return an error, but better check it anyway.
// The stream will only be in the streamQueue if it enqueued itself there.
str, err := f.streamGetter.GetOrOpenSendStream(id)
// The stream can be nil if it completed after it said it had data.
if str == nil || err != nil {
delete(f.activeStreams, id)
continue
}
frame, hasMoreData := str.popStreamFrame(maxLen - length)
if hasMoreData { // put the stream back in the queue (at the end)
f.streamQueue = append(f.streamQueue, id)
} else { // no more data to send. Stream is not active any more
delete(f.activeStreams, id)
}
if frame == nil { // can happen if the receiveStream was canceled after it said it had data
continue
}
frames = append(frames, frame)
length += frame.Length(f.version)
}
f.mutex.Unlock()
return frames
}

View File

@ -1,214 +0,0 @@
package quic
import (
"context"
"io"
"net"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/handshake"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
// The StreamID is the ID of a QUIC stream.
type StreamID = protocol.StreamID
// A VersionNumber is a QUIC version number.
type VersionNumber = protocol.VersionNumber
// A Cookie can be used to verify the ownership of the client address.
type Cookie struct {
RemoteAddr string
SentTime time.Time
}
// ConnectionState records basic details about the QUIC connection.
type ConnectionState = handshake.ConnectionState
// An ErrorCode is an application-defined error code.
type ErrorCode = protocol.ApplicationErrorCode
// Stream is the interface implemented by QUIC streams
type Stream interface {
// StreamID returns the stream ID.
StreamID() StreamID
// Read reads data from the stream.
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
// If the stream was canceled by the peer, the error implements the StreamError
// interface, and Canceled() == true.
io.Reader
// Write writes data to the stream.
// Write can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetWriteDeadline.
// If the stream was canceled by the peer, the error implements the StreamError
// interface, and Canceled() == true.
io.Writer
// Close closes the write-direction of the stream.
// Future calls to Write are not permitted after calling Close.
// It must not be called concurrently with Write.
// It must not be called after calling CancelWrite.
io.Closer
// CancelWrite aborts sending on this stream.
// It must not be called after Close.
// Data already written, but not yet delivered to the peer is not guaranteed to be delivered reliably.
// Write will unblock immediately, and future calls to Write will fail.
CancelWrite(ErrorCode) error
// CancelRead aborts receiving on this stream.
// It will ask the peer to stop transmitting stream data.
// Read will unblock immediately, and future Read calls will fail.
CancelRead(ErrorCode) error
// The context is canceled as soon as the write-side of the stream is closed.
// This happens when Close() is called, or when the stream is reset (either locally or remotely).
// Warning: This API should not be considered stable and might change soon.
Context() context.Context
// SetReadDeadline sets the deadline for future Read calls and
// any currently-blocked Read call.
// A zero value for t means Read will not time out.
SetReadDeadline(t time.Time) error
// SetWriteDeadline sets the deadline for future Write calls
// and any currently-blocked Write call.
// Even if write times out, it may return n > 0, indicating that
// some of the data was successfully written.
// A zero value for t means Write will not time out.
SetWriteDeadline(t time.Time) error
// SetDeadline sets the read and write deadlines associated
// with the connection. It is equivalent to calling both
// SetReadDeadline and SetWriteDeadline.
SetDeadline(t time.Time) error
HasMoreData() bool
}
// A ReceiveStream is a unidirectional Receive Stream.
type ReceiveStream interface {
// see Stream.StreamID
StreamID() StreamID
// see Stream.Read
io.Reader
// see Stream.CancelRead
CancelRead(ErrorCode) error
// see Stream.SetReadDeadline
SetReadDeadline(t time.Time) error
HasMoreData() bool
}
// A SendStream is a unidirectional Send Stream.
type SendStream interface {
// see Stream.StreamID
StreamID() StreamID
// see Stream.Write
io.Writer
// see Stream.Close
io.Closer
// see Stream.CancelWrite
CancelWrite(ErrorCode) error
// see Stream.Context
Context() context.Context
// see Stream.SetWriteDeadline
SetWriteDeadline(t time.Time) error
}
// StreamError is returned by Read and Write when the peer cancels the stream.
type StreamError interface {
error
Canceled() bool
ErrorCode() ErrorCode
}
// A Session is a QUIC connection between two peers.
type Session interface {
// AcceptStream returns the next stream opened by the peer, blocking until one is available.
AcceptStream() (Stream, error)
// AcceptUniStream returns the next unidirectional stream opened by the peer, blocking until one is available.
AcceptUniStream() (ReceiveStream, error)
// OpenStream opens a new bidirectional QUIC stream.
// There is no signaling to the peer about new streams:
// The peer can only accept the stream after data has been sent on the stream.
// If the error is non-nil, it satisfies the net.Error interface.
// When reaching the peer's stream limit, err.Temporary() will be true.
OpenStream() (Stream, error)
// OpenStreamSync opens a new bidirectional QUIC stream.
// It blocks until a new stream can be opened.
// If the error is non-nil, it satisfies the net.Error interface.
OpenStreamSync() (Stream, error)
// OpenUniStream opens a new outgoing unidirectional QUIC stream.
// If the error is non-nil, it satisfies the net.Error interface.
// When reaching the peer's stream limit, Temporary() will be true.
OpenUniStream() (SendStream, error)
// OpenUniStreamSync opens a new outgoing unidirectional QUIC stream.
// It blocks until a new stream can be opened.
// If the error is non-nil, it satisfies the net.Error interface.
OpenUniStreamSync() (SendStream, error)
// LocalAddr returns the local address.
LocalAddr() net.Addr
// RemoteAddr returns the address of the peer.
RemoteAddr() net.Addr
// Close the connection.
io.Closer
// Close the connection with an error.
// The error must not be nil.
CloseWithError(ErrorCode, error) error
// The context is cancelled when the session is closed.
// Warning: This API should not be considered stable and might change soon.
Context() context.Context
// ConnectionState returns basic details about the QUIC connection.
// Warning: This API should not be considered stable and might change soon.
ConnectionState() ConnectionState
}
// Config contains all configuration data needed for a QUIC server or client.
type Config struct {
// The QUIC versions that can be negotiated.
// If not set, it uses all versions available.
// Warning: This API should not be considered stable and will change soon.
Versions []VersionNumber
// The length of the connection ID in bytes.
// It can be 0, or any value between 4 and 18.
// If not set, the interpretation depends on where the Config is used:
// If used for dialing an address, a 0 byte connection ID will be used.
// If used for a server, or dialing on a packet conn, a 4 byte connection ID will be used.
// When dialing on a packet conn, the ConnectionIDLength value must be the same for every Dial call.
ConnectionIDLength int
// HandshakeTimeout is the maximum duration that the cryptographic handshake may take.
// If the timeout is exceeded, the connection is closed.
// If this value is zero, the timeout is set to 10 seconds.
HandshakeTimeout time.Duration
// IdleTimeout is the maximum duration that may pass without any incoming network activity.
// This value only applies after the handshake has completed.
// If the timeout is exceeded, the connection is closed.
// If this value is zero, the timeout is set to 30 seconds.
IdleTimeout time.Duration
// AcceptCookie determines if a Cookie is accepted.
// It is called with cookie = nil if the client didn't send a Cookie.
// If not set, it verifies that the address matches, and that the Cookie was issued within the last 24 hours.
// This option is only valid for the server.
AcceptCookie func(clientAddr net.Addr, cookie *Cookie) bool
// MaxReceiveStreamFlowControlWindow is the maximum stream-level flow control window for receiving data.
// If this value is zero, it will default to 1 MB for the server and 6 MB for the client.
MaxReceiveStreamFlowControlWindow uint64
// MaxReceiveConnectionFlowControlWindow is the connection-level flow control window for receiving data.
// If this value is zero, it will default to 1.5 MB for the server and 15 MB for the client.
MaxReceiveConnectionFlowControlWindow uint64
// MaxIncomingStreams is the maximum number of concurrent bidirectional streams that a peer is allowed to open.
// If not set, it will default to 100.
// If set to a negative value, it doesn't allow any bidirectional streams.
MaxIncomingStreams int
// MaxIncomingUniStreams is the maximum number of concurrent unidirectional streams that a peer is allowed to open.
// If not set, it will default to 100.
// If set to a negative value, it doesn't allow any unidirectional streams.
MaxIncomingUniStreams int
// KeepAlive defines whether this peer will periodically send PING frames to keep the connection alive.
KeepAlive bool
}
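// Hypothetical usage sketch, not part of the original file: a Config that keeps
// the defaults except for a longer idle timeout and keep-alive pings.
var exampleConfig = &Config{
	IdleTimeout: 2 * time.Minute,
	KeepAlive:   true,
}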
// A Listener for incoming QUIC connections
type Listener interface {
// Close the server, sending CONNECTION_CLOSE frames to each peer.
Close() error
// Addr returns the local network addr that the server is listening on.
Addr() net.Addr
// Accept returns new sessions. It should be called in a loop.
Accept() (Session, error)
}
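// Hypothetical usage sketch, not part of the original file: a generic accept
// loop over the Listener, Session and Stream interfaces above. How the Listener
// itself is created is outside the scope of this sketch.
func exampleEchoServer(ln Listener) error {
	for {
		sess, err := ln.Accept()
		if err != nil {
			return err
		}
		go func() {
			str, err := sess.AcceptStream()
			if err != nil {
				return
			}
			defer str.Close()
			_, _ = io.Copy(str, str) // echo everything the peer sends
		}()
	}
}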

View File

@ -1,3 +0,0 @@
package ackhandler
//go:generate genny -pkg ackhandler -in ../utils/linkedlist/linkedlist.go -out packet_linkedlist.go gen Item=Packet

View File

@ -1,49 +0,0 @@
package ackhandler
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
// SentPacketHandler handles ACKs received for outgoing packets
type SentPacketHandler interface {
// SentPacket may modify the packet
SentPacket(packet *Packet)
SentPacketsAsRetransmission(packets []*Packet, retransmissionOf protocol.PacketNumber)
ReceivedAck(ackFrame *wire.AckFrame, withPacketNumber protocol.PacketNumber, encLevel protocol.EncryptionLevel, recvTime time.Time) error
SetHandshakeComplete()
// The SendMode determines if and what kind of packets can be sent.
SendMode() SendMode
// TimeUntilSend is the time when the next packet should be sent.
// It is used for pacing packets.
TimeUntilSend() time.Time
// ShouldSendNumPackets returns the number of packets that should be sent immediately.
// It always returns a number greater than or equal to 1.
// A number greater than 1 is returned when the pacing delay is smaller than the minimum pacing delay.
// Note that the number of packets is only calculated based on the pacing algorithm.
// Before sending any packet, SendMode() must be called to learn if we can actually send it.
ShouldSendNumPackets() int
// only to be called once the handshake is complete
GetLowestPacketNotConfirmedAcked() protocol.PacketNumber
DequeuePacketForRetransmission() *Packet
DequeueProbePacket() (*Packet, error)
PeekPacketNumber() (protocol.PacketNumber, protocol.PacketNumberLen)
PopPacketNumber() protocol.PacketNumber
GetAlarmTimeout() time.Time
OnAlarm() error
}
// ReceivedPacketHandler handles ACKs needed to send for incoming packets
type ReceivedPacketHandler interface {
ReceivedPacket(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel, rcvTime time.Time, shouldInstigateAck bool) error
IgnoreBelow(protocol.PacketNumber)
GetAlarmTimeout() time.Time
GetAckFrame(protocol.EncryptionLevel) *wire.AckFrame
}

View File

@ -1,29 +0,0 @@
package ackhandler
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
// A Packet is a sent packet, tracked for acknowledgement and retransmission
type Packet struct {
PacketNumber protocol.PacketNumber
PacketType protocol.PacketType
Frames []wire.Frame
Length protocol.ByteCount
EncryptionLevel protocol.EncryptionLevel
SendTime time.Time
largestAcked protocol.PacketNumber // if the packet contains an ACK, the LargestAcked value of that ACK
// There are two reasons why a packet cannot be retransmitted:
// * it was already retransmitted
// * this packet is a retransmission, and we already received an ACK for the original packet
canBeRetransmitted bool
includedInBytesInFlight bool
retransmittedAs []protocol.PacketNumber
isRetransmission bool // we need a separate bool here because 0 is a valid packet number
retransmissionOf protocol.PacketNumber
}

View File

@ -1,217 +0,0 @@
// This file was automatically generated by genny.
// Any changes will be lost if this file is regenerated.
// see https://github.com/cheekybits/genny
package ackhandler
// Linked list implementation from the Go standard library.
// PacketElement is an element of a linked list.
type PacketElement struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *PacketElement
// The list to which this element belongs.
list *PacketList
// The value stored with this element.
Value Packet
}
// Next returns the next list element or nil.
func (e *PacketElement) Next() *PacketElement {
if p := e.next; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// Prev returns the previous list element or nil.
func (e *PacketElement) Prev() *PacketElement {
if p := e.prev; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// PacketList is a linked list of Packets.
type PacketList struct {
root PacketElement // sentinel list element, only &root, root.prev, and root.next are used
len int // current list length excluding (this) sentinel element
}
// Init initializes or clears list l.
func (l *PacketList) Init() *PacketList {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}
// NewPacketList returns an initialized list.
func NewPacketList() *PacketList { return new(PacketList).Init() }
// Len returns the number of elements of list l.
// The complexity is O(1).
func (l *PacketList) Len() int { return l.len }
// Front returns the first element of list l or nil if the list is empty.
func (l *PacketList) Front() *PacketElement {
if l.len == 0 {
return nil
}
return l.root.next
}
// Back returns the last element of list l or nil if the list is empty.
func (l *PacketList) Back() *PacketElement {
if l.len == 0 {
return nil
}
return l.root.prev
}
// lazyInit lazily initializes a zero List value.
func (l *PacketList) lazyInit() {
if l.root.next == nil {
l.Init()
}
}
// insert inserts e after at, increments l.len, and returns e.
func (l *PacketList) insert(e, at *PacketElement) *PacketElement {
n := at.next
at.next = e
e.prev = at
e.next = n
n.prev = e
e.list = l
l.len++
return e
}
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *PacketList) insertValue(v Packet, at *PacketElement) *PacketElement {
return l.insert(&PacketElement{Value: v}, at)
}
// remove removes e from its list, decrements l.len, and returns e.
func (l *PacketList) remove(e *PacketElement) *PacketElement {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--
return e
}
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
func (l *PacketList) Remove(e *PacketElement) Packet {
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash
l.remove(e)
}
return e.Value
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *PacketList) PushFront(v Packet) *PacketElement {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
func (l *PacketList) PushBack(v Packet) *PacketElement {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *PacketList) InsertBefore(v Packet, mark *PacketElement) *PacketElement {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark.prev)
}
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *PacketList) InsertAfter(v Packet, mark *PacketElement) *PacketElement {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark)
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *PacketList) MoveToFront(e *PacketElement) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.insert(l.remove(e), &l.root)
}
// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *PacketList) MoveToBack(e *PacketElement) {
if e.list != l || l.root.prev == e {
return
}
// see comment in List.Remove about initialization of l
l.insert(l.remove(e), l.root.prev)
}
// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *PacketList) MoveBefore(e, mark *PacketElement) {
if e.list != l || e == mark || mark.list != l {
return
}
l.insert(l.remove(e), mark.prev)
}
// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *PacketList) MoveAfter(e, mark *PacketElement) {
if e.list != l || e == mark || mark.list != l {
return
}
l.insert(l.remove(e), mark)
}
// PushBackList inserts a copy of an other list at the back of list l.
// The lists l and other may be the same. They must not be nil.
func (l *PacketList) PushBackList(other *PacketList) {
l.lazyInit()
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
l.insertValue(e.Value, l.root.prev)
}
}
// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
func (l *PacketList) PushFrontList(other *PacketList) {
l.lazyInit()
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
l.insertValue(e.Value, &l.root)
}
}
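// Hypothetical in-package sketch, not part of the original file: the generated
// PacketList behaves like container/list, specialized to Packet values.
func examplePacketList() {
	l := NewPacketList()
	first := l.PushBack(Packet{PacketNumber: 1})
	l.PushBack(Packet{PacketNumber: 2})
	l.Remove(first)                  // drop packet 1
	_ = l.Front().Value.PacketNumber // now 2
	_ = l.Len()                      // 1
}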

View File

@ -1,78 +0,0 @@
package ackhandler
import (
"crypto/rand"
"math"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
// The packetNumberGenerator generates the packet number for the next packet.
// It randomly skips a packet number every averagePeriod packets (on average).
// It is guaranteed to never skip two consecutive packet numbers.
type packetNumberGenerator struct {
averagePeriod protocol.PacketNumber
next protocol.PacketNumber
nextToSkip protocol.PacketNumber
history []protocol.PacketNumber
}
func newPacketNumberGenerator(initial, averagePeriod protocol.PacketNumber) *packetNumberGenerator {
g := &packetNumberGenerator{
next: initial,
averagePeriod: averagePeriod,
}
g.generateNewSkip()
return g
}
func (p *packetNumberGenerator) Peek() protocol.PacketNumber {
return p.next
}
func (p *packetNumberGenerator) Pop() protocol.PacketNumber {
next := p.next
// generate a new packet number for the next packet
p.next++
if p.next == p.nextToSkip {
if len(p.history)+1 > protocol.MaxTrackedSkippedPackets {
p.history = p.history[1:]
}
p.history = append(p.history, p.next)
p.next++
p.generateNewSkip()
}
return next
}
func (p *packetNumberGenerator) generateNewSkip() {
num := p.getRandomNumber()
skip := protocol.PacketNumber(num) * (p.averagePeriod - 1) / (math.MaxUint16 / 2)
// make sure that there are never two consecutive packet numbers that are skipped
p.nextToSkip = p.next + 2 + skip
}
// getRandomNumber generates a cryptographically secure random number between 0 and MaxUint16 (= 65535).
// Its expected value is 65535/2.
func (p *packetNumberGenerator) getRandomNumber() uint16 {
b := make([]byte, 2)
rand.Read(b) // ignore the error here
num := uint16(b[0])<<8 + uint16(b[1])
return num
}
func (p *packetNumberGenerator) Validate(ack *wire.AckFrame) bool {
for _, pn := range p.history {
if ack.AcksPacket(pn) {
return false
}
}
return true
}
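// Hypothetical in-package sketch, not part of the original file: with an
// averagePeriod of 500, roughly one packet number out of ~500 is left unused,
// and Validate rejects ACKs that claim one of the skipped numbers.
func examplePacketNumberGenerator() {
	g := newPacketNumberGenerator(1, 500)
	pn1 := g.Pop() // 1
	pn2 := g.Pop() // 2 (two consecutive numbers are never skipped)
	pn3 := g.Pop() // 3, or 4 if 3 happened to be chosen as the skipped number
	_ = g.Peek()   // the next number that Pop would return
	_, _, _ = pn1, pn2, pn3
}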

View File

@ -1,98 +0,0 @@
package ackhandler
import (
"fmt"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/congestion"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
const (
// maximum delay that can be applied to an ACK for a retransmittable packet
ackSendDelay = 25 * time.Millisecond
// initial maximum number of retransmittable packets received before sending an ack.
initialRetransmittablePacketsBeforeAck = 2
// number of retransmittable packets that an ACK is sent for
retransmittablePacketsBeforeAck = 10
// 1/4 RTT delay when doing ack decimation
ackDecimationDelay = 1.0 / 4
// 1/8 RTT delay when doing ack decimation
shortAckDecimationDelay = 1.0 / 8
// Minimum number of packets received before ack decimation is enabled.
// This intends to avoid the beginning of slow start, when CWNDs may be
// rapidly increasing.
minReceivedBeforeAckDecimation = 100
// Maximum number of packets to ack immediately after a missing packet for
// fast retransmission to kick in at the sender. This limit is created to
// reduce the number of acks sent that have no benefit for fast retransmission.
// Set to the number of nacks needed for fast retransmit plus one for protection
// against an ack loss
maxPacketsAfterNewMissing = 4
)
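// Worked example (hypothetical numbers, not part of the original file): with a
// measured min RTT of 40ms, the decimation timer in maybeQueueAck fires after
// min(ackSendDelay, minRTT*ackDecimationDelay) = min(25ms, 10ms) = 10ms, while
// the timer armed for newly missing packets uses minRTT*shortAckDecimationDelay = 5ms.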
type receivedPacketHandler struct {
initialPackets *receivedPacketTracker
handshakePackets *receivedPacketTracker
oneRTTPackets *receivedPacketTracker
}
var _ ReceivedPacketHandler = &receivedPacketHandler{}
// NewReceivedPacketHandler creates a new receivedPacketHandler
func NewReceivedPacketHandler(
rttStats *congestion.RTTStats,
logger utils.Logger,
version protocol.VersionNumber,
) ReceivedPacketHandler {
return &receivedPacketHandler{
initialPackets: newReceivedPacketTracker(rttStats, logger, version),
handshakePackets: newReceivedPacketTracker(rttStats, logger, version),
oneRTTPackets: newReceivedPacketTracker(rttStats, logger, version),
}
}
func (h *receivedPacketHandler) ReceivedPacket(
pn protocol.PacketNumber,
encLevel protocol.EncryptionLevel,
rcvTime time.Time,
shouldInstigateAck bool,
) error {
switch encLevel {
case protocol.EncryptionInitial:
return h.initialPackets.ReceivedPacket(pn, rcvTime, shouldInstigateAck)
case protocol.EncryptionHandshake:
return h.handshakePackets.ReceivedPacket(pn, rcvTime, shouldInstigateAck)
case protocol.Encryption1RTT:
return h.oneRTTPackets.ReceivedPacket(pn, rcvTime, shouldInstigateAck)
default:
return fmt.Errorf("received packet with unknown encryption level: %s", encLevel)
}
}
// only to be used with 1-RTT packets
func (h *receivedPacketHandler) IgnoreBelow(pn protocol.PacketNumber) {
h.oneRTTPackets.IgnoreBelow(pn)
}
func (h *receivedPacketHandler) GetAlarmTimeout() time.Time {
initialAlarm := h.initialPackets.GetAlarmTimeout()
handshakeAlarm := h.handshakePackets.GetAlarmTimeout()
oneRTTAlarm := h.oneRTTPackets.GetAlarmTimeout()
return utils.MinNonZeroTime(utils.MinNonZeroTime(initialAlarm, handshakeAlarm), oneRTTAlarm)
}
func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel) *wire.AckFrame {
switch encLevel {
case protocol.EncryptionInitial:
return h.initialPackets.GetAckFrame()
case protocol.EncryptionHandshake:
return h.handshakePackets.GetAckFrame()
case protocol.Encryption1RTT:
return h.oneRTTPackets.GetAckFrame()
default:
return nil
}
}

View File

@ -1,122 +0,0 @@
package ackhandler
import (
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
// The receivedPacketHistory stores if a packet number has already been received.
// It generates ACK ranges which can be used to assemble an ACK frame.
// It does not store packet contents.
type receivedPacketHistory struct {
ranges *utils.PacketIntervalList
lowestInReceivedPacketNumbers protocol.PacketNumber
}
var errTooManyOutstandingReceivedAckRanges = qerr.Error(qerr.TooManyOutstandingReceivedPackets, "Too many outstanding received ACK ranges")
// newReceivedPacketHistory creates a new received packet history
func newReceivedPacketHistory() *receivedPacketHistory {
return &receivedPacketHistory{
ranges: utils.NewPacketIntervalList(),
}
}
// ReceivedPacket registers a packet with PacketNumber p and updates the ranges
func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) error {
if h.ranges.Len() >= protocol.MaxTrackedReceivedAckRanges {
return errTooManyOutstandingReceivedAckRanges
}
if h.ranges.Len() == 0 {
h.ranges.PushBack(utils.PacketInterval{Start: p, End: p})
return nil
}
for el := h.ranges.Back(); el != nil; el = el.Prev() {
// p already included in an existing range. Nothing to do here
if p >= el.Value.Start && p <= el.Value.End {
return nil
}
var rangeExtended bool
if el.Value.End == p-1 { // extend a range at the end
rangeExtended = true
el.Value.End = p
} else if el.Value.Start == p+1 { // extend a range at the beginning
rangeExtended = true
el.Value.Start = p
}
// if a range was extended (either at the beginning or at the end), it might now be possible to merge two ranges into one
if rangeExtended {
prev := el.Prev()
if prev != nil && prev.Value.End+1 == el.Value.Start { // merge two ranges
prev.Value.End = el.Value.End
h.ranges.Remove(el)
return nil
}
return nil // if the two ranges were not merged, we're done here
}
// create a new range at the end
if p > el.Value.End {
h.ranges.InsertAfter(utils.PacketInterval{Start: p, End: p}, el)
return nil
}
}
// create a new range at the beginning
h.ranges.InsertBefore(utils.PacketInterval{Start: p, End: p}, h.ranges.Front())
return nil
}
// DeleteBelow deletes all entries below (but not including) p
func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) {
if p <= h.lowestInReceivedPacketNumbers {
return
}
h.lowestInReceivedPacketNumbers = p
nextEl := h.ranges.Front()
for el := h.ranges.Front(); nextEl != nil; el = nextEl {
nextEl = el.Next()
if p > el.Value.Start && p <= el.Value.End {
el.Value.Start = p
} else if el.Value.End < p { // delete a whole range
h.ranges.Remove(el)
} else { // no ranges affected. Nothing to do
return
}
}
}
// GetAckRanges gets a slice of all AckRanges that can be used in an AckFrame
func (h *receivedPacketHistory) GetAckRanges() []wire.AckRange {
if h.ranges.Len() == 0 {
return nil
}
ackRanges := make([]wire.AckRange, h.ranges.Len())
i := 0
for el := h.ranges.Back(); el != nil; el = el.Prev() {
ackRanges[i] = wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End}
i++
}
return ackRanges
}
func (h *receivedPacketHistory) GetHighestAckRange() wire.AckRange {
ackRange := wire.AckRange{}
if h.ranges.Len() > 0 {
r := h.ranges.Back().Value
ackRange.Smallest = r.Start
ackRange.Largest = r.End
}
return ackRange
}
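// Hypothetical in-package sketch, not part of the original file: receiving
// packets 1, 2 and 4 leaves a hole at 3, so two ACK ranges are reported,
// ordered from the largest range down.
func exampleReceivedPacketHistory() {
	h := newReceivedPacketHistory()
	_ = h.ReceivedPacket(1)
	_ = h.ReceivedPacket(2)
	_ = h.ReceivedPacket(4)
	ranges := h.GetAckRanges() // [{Smallest:4 Largest:4} {Smallest:1 Largest:2}]
	_ = ranges
}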

View File

@ -1,191 +0,0 @@
package ackhandler
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/congestion"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
type receivedPacketTracker struct {
largestObserved protocol.PacketNumber
ignoreBelow protocol.PacketNumber
largestObservedReceivedTime time.Time
packetHistory *receivedPacketHistory
ackSendDelay time.Duration
rttStats *congestion.RTTStats
packetsReceivedSinceLastAck int
retransmittablePacketsReceivedSinceLastAck int
ackQueued bool
ackAlarm time.Time
lastAck *wire.AckFrame
logger utils.Logger
version protocol.VersionNumber
}
func newReceivedPacketTracker(
rttStats *congestion.RTTStats,
logger utils.Logger,
version protocol.VersionNumber,
) *receivedPacketTracker {
return &receivedPacketTracker{
packetHistory: newReceivedPacketHistory(),
ackSendDelay: ackSendDelay,
rttStats: rttStats,
logger: logger,
version: version,
}
}
func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumber, rcvTime time.Time, shouldInstigateAck bool) error {
if packetNumber < h.ignoreBelow {
return nil
}
isMissing := h.isMissing(packetNumber)
if packetNumber >= h.largestObserved {
h.largestObserved = packetNumber
h.largestObservedReceivedTime = rcvTime
}
if err := h.packetHistory.ReceivedPacket(packetNumber); err != nil {
return err
}
h.maybeQueueAck(packetNumber, rcvTime, shouldInstigateAck, isMissing)
return nil
}
// IgnoreBelow sets a lower limit for acking packets.
// Packets with packet numbers smaller than p will not be acked.
func (h *receivedPacketTracker) IgnoreBelow(p protocol.PacketNumber) {
if p <= h.ignoreBelow {
return
}
h.ignoreBelow = p
h.packetHistory.DeleteBelow(p)
if h.logger.Debug() {
h.logger.Debugf("\tIgnoring all packets below %#x.", p)
}
}
// isMissing says if a packet was reported missing in the last ACK.
func (h *receivedPacketTracker) isMissing(p protocol.PacketNumber) bool {
if h.lastAck == nil || p < h.ignoreBelow {
return false
}
return p < h.lastAck.LargestAcked() && !h.lastAck.AcksPacket(p)
}
func (h *receivedPacketTracker) hasNewMissingPackets() bool {
if h.lastAck == nil {
return false
}
highestRange := h.packetHistory.GetHighestAckRange()
return highestRange.Smallest >= h.lastAck.LargestAcked() && highestRange.Len() <= maxPacketsAfterNewMissing
}
// maybeQueueAck queues an ACK, if necessary.
// It is implemented analogously to Chrome's QuicConnection::MaybeQueueAck()
// in ACK_DECIMATION_WITH_REORDERING mode.
func (h *receivedPacketTracker) maybeQueueAck(packetNumber protocol.PacketNumber, rcvTime time.Time, shouldInstigateAck, wasMissing bool) {
h.packetsReceivedSinceLastAck++
// always ack the first packet
if h.lastAck == nil {
h.logger.Debugf("\tQueueing ACK because the first packet should be acknowledged.")
h.ackQueued = true
return
}
// Send an ACK if this packet was reported missing in an ACK sent before.
// Ack decimation with reordering relies on the timer to send an ACK, but if
// packets were reported missing in the previous ACK, send an ACK immediately.
if wasMissing {
if h.logger.Debug() {
h.logger.Debugf("\tQueueing ACK because packet %#x was missing before.", packetNumber)
}
h.ackQueued = true
}
if !h.ackQueued && shouldInstigateAck {
h.retransmittablePacketsReceivedSinceLastAck++
if packetNumber > minReceivedBeforeAckDecimation {
// ack up to 10 packets at once
if h.retransmittablePacketsReceivedSinceLastAck >= retransmittablePacketsBeforeAck {
h.ackQueued = true
if h.logger.Debug() {
h.logger.Debugf("\tQueueing ACK because packet %d packets were received after the last ACK (using threshold: %d).", h.retransmittablePacketsReceivedSinceLastAck, retransmittablePacketsBeforeAck)
}
} else if h.ackAlarm.IsZero() {
// wait for the minimum of the ack decimation delay or the delayed ack time before sending an ack
ackDelay := utils.MinDuration(ackSendDelay, time.Duration(float64(h.rttStats.MinRTT())*float64(ackDecimationDelay)))
h.ackAlarm = rcvTime.Add(ackDelay)
if h.logger.Debug() {
h.logger.Debugf("\tSetting ACK timer to min(1/4 min-RTT, max ack delay): %s (%s from now)", ackDelay, time.Until(h.ackAlarm))
}
}
} else {
// send an ACK every 2 retransmittable packets
if h.retransmittablePacketsReceivedSinceLastAck >= initialRetransmittablePacketsBeforeAck {
if h.logger.Debug() {
h.logger.Debugf("\tQueueing ACK because packet %d packets were received after the last ACK (using initial threshold: %d).", h.retransmittablePacketsReceivedSinceLastAck, initialRetransmittablePacketsBeforeAck)
}
h.ackQueued = true
} else if h.ackAlarm.IsZero() {
if h.logger.Debug() {
h.logger.Debugf("\tSetting ACK timer to max ack delay: %s", ackSendDelay)
}
h.ackAlarm = rcvTime.Add(ackSendDelay)
}
}
// If there are new missing packets to report, set a short timer to send an ACK.
if h.hasNewMissingPackets() {
// wait the minimum of 1/8 min RTT and the existing ack time
ackDelay := time.Duration(float64(h.rttStats.MinRTT()) * float64(shortAckDecimationDelay))
ackTime := rcvTime.Add(ackDelay)
if h.ackAlarm.IsZero() || h.ackAlarm.After(ackTime) {
h.ackAlarm = ackTime
if h.logger.Debug() {
h.logger.Debugf("\tSetting ACK timer to 1/8 min-RTT: %s (%s from now)", ackDelay, time.Until(h.ackAlarm))
}
}
}
}
if h.ackQueued {
// cancel the ack alarm
h.ackAlarm = time.Time{}
}
}
func (h *receivedPacketTracker) GetAckFrame() *wire.AckFrame {
now := time.Now()
if !h.ackQueued && (h.ackAlarm.IsZero() || h.ackAlarm.After(now)) {
return nil
}
if h.logger.Debug() && !h.ackQueued && !h.ackAlarm.IsZero() {
h.logger.Debugf("Sending ACK because the ACK timer expired.")
}
ack := &wire.AckFrame{
AckRanges: h.packetHistory.GetAckRanges(),
DelayTime: now.Sub(h.largestObservedReceivedTime),
}
h.lastAck = ack
h.ackAlarm = time.Time{}
h.ackQueued = false
h.packetsReceivedSinceLastAck = 0
h.retransmittablePacketsReceivedSinceLastAck = 0
return ack
}
func (h *receivedPacketTracker) GetAlarmTimeout() time.Time { return h.ackAlarm }

View File

@ -1,34 +0,0 @@
package ackhandler
import "v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
// Returns a new slice with all non-retransmittable frames deleted.
func stripNonRetransmittableFrames(fs []wire.Frame) []wire.Frame {
res := make([]wire.Frame, 0, len(fs))
for _, f := range fs {
if IsFrameRetransmittable(f) {
res = append(res, f)
}
}
return res
}
// IsFrameRetransmittable returns true if the frame should be retransmitted.
func IsFrameRetransmittable(f wire.Frame) bool {
switch f.(type) {
case *wire.AckFrame:
return false
default:
return true
}
}
// HasRetransmittableFrames returns true if at least one frame is retransmittable.
func HasRetransmittableFrames(fs []wire.Frame) bool {
for _, f := range fs {
if IsFrameRetransmittable(f) {
return true
}
}
return false
}
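// Hypothetical in-package sketch, not part of the original file: an ACK-only
// packet carries nothing that needs to be retransmitted, while adding a CRYPTO
// frame makes the packet retransmittable.
func exampleRetransmittableFrames() {
	ackOnly := []wire.Frame{&wire.AckFrame{}}
	_ = HasRetransmittableFrames(ackOnly) // false
	withCrypto := append(ackOnly, &wire.CryptoFrame{Data: []byte("hello")})
	_ = HasRetransmittableFrames(withCrypto)            // true
	_ = len(stripNonRetransmittableFrames(withCrypto))  // 1: only the CRYPTO frame remains
}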

View File

@ -1,36 +0,0 @@
package ackhandler
import "fmt"
// The SendMode says what kind of packets can be sent.
type SendMode uint8
const (
// SendNone means that no packets should be sent
SendNone SendMode = iota
// SendAck means an ACK-only packet should be sent
SendAck
// SendRetransmission means that retransmissions should be sent
SendRetransmission
// SendPTO means that a probe packet should be sent
SendPTO
// SendAny means that any packet should be sent
SendAny
)
func (s SendMode) String() string {
switch s {
case SendNone:
return "none"
case SendAck:
return "ack"
case SendRetransmission:
return "retransmission"
case SendPTO:
return "pto"
case SendAny:
return "any"
default:
return fmt.Sprintf("invalid send mode: %d", s)
}
}

View File

@ -1,580 +0,0 @@
package ackhandler
import (
"errors"
"fmt"
"math"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/congestion"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/wire"
)
const (
// Maximum reordering in time space before time based loss detection considers a packet lost.
// In fraction of an RTT.
timeReorderingFraction = 1.0 / 8
// Timer granularity. The timer will not be set to a value smaller than granularity.
granularity = time.Millisecond
)
type sentPacketHandler struct {
lastSentPacketNumber protocol.PacketNumber
packetNumberGenerator *packetNumberGenerator
lastSentRetransmittablePacketTime time.Time
lastSentCryptoPacketTime time.Time
nextPacketSendTime time.Time
largestAcked protocol.PacketNumber
largestReceivedPacketWithAck protocol.PacketNumber
// lowestPacketNotConfirmedAcked is the lowest packet number that we sent an ACK for, but for which we haven't received confirmation that this ACK actually arrived
// example: we send an ACK for packets 90-100 with packet number 20
// once we receive an ACK from the peer for packet 20, the lowestPacketNotConfirmedAcked is 101
lowestPacketNotConfirmedAcked protocol.PacketNumber
packetHistory *sentPacketHistory
retransmissionQueue []*Packet
bytesInFlight protocol.ByteCount
congestion congestion.SendAlgorithm
rttStats *congestion.RTTStats
handshakeComplete bool
// The number of times the crypto packets have been retransmitted without receiving an ack.
cryptoCount uint32
// The number of times a PTO has been sent without receiving an ack.
ptoCount uint32
// The number of PTO probe packets that should be sent.
numProbesToSend int
// The time at which the next packet will be considered lost based on early transmit or exceeding the reordering window in time.
lossTime time.Time
// The alarm timeout
alarm time.Time
logger utils.Logger
}
// NewSentPacketHandler creates a new sentPacketHandler
func NewSentPacketHandler(
initialPacketNumber protocol.PacketNumber,
rttStats *congestion.RTTStats,
logger utils.Logger,
) SentPacketHandler {
congestion := congestion.NewCubicSender(
congestion.DefaultClock{},
rttStats,
false, /* don't use reno since chromium doesn't (why?) */
protocol.InitialCongestionWindow,
protocol.DefaultMaxCongestionWindow,
)
return &sentPacketHandler{
packetNumberGenerator: newPacketNumberGenerator(initialPacketNumber, protocol.SkipPacketAveragePeriodLength),
packetHistory: newSentPacketHistory(),
rttStats: rttStats,
congestion: congestion,
logger: logger,
}
}
func (h *sentPacketHandler) lowestUnacked() protocol.PacketNumber {
if p := h.packetHistory.FirstOutstanding(); p != nil {
return p.PacketNumber
}
return h.largestAcked + 1
}
func (h *sentPacketHandler) SetHandshakeComplete() {
h.logger.Debugf("Handshake complete. Discarding all outstanding crypto packets.")
var queue []*Packet
for _, packet := range h.retransmissionQueue {
if packet.EncryptionLevel == protocol.Encryption1RTT {
queue = append(queue, packet)
}
}
var cryptoPackets []*Packet
h.packetHistory.Iterate(func(p *Packet) (bool, error) {
if p.EncryptionLevel != protocol.Encryption1RTT {
cryptoPackets = append(cryptoPackets, p)
}
return true, nil
})
for _, p := range cryptoPackets {
h.packetHistory.Remove(p.PacketNumber)
}
h.retransmissionQueue = queue
h.handshakeComplete = true
}
func (h *sentPacketHandler) SentPacket(packet *Packet) {
if isRetransmittable := h.sentPacketImpl(packet); isRetransmittable {
h.packetHistory.SentPacket(packet)
h.updateLossDetectionAlarm()
}
}
func (h *sentPacketHandler) SentPacketsAsRetransmission(packets []*Packet, retransmissionOf protocol.PacketNumber) {
var p []*Packet
for _, packet := range packets {
if isRetransmittable := h.sentPacketImpl(packet); isRetransmittable {
p = append(p, packet)
}
}
h.packetHistory.SentPacketsAsRetransmission(p, retransmissionOf)
h.updateLossDetectionAlarm()
}
func (h *sentPacketHandler) sentPacketImpl(packet *Packet) bool /* isRetransmittable */ {
if h.logger.Debug() && h.lastSentPacketNumber != 0 {
for p := h.lastSentPacketNumber + 1; p < packet.PacketNumber; p++ {
h.logger.Debugf("Skipping packet number %#x", p)
}
}
h.lastSentPacketNumber = packet.PacketNumber
if len(packet.Frames) > 0 {
if ackFrame, ok := packet.Frames[0].(*wire.AckFrame); ok {
packet.largestAcked = ackFrame.LargestAcked()
}
}
packet.Frames = stripNonRetransmittableFrames(packet.Frames)
isRetransmittable := len(packet.Frames) != 0
if isRetransmittable {
if packet.EncryptionLevel != protocol.Encryption1RTT {
h.lastSentCryptoPacketTime = packet.SendTime
}
h.lastSentRetransmittablePacketTime = packet.SendTime
packet.includedInBytesInFlight = true
h.bytesInFlight += packet.Length
packet.canBeRetransmitted = true
if h.numProbesToSend > 0 {
h.numProbesToSend--
}
}
h.congestion.OnPacketSent(packet.SendTime, h.bytesInFlight, packet.PacketNumber, packet.Length, isRetransmittable)
h.nextPacketSendTime = utils.MaxTime(h.nextPacketSendTime, packet.SendTime).Add(h.congestion.TimeUntilSend(h.bytesInFlight))
return isRetransmittable
}
func (h *sentPacketHandler) ReceivedAck(ackFrame *wire.AckFrame, withPacketNumber protocol.PacketNumber, encLevel protocol.EncryptionLevel, rcvTime time.Time) error {
largestAcked := ackFrame.LargestAcked()
if largestAcked > h.lastSentPacketNumber {
return qerr.Error(qerr.InvalidAckData, "Received ACK for an unsent package")
}
// duplicate or out of order ACK
if withPacketNumber != 0 && withPacketNumber < h.largestReceivedPacketWithAck {
h.logger.Debugf("Ignoring ACK frame (duplicate or out of order).")
return nil
}
h.largestReceivedPacketWithAck = withPacketNumber
h.largestAcked = utils.MaxPacketNumber(h.largestAcked, largestAcked)
if !h.packetNumberGenerator.Validate(ackFrame) {
return qerr.Error(qerr.InvalidAckData, "Received an ACK for a skipped packet number")
}
if rttUpdated := h.maybeUpdateRTT(largestAcked, ackFrame.DelayTime, rcvTime); rttUpdated {
h.congestion.MaybeExitSlowStart()
}
ackedPackets, err := h.determineNewlyAckedPackets(ackFrame)
if err != nil {
return err
}
if len(ackedPackets) == 0 {
return nil
}
priorInFlight := h.bytesInFlight
for _, p := range ackedPackets {
// TODO(#1534): check the encryption level
// if encLevel < p.EncryptionLevel {
// return fmt.Errorf("Received ACK with encryption level %s that acks a packet %d (encryption level %s)", encLevel, p.PacketNumber, p.EncryptionLevel)
// }
// largestAcked == 0 either means that the packet didn't contain an ACK, or it just acked packet 0
// It is safe to ignore the corner case of packets that just acked packet 0, because
// the lowestPacketNotConfirmedAcked is only used to limit the number of ACK ranges we will send.
if p.largestAcked != 0 {
h.lowestPacketNotConfirmedAcked = utils.MaxPacketNumber(h.lowestPacketNotConfirmedAcked, p.largestAcked+1)
}
if err := h.onPacketAcked(p, rcvTime); err != nil {
return err
}
if p.includedInBytesInFlight {
h.congestion.OnPacketAcked(p.PacketNumber, p.Length, priorInFlight, rcvTime)
}
}
if err := h.detectLostPackets(rcvTime, priorInFlight); err != nil {
return err
}
h.ptoCount = 0
h.cryptoCount = 0
h.updateLossDetectionAlarm()
return nil
}
func (h *sentPacketHandler) GetLowestPacketNotConfirmedAcked() protocol.PacketNumber {
return h.lowestPacketNotConfirmedAcked
}
func (h *sentPacketHandler) determineNewlyAckedPackets(ackFrame *wire.AckFrame) ([]*Packet, error) {
var ackedPackets []*Packet
ackRangeIndex := 0
lowestAcked := ackFrame.LowestAcked()
largestAcked := ackFrame.LargestAcked()
err := h.packetHistory.Iterate(func(p *Packet) (bool, error) {
// Ignore packets below the lowest acked
if p.PacketNumber < lowestAcked {
return true, nil
}
// Break after largest acked is reached
if p.PacketNumber > largestAcked {
return false, nil
}
if ackFrame.HasMissingRanges() {
ackRange := ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex]
for p.PacketNumber > ackRange.Largest && ackRangeIndex < len(ackFrame.AckRanges)-1 {
ackRangeIndex++
ackRange = ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex]
}
if p.PacketNumber >= ackRange.Smallest { // packet i contained in ACK range
if p.PacketNumber > ackRange.Largest {
return false, fmt.Errorf("BUG: ackhandler would have acked wrong packet 0x%x, while evaluating range 0x%x -> 0x%x", p.PacketNumber, ackRange.Smallest, ackRange.Largest)
}
ackedPackets = append(ackedPackets, p)
}
} else {
ackedPackets = append(ackedPackets, p)
}
return true, nil
})
if h.logger.Debug() && len(ackedPackets) > 0 {
pns := make([]protocol.PacketNumber, len(ackedPackets))
for i, p := range ackedPackets {
pns[i] = p.PacketNumber
}
h.logger.Debugf("\tnewly acked packets (%d): %#x", len(pns), pns)
}
return ackedPackets, err
}
func (h *sentPacketHandler) maybeUpdateRTT(largestAcked protocol.PacketNumber, ackDelay time.Duration, rcvTime time.Time) bool {
if p := h.packetHistory.GetPacket(largestAcked); p != nil {
h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay, rcvTime)
if h.logger.Debug() {
h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation())
}
return true
}
return false
}
func (h *sentPacketHandler) updateLossDetectionAlarm() {
// Cancel the alarm if no packets are outstanding
if !h.packetHistory.HasOutstandingPackets() {
h.alarm = time.Time{}
return
}
if h.packetHistory.HasOutstandingCryptoPackets() {
h.alarm = h.lastSentCryptoPacketTime.Add(h.computeCryptoTimeout())
} else if !h.lossTime.IsZero() {
// Early retransmit timer or time loss detection.
h.alarm = h.lossTime
} else { // PTO alarm
h.alarm = h.lastSentRetransmittablePacketTime.Add(h.computePTOTimeout())
}
}
func (h *sentPacketHandler) detectLostPackets(now time.Time, priorInFlight protocol.ByteCount) error {
h.lossTime = time.Time{}
maxRTT := float64(utils.MaxDuration(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT()))
delayUntilLost := time.Duration((1.0 + timeReorderingFraction) * maxRTT)
var lostPackets []*Packet
h.packetHistory.Iterate(func(packet *Packet) (bool, error) {
if packet.PacketNumber > h.largestAcked {
return false, nil
}
timeSinceSent := now.Sub(packet.SendTime)
if timeSinceSent > delayUntilLost {
lostPackets = append(lostPackets, packet)
} else if h.lossTime.IsZero() {
if h.logger.Debug() {
h.logger.Debugf("\tsetting loss timer for packet %#x to %s (in %s)", packet.PacketNumber, delayUntilLost, delayUntilLost-timeSinceSent)
}
// Note: This conditional is only entered once per call
h.lossTime = now.Add(delayUntilLost - timeSinceSent)
}
return true, nil
})
if h.logger.Debug() && len(lostPackets) > 0 {
pns := make([]protocol.PacketNumber, len(lostPackets))
for i, p := range lostPackets {
pns[i] = p.PacketNumber
}
h.logger.Debugf("\tlost packets (%d): %#x", len(pns), pns)
}
for _, p := range lostPackets {
// the bytes in flight need to be reduced regardless of whether this packet will be retransmitted
if p.includedInBytesInFlight {
h.bytesInFlight -= p.Length
h.congestion.OnPacketLost(p.PacketNumber, p.Length, priorInFlight)
}
if p.canBeRetransmitted {
// queue the packet for retransmission, and report the loss to the congestion controller
if err := h.queuePacketForRetransmission(p); err != nil {
return err
}
}
h.packetHistory.Remove(p.PacketNumber)
}
return nil
}
func (h *sentPacketHandler) OnAlarm() error {
// When all outstanding packets are acknowledged, the alarm is canceled in
// updateLossDetectionAlarm. This doesn't reset the timer in the session though.
// When OnAlarm is called, we therefore need to make sure that there are
// actually packets outstanding.
if h.packetHistory.HasOutstandingPackets() {
if err := h.onVerifiedAlarm(); err != nil {
return err
}
}
h.updateLossDetectionAlarm()
return nil
}
func (h *sentPacketHandler) onVerifiedAlarm() error {
var err error
if h.packetHistory.HasOutstandingCryptoPackets() {
if h.logger.Debug() {
h.logger.Debugf("Loss detection alarm fired in crypto mode. Crypto count: %d", h.cryptoCount)
}
h.cryptoCount++
err = h.queueCryptoPacketsForRetransmission()
} else if !h.lossTime.IsZero() {
if h.logger.Debug() {
h.logger.Debugf("Loss detection alarm fired in loss timer mode. Loss time: %s", h.lossTime)
}
// Early retransmit or time loss detection
err = h.detectLostPackets(time.Now(), h.bytesInFlight)
} else { // PTO
if h.logger.Debug() {
h.logger.Debugf("Loss detection alarm fired in PTO mode. PTO count: %d", h.ptoCount)
}
h.ptoCount++
h.numProbesToSend += 2
}
return err
}
func (h *sentPacketHandler) GetAlarmTimeout() time.Time {
return h.alarm
}
func (h *sentPacketHandler) onPacketAcked(p *Packet, rcvTime time.Time) error {
// This happens if a packet and its retransmissions are acked in the same ACK.
// As soon as we process the first one, this will remove all the retransmissions,
// so we won't find the retransmitted packet number later.
if packet := h.packetHistory.GetPacket(p.PacketNumber); packet == nil {
return nil
}
// only report the acking of this packet to the congestion controller if:
// * it is a retransmittable packet
// * this packet wasn't retransmitted yet
if p.isRetransmission {
// the parent not existing is expected whenever the original packet was already acked
if parent := h.packetHistory.GetPacket(p.retransmissionOf); parent != nil {
if len(parent.retransmittedAs) == 1 {
parent.retransmittedAs = nil
} else {
// remove this packet from the slice of retransmission
retransmittedAs := make([]protocol.PacketNumber, 0, len(parent.retransmittedAs)-1)
for _, pn := range parent.retransmittedAs {
if pn != p.PacketNumber {
retransmittedAs = append(retransmittedAs, pn)
}
}
parent.retransmittedAs = retransmittedAs
}
}
}
// this also applies to packets that have been retransmitted as probe packets
if p.includedInBytesInFlight {
h.bytesInFlight -= p.Length
}
if err := h.stopRetransmissionsFor(p); err != nil {
return err
}
return h.packetHistory.Remove(p.PacketNumber)
}
func (h *sentPacketHandler) stopRetransmissionsFor(p *Packet) error {
if err := h.packetHistory.MarkCannotBeRetransmitted(p.PacketNumber); err != nil {
return err
}
for _, r := range p.retransmittedAs {
packet := h.packetHistory.GetPacket(r)
if packet == nil {
return fmt.Errorf("sent packet handler BUG: marking packet as not retransmittable %d (retransmission of %d) not found in history", r, p.PacketNumber)
}
h.stopRetransmissionsFor(packet)
}
return nil
}
func (h *sentPacketHandler) DequeuePacketForRetransmission() *Packet {
if len(h.retransmissionQueue) == 0 {
return nil
}
packet := h.retransmissionQueue[0]
// Shift the slice and don't retain anything that isn't needed.
copy(h.retransmissionQueue, h.retransmissionQueue[1:])
h.retransmissionQueue[len(h.retransmissionQueue)-1] = nil
h.retransmissionQueue = h.retransmissionQueue[:len(h.retransmissionQueue)-1]
return packet
}
func (h *sentPacketHandler) DequeueProbePacket() (*Packet, error) {
if len(h.retransmissionQueue) == 0 {
p := h.packetHistory.FirstOutstanding()
if p == nil {
return nil, errors.New("cannot dequeue a probe packet. No outstanding packets")
}
if err := h.queuePacketForRetransmission(p); err != nil {
return nil, err
}
}
return h.DequeuePacketForRetransmission(), nil
}
func (h *sentPacketHandler) PeekPacketNumber() (protocol.PacketNumber, protocol.PacketNumberLen) {
pn := h.packetNumberGenerator.Peek()
return pn, protocol.GetPacketNumberLengthForHeader(pn, h.lowestUnacked())
}
func (h *sentPacketHandler) PopPacketNumber() protocol.PacketNumber {
return h.packetNumberGenerator.Pop()
}
func (h *sentPacketHandler) SendMode() SendMode {
numTrackedPackets := len(h.retransmissionQueue) + h.packetHistory.Len()
// Don't send any packets if we're already tracking the maximum number of packets.
// Note that since MaxOutstandingSentPackets is smaller than MaxTrackedSentPackets,
// we will stop sending out new data when reaching MaxOutstandingSentPackets,
// but still allow sending of retransmissions and ACKs.
if numTrackedPackets >= protocol.MaxTrackedSentPackets {
if h.logger.Debug() {
h.logger.Debugf("Limited by the number of tracked packets: tracking %d packets, maximum %d", numTrackedPackets, protocol.MaxTrackedSentPackets)
}
return SendNone
}
if h.numProbesToSend > 0 {
return SendPTO
}
// Only send ACKs if we're congestion limited.
if cwnd := h.congestion.GetCongestionWindow(); h.bytesInFlight > cwnd {
if h.logger.Debug() {
h.logger.Debugf("Congestion limited: bytes in flight %d, window %d", h.bytesInFlight, cwnd)
}
return SendAck
}
// Send retransmissions first, if there are any.
if len(h.retransmissionQueue) > 0 {
return SendRetransmission
}
if numTrackedPackets >= protocol.MaxOutstandingSentPackets {
if h.logger.Debug() {
h.logger.Debugf("Max outstanding limited: tracking %d packets, maximum: %d", numTrackedPackets, protocol.MaxOutstandingSentPackets)
}
return SendAck
}
return SendAny
}
func (h *sentPacketHandler) TimeUntilSend() time.Time {
return h.nextPacketSendTime
}
func (h *sentPacketHandler) ShouldSendNumPackets() int {
if h.numProbesToSend > 0 {
// PTO probes should not be paced, but must be sent immediately.
return h.numProbesToSend
}
delay := h.congestion.TimeUntilSend(h.bytesInFlight)
if delay == 0 || delay > protocol.MinPacingDelay {
return 1
}
return int(math.Ceil(float64(protocol.MinPacingDelay) / float64(delay)))
}
func (h *sentPacketHandler) queueCryptoPacketsForRetransmission() error {
var cryptoPackets []*Packet
h.packetHistory.Iterate(func(p *Packet) (bool, error) {
if p.canBeRetransmitted && p.EncryptionLevel != protocol.Encryption1RTT {
cryptoPackets = append(cryptoPackets, p)
}
return true, nil
})
for _, p := range cryptoPackets {
h.logger.Debugf("Queueing packet %#x as a crypto retransmission", p.PacketNumber)
if err := h.queuePacketForRetransmission(p); err != nil {
return err
}
}
return nil
}
func (h *sentPacketHandler) queuePacketForRetransmission(p *Packet) error {
if !p.canBeRetransmitted {
return fmt.Errorf("sent packet handler BUG: packet %d already queued for retransmission", p.PacketNumber)
}
if err := h.packetHistory.MarkCannotBeRetransmitted(p.PacketNumber); err != nil {
return err
}
h.retransmissionQueue = append(h.retransmissionQueue, p)
return nil
}
func (h *sentPacketHandler) computeCryptoTimeout() time.Duration {
duration := utils.MaxDuration(2*h.rttStats.SmoothedOrInitialRTT(), granularity)
// exponential backoff
// There's an implicit limit to this set by the crypto timeout.
return duration << h.cryptoCount
}
func (h *sentPacketHandler) computePTOTimeout() time.Duration {
// TODO(#1236): include the max_ack_delay
duration := utils.MaxDuration(h.rttStats.SmoothedOrInitialRTT()+4*h.rttStats.MeanDeviation(), granularity)
return duration << h.ptoCount
}
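
A self-contained sketch of the two timeout computations above (computeCryptoTimeout and computePTOTimeout), with assumed RTT figures purely for illustration:

package main

import (
	"fmt"
	"time"
)

func maxDuration(a, b time.Duration) time.Duration {
	if a > b {
		return a
	}
	return b
}

func main() {
	const granularity = time.Millisecond
	srtt := 100 * time.Millisecond   // assumed smoothed RTT
	meanDev := 20 * time.Millisecond // assumed mean deviation

	// crypto timeout: max(2*SRTT, granularity), doubled for every unacked crypto retransmission
	cryptoCount := uint32(2)
	cryptoTimeout := maxDuration(2*srtt, granularity) << cryptoCount
	fmt.Println(cryptoTimeout) // 800ms = 200ms << 2

	// PTO timeout: max(SRTT + 4*mean deviation, granularity), doubled per PTO
	ptoCount := uint32(1)
	pto := maxDuration(srtt+4*meanDev, granularity) << ptoCount
	fmt.Println(pto) // 360ms = (100ms + 80ms) << 1
}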

View File

@ -1,168 +0,0 @@
package ackhandler
import (
"fmt"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
type sentPacketHistory struct {
packetList *PacketList
packetMap map[protocol.PacketNumber]*PacketElement
numOutstandingPackets int
numOutstandingCryptoPackets int
firstOutstanding *PacketElement
}
func newSentPacketHistory() *sentPacketHistory {
return &sentPacketHistory{
packetList: NewPacketList(),
packetMap: make(map[protocol.PacketNumber]*PacketElement),
}
}
func (h *sentPacketHistory) SentPacket(p *Packet) {
h.sentPacketImpl(p)
}
func (h *sentPacketHistory) sentPacketImpl(p *Packet) *PacketElement {
el := h.packetList.PushBack(*p)
h.packetMap[p.PacketNumber] = el
if h.firstOutstanding == nil {
h.firstOutstanding = el
}
if p.canBeRetransmitted {
h.numOutstandingPackets++
if p.EncryptionLevel != protocol.Encryption1RTT {
h.numOutstandingCryptoPackets++
}
}
return el
}
func (h *sentPacketHistory) SentPacketsAsRetransmission(packets []*Packet, retransmissionOf protocol.PacketNumber) {
retransmission, ok := h.packetMap[retransmissionOf]
// The retransmitted packet is not present anymore.
// This can happen if it was acked in between dequeueing of the retransmission and sending.
// Just treat the retransmissions as normal packets.
// TODO: This won't happen if we clear packets queued for retransmission on new ACKs.
if !ok {
for _, packet := range packets {
h.sentPacketImpl(packet)
}
return
}
retransmission.Value.retransmittedAs = make([]protocol.PacketNumber, len(packets))
for i, packet := range packets {
retransmission.Value.retransmittedAs[i] = packet.PacketNumber
el := h.sentPacketImpl(packet)
el.Value.isRetransmission = true
el.Value.retransmissionOf = retransmissionOf
}
}
func (h *sentPacketHistory) GetPacket(p protocol.PacketNumber) *Packet {
if el, ok := h.packetMap[p]; ok {
return &el.Value
}
return nil
}
// Iterate iterates through all packets.
// The callback must not modify the history.
func (h *sentPacketHistory) Iterate(cb func(*Packet) (cont bool, err error)) error {
cont := true
for el := h.packetList.Front(); cont && el != nil; el = el.Next() {
var err error
cont, err = cb(&el.Value)
if err != nil {
return err
}
}
return nil
}
// FirstOutstanding returns the first outstanding packet.
// It must not be modified (e.g. retransmitted).
// Use DequeueFirstPacketForRetransmission() to retransmit it.
func (h *sentPacketHistory) FirstOutstanding() *Packet {
if h.firstOutstanding == nil {
return nil
}
return &h.firstOutstanding.Value
}
// MarkCannotBeRetransmitted marks a packet as no longer retransmittable,
// so that it cannot be queued for retransmission again.
func (h *sentPacketHistory) MarkCannotBeRetransmitted(pn protocol.PacketNumber) error {
el, ok := h.packetMap[pn]
if !ok {
return fmt.Errorf("sent packet history: packet %d not found", pn)
}
if el.Value.canBeRetransmitted {
h.numOutstandingPackets--
if h.numOutstandingPackets < 0 {
panic("numOutstandingHandshakePackets negative")
}
if el.Value.EncryptionLevel != protocol.Encryption1RTT {
h.numOutstandingCryptoPackets--
if h.numOutstandingCryptoPackets < 0 {
panic("numOutstandingHandshakePackets negative")
}
}
}
el.Value.canBeRetransmitted = false
if el == h.firstOutstanding {
h.readjustFirstOutstanding()
}
return nil
}
// readjustFirstOutstanding readjusts the pointer to the first outstanding packet.
// This is necessary every time the first outstanding packet is deleted or retransmitted.
func (h *sentPacketHistory) readjustFirstOutstanding() {
el := h.firstOutstanding.Next()
for el != nil && !el.Value.canBeRetransmitted {
el = el.Next()
}
h.firstOutstanding = el
}
func (h *sentPacketHistory) Len() int {
return len(h.packetMap)
}
func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error {
el, ok := h.packetMap[p]
if !ok {
return fmt.Errorf("packet %d not found in sent packet history", p)
}
if el == h.firstOutstanding {
h.readjustFirstOutstanding()
}
if el.Value.canBeRetransmitted {
h.numOutstandingPackets--
if h.numOutstandingPackets < 0 {
panic("numOutstandingHandshakePackets negative")
}
if el.Value.EncryptionLevel != protocol.Encryption1RTT {
h.numOutstandingCryptoPackets--
if h.numOutstandingCryptoPackets < 0 {
panic("numOutstandingHandshakePackets negative")
}
}
}
h.packetList.Remove(el)
delete(h.packetMap, p)
return nil
}
func (h *sentPacketHistory) HasOutstandingPackets() bool {
return h.numOutstandingPackets > 0
}
func (h *sentPacketHistory) HasOutstandingCryptoPackets() bool {
return h.numOutstandingCryptoPackets > 0
}

View File

@ -1,22 +0,0 @@
package congestion
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
// Bandwidth of a connection
type Bandwidth uint64
const (
// BitsPerSecond is 1 bit per second
BitsPerSecond Bandwidth = 1
// BytesPerSecond is 1 byte per second
BytesPerSecond = 8 * BitsPerSecond
)
// BandwidthFromDelta calculates the bandwidth from a number of bytes and a time delta
func BandwidthFromDelta(bytes protocol.ByteCount, delta time.Duration) Bandwidth {
return Bandwidth(bytes) * Bandwidth(time.Second) / Bandwidth(delta) * BytesPerSecond
}
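
As a quick sanity check of the formula above, a standalone sketch with assumed numbers: 1200 bytes acknowledged over 50ms works out to 192 kbit/s.

package main

import (
	"fmt"
	"time"
)

type Bandwidth uint64

const (
	BitsPerSecond  Bandwidth = 1
	BytesPerSecond           = 8 * BitsPerSecond
)

// bandwidthFromDelta mirrors congestion.BandwidthFromDelta with plain integers.
func bandwidthFromDelta(bytes uint64, delta time.Duration) Bandwidth {
	return Bandwidth(bytes) * Bandwidth(time.Second) / Bandwidth(delta) * BytesPerSecond
}

func main() {
	b := bandwidthFromDelta(1200, 50*time.Millisecond)
	fmt.Println(b) // 192000 bits per second, i.e. 192 kbit/s
}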

View File

@ -1,18 +0,0 @@
package congestion
import "time"
// A Clock returns the current time
type Clock interface {
Now() time.Time
}
// DefaultClock implements the Clock interface using the Go stdlib clock.
type DefaultClock struct{}
var _ Clock = DefaultClock{}
// Now gets the current time
func (DefaultClock) Now() time.Time {
return time.Now()
}

View File

@ -1,210 +0,0 @@
package congestion
import (
"math"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
// This cubic implementation is based on the one found in Chromium's QUIC
// implementation, in the files net/quic/congestion_control/cubic.{hh,cc}.
// Constants based on TCP defaults.
// The following constants are in 2^10 fractions of a second instead of ms to
// allow a 10 shift right to divide.
// 1024*1024^3 (first 1024 is from 0.100^3)
// where 0.100 is 100 ms which is the scaling round trip time.
const cubeScale = 40
const cubeCongestionWindowScale = 410
const cubeFactor protocol.ByteCount = 1 << cubeScale / cubeCongestionWindowScale / protocol.DefaultTCPMSS
const defaultNumConnections = 2
// Default Cubic backoff factor
const beta float32 = 0.7
// Additional backoff factor when loss occurs in the concave part of the Cubic
// curve. This additional backoff factor is expected to give up bandwidth to
// new concurrent flows and speed up convergence.
const betaLastMax float32 = 0.85
// Cubic implements the cubic algorithm from TCP
type Cubic struct {
clock Clock
// Number of connections to simulate.
numConnections int
// Time when this cycle started, after last loss event.
epoch time.Time
// Max congestion window used just before last loss event.
// Note: to improve fairness to other streams an additional back off is
// applied to this value if the new value is below our latest value.
lastMaxCongestionWindow protocol.ByteCount
// Number of acked bytes since the cycle started (epoch).
ackedBytesCount protocol.ByteCount
// TCP Reno equivalent congestion window in bytes.
estimatedTCPcongestionWindow protocol.ByteCount
// Origin point of cubic function.
originPointCongestionWindow protocol.ByteCount
// Time to origin point of cubic function in 2^10 fractions of a second.
timeToOriginPoint uint32
// Last congestion window in bytes computed by cubic function.
lastTargetCongestionWindow protocol.ByteCount
}
// NewCubic returns a new Cubic instance
func NewCubic(clock Clock) *Cubic {
c := &Cubic{
clock: clock,
numConnections: defaultNumConnections,
}
c.Reset()
return c
}
// Reset is called after a timeout to reset the cubic state
func (c *Cubic) Reset() {
c.epoch = time.Time{}
c.lastMaxCongestionWindow = 0
c.ackedBytesCount = 0
c.estimatedTCPcongestionWindow = 0
c.originPointCongestionWindow = 0
c.timeToOriginPoint = 0
c.lastTargetCongestionWindow = 0
}
func (c *Cubic) alpha() float32 {
// TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
// beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
// We derive the equivalent alpha for an N-connection emulation as:
b := c.beta()
return 3 * float32(c.numConnections) * float32(c.numConnections) * (1 - b) / (1 + b)
}
func (c *Cubic) beta() float32 {
// kNConnectionBeta is the backoff factor after loss for our N-connection
// emulation, which emulates the effective backoff of an ensemble of N
// TCP-Reno connections on a single loss event. The effective multiplier is
// computed as:
return (float32(c.numConnections) - 1 + beta) / float32(c.numConnections)
}
func (c *Cubic) betaLastMax() float32 {
// betaLastMax is the additional backoff factor after loss for our
// N-connection emulation, which emulates the additional backoff of
// an ensemble of N TCP-Reno connections on a single loss event. The
// effective multiplier is computed as:
return (float32(c.numConnections) - 1 + betaLastMax) / float32(c.numConnections)
}
// OnApplicationLimited is called on ack arrival when sender is unable to use
// the available congestion window. Resets Cubic state during quiescence.
func (c *Cubic) OnApplicationLimited() {
// When sender is not using the available congestion window, the window does
// not grow. But to be RTT-independent, Cubic assumes that the sender has been
// using the entire window during the time since the beginning of the current
// "epoch" (the end of the last loss recovery period). Since
// application-limited periods break this assumption, we reset the epoch when
// in such a period. This reset effectively freezes congestion window growth
// through application-limited periods and allows Cubic growth to continue
// when the entire window is being used.
c.epoch = time.Time{}
}
// CongestionWindowAfterPacketLoss computes a new congestion window to use after
// a loss event. Returns the new congestion window in bytes. The new
// congestion window is a multiplicative decrease of our current window.
func (c *Cubic) CongestionWindowAfterPacketLoss(currentCongestionWindow protocol.ByteCount) protocol.ByteCount {
if currentCongestionWindow+protocol.DefaultTCPMSS < c.lastMaxCongestionWindow {
// We never reached the old max, so assume we are competing with another
// flow. Use our extra back off factor to allow the other flow to go up.
c.lastMaxCongestionWindow = protocol.ByteCount(c.betaLastMax() * float32(currentCongestionWindow))
} else {
c.lastMaxCongestionWindow = currentCongestionWindow
}
c.epoch = time.Time{} // Reset time.
return protocol.ByteCount(float32(currentCongestionWindow) * c.beta())
}
// CongestionWindowAfterAck computes a new congestion window to use after a received ACK.
// Returns the new congestion window in bytes. The new congestion window
// follows a cubic function that depends on the time passed since last
// packet loss.
func (c *Cubic) CongestionWindowAfterAck(
ackedBytes protocol.ByteCount,
currentCongestionWindow protocol.ByteCount,
delayMin time.Duration,
eventTime time.Time,
) protocol.ByteCount {
c.ackedBytesCount += ackedBytes
if c.epoch.IsZero() {
// First ACK after a loss event.
c.epoch = eventTime // Start of epoch.
c.ackedBytesCount = ackedBytes // Reset count.
// Reset estimated_tcp_congestion_window_ to be in sync with cubic.
c.estimatedTCPcongestionWindow = currentCongestionWindow
if c.lastMaxCongestionWindow <= currentCongestionWindow {
c.timeToOriginPoint = 0
c.originPointCongestionWindow = currentCongestionWindow
} else {
c.timeToOriginPoint = uint32(math.Cbrt(float64(cubeFactor * (c.lastMaxCongestionWindow - currentCongestionWindow))))
c.originPointCongestionWindow = c.lastMaxCongestionWindow
}
}
// Change the time unit from microseconds to 2^10 fractions per second. Take
// the round trip time in account. This is done to allow us to use shift as a
// divide operator.
elapsedTime := int64(eventTime.Add(delayMin).Sub(c.epoch)/time.Microsecond) << 10 / (1000 * 1000)
// Right-shifts of negative, signed numbers have implementation-dependent
// behavior, so force the offset to be positive, as is done in the kernel.
offset := int64(c.timeToOriginPoint) - elapsedTime
if offset < 0 {
offset = -offset
}
deltaCongestionWindow := protocol.ByteCount(cubeCongestionWindowScale*offset*offset*offset) * protocol.DefaultTCPMSS >> cubeScale
var targetCongestionWindow protocol.ByteCount
if elapsedTime > int64(c.timeToOriginPoint) {
targetCongestionWindow = c.originPointCongestionWindow + deltaCongestionWindow
} else {
targetCongestionWindow = c.originPointCongestionWindow - deltaCongestionWindow
}
// Limit the CWND increase to half the acked bytes.
targetCongestionWindow = utils.MinByteCount(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2)
// Increase the window by approximately Alpha * 1 MSS of bytes every
// time we ack an estimated tcp window of bytes. For small
// congestion windows (less than 25), the formula below will
// increase slightly slower than linearly per estimated tcp window
// of bytes.
c.estimatedTCPcongestionWindow += protocol.ByteCount(float32(c.ackedBytesCount) * c.alpha() * float32(protocol.DefaultTCPMSS) / float32(c.estimatedTCPcongestionWindow))
c.ackedBytesCount = 0
// We have a new cubic congestion window.
c.lastTargetCongestionWindow = targetCongestionWindow
// Compute target congestion_window based on cubic target and estimated TCP
// congestion_window, use highest (fastest).
if targetCongestionWindow < c.estimatedTCPcongestionWindow {
targetCongestionWindow = c.estimatedTCPcongestionWindow
}
return targetCongestionWindow
}
// SetNumConnections sets the number of emulated connections
func (c *Cubic) SetNumConnections(n int) {
c.numConnections = n
}
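
A short sketch of the N-connection emulation factors defined by alpha() and beta() above, worked out for the default of two emulated connections; the numbers are just the formulas evaluated, not taken from a test:

package main

import "fmt"

func main() {
	const numConnections = 2
	const beta float32 = 0.7 // default Cubic backoff factor, as above

	// beta(): effective backoff factor for an ensemble of N TCP-Reno connections
	b := (float32(numConnections) - 1 + beta) / float32(numConnections)
	// alpha(): the matching additive-increase factor
	a := 3 * float32(numConnections) * float32(numConnections) * (1 - b) / (1 + b)

	fmt.Println(b) // 0.85
	fmt.Println(a) // ~0.973, i.e. slightly less than one MSS per acked window
}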

View File

@ -1,318 +0,0 @@
package congestion
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
const (
maxBurstBytes = 3 * protocol.DefaultTCPMSS
renoBeta float32 = 0.7 // Reno backoff factor.
defaultMinimumCongestionWindow protocol.ByteCount = 2 * protocol.DefaultTCPMSS
)
type cubicSender struct {
hybridSlowStart HybridSlowStart
prr PrrSender
rttStats *RTTStats
stats connectionStats
cubic *Cubic
reno bool
// Track the largest packet that has been sent.
largestSentPacketNumber protocol.PacketNumber
// Track the largest packet that has been acked.
largestAckedPacketNumber protocol.PacketNumber
// Track the largest packet number outstanding when a CWND cutback occurs.
largestSentAtLastCutback protocol.PacketNumber
// Whether the last loss event caused us to exit slowstart.
// Used for stats collection of slowstartPacketsLost
lastCutbackExitedSlowstart bool
// When true, exit slow start with large cutback of congestion window.
slowStartLargeReduction bool
// Congestion window in bytes.
congestionWindow protocol.ByteCount
// Minimum congestion window in bytes.
minCongestionWindow protocol.ByteCount
// Maximum congestion window.
maxCongestionWindow protocol.ByteCount
// Slow start congestion window in bytes, aka ssthresh.
slowstartThreshold protocol.ByteCount
// Number of connections to simulate.
numConnections int
// ACK counter for the Reno implementation.
numAckedPackets uint64
initialCongestionWindow protocol.ByteCount
initialMaxCongestionWindow protocol.ByteCount
minSlowStartExitWindow protocol.ByteCount
}
var _ SendAlgorithm = &cubicSender{}
var _ SendAlgorithmWithDebugInfo = &cubicSender{}
// NewCubicSender makes a new cubic sender
func NewCubicSender(clock Clock, rttStats *RTTStats, reno bool, initialCongestionWindow, initialMaxCongestionWindow protocol.ByteCount) SendAlgorithmWithDebugInfo {
return &cubicSender{
rttStats: rttStats,
initialCongestionWindow: initialCongestionWindow,
initialMaxCongestionWindow: initialMaxCongestionWindow,
congestionWindow: initialCongestionWindow,
minCongestionWindow: defaultMinimumCongestionWindow,
slowstartThreshold: initialMaxCongestionWindow,
maxCongestionWindow: initialMaxCongestionWindow,
numConnections: defaultNumConnections,
cubic: NewCubic(clock),
reno: reno,
}
}
// TimeUntilSend returns the delay until the next packet should be sent.
func (c *cubicSender) TimeUntilSend(bytesInFlight protocol.ByteCount) time.Duration {
if c.InRecovery() {
// PRR is used when in recovery.
if c.prr.CanSend(c.GetCongestionWindow(), bytesInFlight, c.GetSlowStartThreshold()) {
return 0
}
}
delay := c.rttStats.SmoothedRTT() / time.Duration(2*c.GetCongestionWindow())
if !c.InSlowStart() { // adjust delay, such that the sending rate is 1.25*cwnd/rtt
delay = delay * 8 / 5
}
return delay
}
func (c *cubicSender) OnPacketSent(
sentTime time.Time,
bytesInFlight protocol.ByteCount,
packetNumber protocol.PacketNumber,
bytes protocol.ByteCount,
isRetransmittable bool,
) {
if !isRetransmittable {
return
}
if c.InRecovery() {
// PRR is used when in recovery.
c.prr.OnPacketSent(bytes)
}
c.largestSentPacketNumber = packetNumber
c.hybridSlowStart.OnPacketSent(packetNumber)
}
func (c *cubicSender) InRecovery() bool {
return c.largestAckedPacketNumber <= c.largestSentAtLastCutback && c.largestAckedPacketNumber != 0
}
func (c *cubicSender) InSlowStart() bool {
return c.GetCongestionWindow() < c.GetSlowStartThreshold()
}
func (c *cubicSender) GetCongestionWindow() protocol.ByteCount {
return c.congestionWindow
}
func (c *cubicSender) GetSlowStartThreshold() protocol.ByteCount {
return c.slowstartThreshold
}
func (c *cubicSender) ExitSlowstart() {
c.slowstartThreshold = c.congestionWindow
}
func (c *cubicSender) SlowstartThreshold() protocol.ByteCount {
return c.slowstartThreshold
}
func (c *cubicSender) MaybeExitSlowStart() {
if c.InSlowStart() && c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/protocol.DefaultTCPMSS) {
c.ExitSlowstart()
}
}
func (c *cubicSender) OnPacketAcked(
ackedPacketNumber protocol.PacketNumber,
ackedBytes protocol.ByteCount,
priorInFlight protocol.ByteCount,
eventTime time.Time,
) {
c.largestAckedPacketNumber = utils.MaxPacketNumber(ackedPacketNumber, c.largestAckedPacketNumber)
if c.InRecovery() {
// PRR is used when in recovery.
c.prr.OnPacketAcked(ackedBytes)
return
}
c.maybeIncreaseCwnd(ackedPacketNumber, ackedBytes, priorInFlight, eventTime)
if c.InSlowStart() {
c.hybridSlowStart.OnPacketAcked(ackedPacketNumber)
}
}
func (c *cubicSender) OnPacketLost(
packetNumber protocol.PacketNumber,
lostBytes protocol.ByteCount,
priorInFlight protocol.ByteCount,
) {
// TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
// already sent should be treated as a single loss event, since it's expected.
if packetNumber <= c.largestSentAtLastCutback {
if c.lastCutbackExitedSlowstart {
c.stats.slowstartPacketsLost++
c.stats.slowstartBytesLost += lostBytes
if c.slowStartLargeReduction {
// Reduce congestion window by lost_bytes for every loss.
c.congestionWindow = utils.MaxByteCount(c.congestionWindow-lostBytes, c.minSlowStartExitWindow)
c.slowstartThreshold = c.congestionWindow
}
}
return
}
c.lastCutbackExitedSlowstart = c.InSlowStart()
if c.InSlowStart() {
c.stats.slowstartPacketsLost++
}
c.prr.OnPacketLost(priorInFlight)
// TODO(chromium): Separate out all of slow start into a separate class.
if c.slowStartLargeReduction && c.InSlowStart() {
if c.congestionWindow >= 2*c.initialCongestionWindow {
c.minSlowStartExitWindow = c.congestionWindow / 2
}
c.congestionWindow -= protocol.DefaultTCPMSS
} else if c.reno {
c.congestionWindow = protocol.ByteCount(float32(c.congestionWindow) * c.RenoBeta())
} else {
c.congestionWindow = c.cubic.CongestionWindowAfterPacketLoss(c.congestionWindow)
}
if c.congestionWindow < c.minCongestionWindow {
c.congestionWindow = c.minCongestionWindow
}
c.slowstartThreshold = c.congestionWindow
c.largestSentAtLastCutback = c.largestSentPacketNumber
// reset packet count from congestion avoidance mode. We start
// counting again when we're out of recovery.
c.numAckedPackets = 0
}
func (c *cubicSender) RenoBeta() float32 {
// kNConnectionBeta is the backoff factor after loss for our N-connection
// emulation, which emulates the effective backoff of an ensemble of N
// TCP-Reno connections on a single loss event. The effective multiplier is
// computed as:
return (float32(c.numConnections) - 1. + renoBeta) / float32(c.numConnections)
}
// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
func (c *cubicSender) maybeIncreaseCwnd(
ackedPacketNumber protocol.PacketNumber,
ackedBytes protocol.ByteCount,
priorInFlight protocol.ByteCount,
eventTime time.Time,
) {
// Do not increase the congestion window unless the sender is close to using
// the current window.
if !c.isCwndLimited(priorInFlight) {
c.cubic.OnApplicationLimited()
return
}
if c.congestionWindow >= c.maxCongestionWindow {
return
}
if c.InSlowStart() {
// TCP slow start, exponential growth, increase by one for each ACK.
c.congestionWindow += protocol.DefaultTCPMSS
return
}
// Congestion avoidance
if c.reno {
// Classic Reno congestion avoidance.
c.numAckedPackets++
// Divide by num_connections to smoothly increase the CWND at a faster
// rate than conventional Reno.
if c.numAckedPackets*uint64(c.numConnections) >= uint64(c.congestionWindow)/uint64(protocol.DefaultTCPMSS) {
c.congestionWindow += protocol.DefaultTCPMSS
c.numAckedPackets = 0
}
} else {
c.congestionWindow = utils.MinByteCount(c.maxCongestionWindow, c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime))
}
}
func (c *cubicSender) isCwndLimited(bytesInFlight protocol.ByteCount) bool {
congestionWindow := c.GetCongestionWindow()
if bytesInFlight >= congestionWindow {
return true
}
availableBytes := congestionWindow - bytesInFlight
slowStartLimited := c.InSlowStart() && bytesInFlight > congestionWindow/2
return slowStartLimited || availableBytes <= maxBurstBytes
}
// BandwidthEstimate returns the current bandwidth estimate
func (c *cubicSender) BandwidthEstimate() Bandwidth {
srtt := c.rttStats.SmoothedRTT()
if srtt == 0 {
// If we haven't measured an rtt, the bandwidth estimate is unknown.
return 0
}
return BandwidthFromDelta(c.GetCongestionWindow(), srtt)
}
// HybridSlowStart returns the hybrid slow start instance for testing
func (c *cubicSender) HybridSlowStart() *HybridSlowStart {
return &c.hybridSlowStart
}
// SetNumEmulatedConnections sets the number of emulated connections
func (c *cubicSender) SetNumEmulatedConnections(n int) {
c.numConnections = utils.Max(n, 1)
c.cubic.SetNumConnections(c.numConnections)
}
// OnRetransmissionTimeout is called on a retransmission timeout
func (c *cubicSender) OnRetransmissionTimeout(packetsRetransmitted bool) {
c.largestSentAtLastCutback = 0
if !packetsRetransmitted {
return
}
c.hybridSlowStart.Restart()
c.cubic.Reset()
c.slowstartThreshold = c.congestionWindow / 2
c.congestionWindow = c.minCongestionWindow
}
// OnConnectionMigration is called when the connection is migrated
func (c *cubicSender) OnConnectionMigration() {
c.hybridSlowStart.Restart()
c.prr = PrrSender{}
c.largestSentPacketNumber = 0
c.largestAckedPacketNumber = 0
c.largestSentAtLastCutback = 0
c.lastCutbackExitedSlowstart = false
c.cubic.Reset()
c.numAckedPackets = 0
c.congestionWindow = c.initialCongestionWindow
c.slowstartThreshold = c.initialMaxCongestionWindow
c.maxCongestionWindow = c.initialMaxCongestionWindow
}
// SetSlowStartLargeReduction allows enabling the SSLR experiment
func (c *cubicSender) SetSlowStartLargeReduction(enabled bool) {
c.slowStartLargeReduction = enabled
}

View File

@ -1,111 +0,0 @@
package congestion
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
// Note(pwestin): the magic clamping numbers come from the original code in
// tcp_cubic.c.
const hybridStartLowWindow = protocol.ByteCount(16)
// Number of delay samples for detecting the increase of delay.
const hybridStartMinSamples = uint32(8)
// Exit slow start if the min rtt has increased by more than 1/8th.
const hybridStartDelayFactorExp = 3 // 2^3 = 8
// The original paper specifies 2 and 8ms, but those have changed over time.
const hybridStartDelayMinThresholdUs = int64(4000)
const hybridStartDelayMaxThresholdUs = int64(16000)
// HybridSlowStart implements the TCP hybrid slow start algorithm
type HybridSlowStart struct {
endPacketNumber protocol.PacketNumber
lastSentPacketNumber protocol.PacketNumber
started bool
currentMinRTT time.Duration
rttSampleCount uint32
hystartFound bool
}
// StartReceiveRound is called for the start of each receive round (burst) in the slow start phase.
func (s *HybridSlowStart) StartReceiveRound(lastSent protocol.PacketNumber) {
s.endPacketNumber = lastSent
s.currentMinRTT = 0
s.rttSampleCount = 0
s.started = true
}
// IsEndOfRound returns true if this ack is the last packet number of our current slow start round.
func (s *HybridSlowStart) IsEndOfRound(ack protocol.PacketNumber) bool {
return s.endPacketNumber < ack
}
// ShouldExitSlowStart should be called on every new ack frame, since a new
// RTT measurement can be made then.
// latestRTT: the RTT for this ack packet.
// minRTT: the lowest delay (RTT) we have seen during the session.
// congestionWindow: the congestion window in packets.
func (s *HybridSlowStart) ShouldExitSlowStart(latestRTT time.Duration, minRTT time.Duration, congestionWindow protocol.ByteCount) bool {
if !s.started {
// Time to start the hybrid slow start.
s.StartReceiveRound(s.lastSentPacketNumber)
}
if s.hystartFound {
return true
}
// Second detection parameter - delay increase detection.
// Compare the minimum delay (s.currentMinRTT) of the current
// burst of packets relative to the minimum delay during the session.
// Note: we only look at the first few(8) packets in each burst, since we
// only want to compare the lowest RTT of the burst relative to previous
// bursts.
s.rttSampleCount++
if s.rttSampleCount <= hybridStartMinSamples {
if s.currentMinRTT == 0 || s.currentMinRTT > latestRTT {
s.currentMinRTT = latestRTT
}
}
// We only need to check this once per round.
if s.rttSampleCount == hybridStartMinSamples {
// Divide minRTT by 8 to get an RTT increase threshold for exiting.
minRTTincreaseThresholdUs := int64(minRTT / time.Microsecond >> hybridStartDelayFactorExp)
// Ensure the RTT threshold is never less than 4ms or more than 16ms.
minRTTincreaseThresholdUs = utils.MinInt64(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs)
minRTTincreaseThreshold := time.Duration(utils.MaxInt64(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond
if s.currentMinRTT > (minRTT + minRTTincreaseThreshold) {
s.hystartFound = true
}
}
// Exit from slow start if the cwnd is greater than 16 and
// increasing delay is found.
return congestionWindow >= hybridStartLowWindow && s.hystartFound
}
// OnPacketSent is called when a packet was sent
func (s *HybridSlowStart) OnPacketSent(packetNumber protocol.PacketNumber) {
s.lastSentPacketNumber = packetNumber
}
// OnPacketAcked gets invoked after ShouldExitSlowStart, so it's best to end
// the round when the final packet of the burst is received and start it on
// the next incoming ack.
func (s *HybridSlowStart) OnPacketAcked(ackedPacketNumber protocol.PacketNumber) {
if s.IsEndOfRound(ackedPacketNumber) {
s.started = false
}
}
// Started returns true if started
func (s *HybridSlowStart) Started() bool {
return s.started
}
// Restart the slow start phase
func (s *HybridSlowStart) Restart() {
s.started = false
s.hystartFound = false
}
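
A small worked example of the delay-increase threshold computed in ShouldExitSlowStart, using an assumed session minimum RTT of 40ms:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		delayFactorExp    = 3 // threshold = minRTT / 8
		delayMinThreshold = 4 * time.Millisecond
		delayMaxThreshold = 16 * time.Millisecond
	)

	minRTT := 40 * time.Millisecond // assumed lowest RTT seen so far

	threshold := minRTT >> delayFactorExp // 5ms
	if threshold > delayMaxThreshold {
		threshold = delayMaxThreshold
	}
	if threshold < delayMinThreshold {
		threshold = delayMinThreshold
	}

	// Slow start is left once the per-round minimum RTT exceeds minRTT + threshold
	// (and the congestion window is at least 16 packets).
	fmt.Println(minRTT + threshold) // 45ms
}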

View File

@ -1,36 +0,0 @@
package congestion
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
// A SendAlgorithm performs congestion control and calculates the congestion window
type SendAlgorithm interface {
TimeUntilSend(bytesInFlight protocol.ByteCount) time.Duration
OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool)
GetCongestionWindow() protocol.ByteCount
MaybeExitSlowStart()
OnPacketAcked(number protocol.PacketNumber, ackedBytes protocol.ByteCount, priorInFlight protocol.ByteCount, eventTime time.Time)
OnPacketLost(number protocol.PacketNumber, lostBytes protocol.ByteCount, priorInFlight protocol.ByteCount)
SetNumEmulatedConnections(n int)
OnRetransmissionTimeout(packetsRetransmitted bool)
OnConnectionMigration()
// Experiments
SetSlowStartLargeReduction(enabled bool)
}
// SendAlgorithmWithDebugInfo adds some debug functions to SendAlgorithm
type SendAlgorithmWithDebugInfo interface {
SendAlgorithm
BandwidthEstimate() Bandwidth
// Stuff only used in testing
HybridSlowStart() *HybridSlowStart
SlowstartThreshold() protocol.ByteCount
RenoBeta() float32
InRecovery() bool
}

View File

@ -1,54 +0,0 @@
package congestion
import (
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
// PrrSender implements the Proportional Rate Reduction (PRR) per RFC 6937
type PrrSender struct {
bytesSentSinceLoss protocol.ByteCount
bytesDeliveredSinceLoss protocol.ByteCount
ackCountSinceLoss protocol.ByteCount
bytesInFlightBeforeLoss protocol.ByteCount
}
// OnPacketSent should be called after a packet was sent
func (p *PrrSender) OnPacketSent(sentBytes protocol.ByteCount) {
p.bytesSentSinceLoss += sentBytes
}
// OnPacketLost should be called on the first loss that triggers a recovery
// period and all other methods in this class should only be called when in
// recovery.
func (p *PrrSender) OnPacketLost(priorInFlight protocol.ByteCount) {
p.bytesSentSinceLoss = 0
p.bytesInFlightBeforeLoss = priorInFlight
p.bytesDeliveredSinceLoss = 0
p.ackCountSinceLoss = 0
}
// OnPacketAcked should be called after a packet was acked
func (p *PrrSender) OnPacketAcked(ackedBytes protocol.ByteCount) {
p.bytesDeliveredSinceLoss += ackedBytes
p.ackCountSinceLoss++
}
// CanSend returns if packets can be sent
func (p *PrrSender) CanSend(congestionWindow, bytesInFlight, slowstartThreshold protocol.ByteCount) bool {
// Allow sending immediately if nothing was sent since the loss, or if less than one MSS is in flight, in order to ensure limited transmit always works.
if p.bytesSentSinceLoss == 0 || bytesInFlight < protocol.DefaultTCPMSS {
return true
}
if congestionWindow > bytesInFlight {
// During PRR-SSRB, limit outgoing packets to 1 extra MSS per ack, instead
// of sending the entire available window. This prevents burst retransmits
// when more packets are lost than the CWND reduction.
// limit = MAX(prr_delivered - prr_out, DeliveredData) + MSS
return p.bytesDeliveredSinceLoss+p.ackCountSinceLoss*protocol.DefaultTCPMSS > p.bytesSentSinceLoss
}
// Implement Proportional Rate Reduction (RFC6937).
// Checks a simplified version of the PRR formula that doesn't use division:
// AvailableSendWindow =
// CEIL(prr_delivered * ssthresh / BytesInFlightAtLoss) - prr_sent
return p.bytesDeliveredSinceLoss*slowstartThreshold > p.bytesSentSinceLoss*p.bytesInFlightBeforeLoss
}
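
A standalone sketch of the PRR check in the congestion-limited branch above, with assumed byte counts; it mirrors the simplified formula prr_delivered * ssthresh > prr_out * BytesInFlightAtLoss:

package main

import "fmt"

func main() {
	// Assumed state after a loss event (all values in bytes):
	var (
		bytesInFlightBeforeLoss uint64 = 10000 // in flight when the loss occurred
		slowstartThreshold      uint64 = 5000  // cwnd was halved
		bytesDeliveredSinceLoss uint64 = 2000  // acked during recovery so far
		bytesSentSinceLoss      uint64 = 800   // sent during recovery so far
	)

	// PRR: allow sending while prr_delivered * ssthresh > prr_out * BytesInFlightAtLoss.
	canSend := bytesDeliveredSinceLoss*slowstartThreshold > bytesSentSinceLoss*bytesInFlightBeforeLoss
	fmt.Println(canSend) // true: 10,000,000 > 8,000,000
}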

View File

@ -1,101 +0,0 @@
package congestion
import (
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
const (
rttAlpha float32 = 0.125
oneMinusAlpha float32 = (1 - rttAlpha)
rttBeta float32 = 0.25
oneMinusBeta float32 = (1 - rttBeta)
// The default RTT used before an RTT sample is taken.
defaultInitialRTT = 100 * time.Millisecond
)
// RTTStats provides round-trip statistics
type RTTStats struct {
minRTT time.Duration
latestRTT time.Duration
smoothedRTT time.Duration
meanDeviation time.Duration
}
// NewRTTStats makes a properly initialized RTTStats object
func NewRTTStats() *RTTStats {
return &RTTStats{}
}
// MinRTT returns the minRTT for the entire connection.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) MinRTT() time.Duration { return r.minRTT }
// LatestRTT returns the most recent rtt measurement.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) LatestRTT() time.Duration { return r.latestRTT }
// SmoothedRTT returns the EWMA smoothed RTT for the connection.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) SmoothedRTT() time.Duration { return r.smoothedRTT }
// SmoothedOrInitialRTT returns the EWMA smoothed RTT for the connection.
// If no valid updates have occurred, it returns the initial RTT.
func (r *RTTStats) SmoothedOrInitialRTT() time.Duration {
if r.smoothedRTT != 0 {
return r.smoothedRTT
}
return defaultInitialRTT
}
// MeanDeviation gets the mean deviation
func (r *RTTStats) MeanDeviation() time.Duration { return r.meanDeviation }
// UpdateRTT updates the RTT based on a new sample.
func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) {
if sendDelta == utils.InfDuration || sendDelta <= 0 {
return
}
// Update r.minRTT first. r.minRTT does not use an rttSample corrected for
// ackDelay but the raw observed sendDelta, since poor clock granularity at
// the client may cause a high ackDelay to result in underestimation of the
// r.minRTT.
if r.minRTT == 0 || r.minRTT > sendDelta {
r.minRTT = sendDelta
}
// Correct for ackDelay if information received from the peer results in an
// RTT sample at least as large as minRTT. Otherwise, only use the
// sendDelta.
sample := sendDelta
if sample-r.minRTT >= ackDelay {
sample -= ackDelay
}
r.latestRTT = sample
// First time call.
if r.smoothedRTT == 0 {
r.smoothedRTT = sample
r.meanDeviation = sample / 2
} else {
r.meanDeviation = time.Duration(oneMinusBeta*float32(r.meanDeviation/time.Microsecond)+rttBeta*float32(utils.AbsDuration(r.smoothedRTT-sample)/time.Microsecond)) * time.Microsecond
r.smoothedRTT = time.Duration((float32(r.smoothedRTT/time.Microsecond)*oneMinusAlpha)+(float32(sample/time.Microsecond)*rttAlpha)) * time.Microsecond
}
}
// OnConnectionMigration is called when the connection migrates and the RTT measurement needs to be reset.
func (r *RTTStats) OnConnectionMigration() {
r.latestRTT = 0
r.minRTT = 0
r.smoothedRTT = 0
r.meanDeviation = 0
}
// ExpireSmoothedMetrics causes the smoothed_rtt to be increased to the latest_rtt if the latest_rtt
// is larger. The mean deviation is increased to the most recent deviation if
// it's larger.
func (r *RTTStats) ExpireSmoothedMetrics() {
r.meanDeviation = utils.MaxDuration(r.meanDeviation, utils.AbsDuration(r.smoothedRTT-r.latestRTT))
r.smoothedRTT = utils.MaxDuration(r.smoothedRTT, r.latestRTT)
}
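
A small worked example of the EWMA update in UpdateRTT, using two assumed samples: a first sample of 100ms, then a second sample of 60ms (already corrected for ackDelay):

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		rttAlpha float32 = 0.125
		rttBeta  float32 = 0.25
	)

	// The first sample initializes the estimators.
	smoothed := 100 * time.Millisecond
	meanDev := smoothed / 2 // 50ms

	// The second sample is blended in with the same weights as above.
	sample := 60 * time.Millisecond
	diff := smoothed - sample
	if diff < 0 {
		diff = -diff
	}
	meanDev = time.Duration((1-rttBeta)*float32(meanDev/time.Microsecond)+rttBeta*float32(diff/time.Microsecond)) * time.Microsecond
	smoothed = time.Duration(float32(smoothed/time.Microsecond)*(1-rttAlpha)+float32(sample/time.Microsecond)*rttAlpha) * time.Microsecond

	fmt.Println(smoothed, meanDev) // 95ms 47.5ms
}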

View File

@ -1,8 +0,0 @@
package congestion
import "v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
type connectionStats struct {
slowstartPacketsLost protocol.PacketNumber
slowstartBytesLost protocol.ByteCount
}

View File

@ -1,10 +0,0 @@
package crypto
import "v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
// An AEAD implements QUIC's authenticated encryption with associated data
type AEAD interface {
Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, error)
Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte
Overhead() int
}

View File

@ -1,74 +0,0 @@
package crypto
import (
"crypto/aes"
"crypto/cipher"
"encoding/binary"
"errors"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
type aeadAESGCM struct {
otherIV []byte
myIV []byte
encrypter cipher.AEAD
decrypter cipher.AEAD
}
var _ AEAD = &aeadAESGCM{}
const ivLen = 12
// NewAEADAESGCM creates an AEAD using AES-GCM
func NewAEADAESGCM(otherKey []byte, myKey []byte, otherIV []byte, myIV []byte) (AEAD, error) {
// the IVs need to be exactly 12 bytes long, otherwise we can't compute the nonce
if len(otherIV) != ivLen || len(myIV) != ivLen {
return nil, errors.New("AES-GCM: expected 12 byte IVs")
}
encrypterCipher, err := aes.NewCipher(myKey)
if err != nil {
return nil, err
}
encrypter, err := cipher.NewGCM(encrypterCipher)
if err != nil {
return nil, err
}
decrypterCipher, err := aes.NewCipher(otherKey)
if err != nil {
return nil, err
}
decrypter, err := cipher.NewGCM(decrypterCipher)
if err != nil {
return nil, err
}
return &aeadAESGCM{
otherIV: otherIV,
myIV: myIV,
encrypter: encrypter,
decrypter: decrypter,
}, nil
}
func (aead *aeadAESGCM) Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, error) {
return aead.decrypter.Open(dst, aead.makeNonce(aead.otherIV, packetNumber), src, associatedData)
}
func (aead *aeadAESGCM) Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte {
return aead.encrypter.Seal(dst, aead.makeNonce(aead.myIV, packetNumber), src, associatedData)
}
func (aead *aeadAESGCM) makeNonce(iv []byte, packetNumber protocol.PacketNumber) []byte {
nonce := make([]byte, ivLen)
binary.BigEndian.PutUint64(nonce[ivLen-8:], uint64(packetNumber))
for i := 0; i < ivLen; i++ {
nonce[i] ^= iv[i]
}
return nonce
}
func (aead *aeadAESGCM) Overhead() int {
return aead.encrypter.Overhead()
}
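
A self-contained illustration of the nonce construction in makeNonce: the 12-byte IV is XORed with the packet number written big-endian into its last 8 bytes. The IV bytes here are made up.

package main

import (
	"encoding/binary"
	"fmt"
)

func makeNonce(iv []byte, packetNumber uint64) []byte {
	nonce := make([]byte, len(iv))
	binary.BigEndian.PutUint64(nonce[len(iv)-8:], packetNumber)
	for i := range nonce {
		nonce[i] ^= iv[i]
	}
	return nonce
}

func main() {
	iv := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b} // assumed IV
	fmt.Printf("%x\n", makeNonce(iv, 1))
	// 000102030405060708090a0a: only the last byte differs from the IV for packet number 1
}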

View File

@ -1,58 +0,0 @@
package crypto
import (
"crypto"
"crypto/hmac"
"encoding/binary"
)
// copied from https://github.com/cloudflare/tls-tris/blob/master/hkdf.go
func hkdfExtract(hash crypto.Hash, secret, salt []byte) []byte {
if salt == nil {
salt = make([]byte, hash.Size())
}
if secret == nil {
secret = make([]byte, hash.Size())
}
extractor := hmac.New(hash.New, salt)
extractor.Write(secret)
return extractor.Sum(nil)
}
// copied from https://github.com/cloudflare/tls-tris/blob/master/hkdf.go
func hkdfExpand(hash crypto.Hash, prk, info []byte, l int) []byte {
var (
expander = hmac.New(hash.New, prk)
res = make([]byte, l)
counter = byte(1)
prev []byte
)
if l > 255*expander.Size() {
panic("hkdf: requested too much output")
}
p := res
for len(p) > 0 {
expander.Reset()
expander.Write(prev)
expander.Write(info)
expander.Write([]byte{counter})
prev = expander.Sum(prev[:0])
counter++
n := copy(p, prev)
p = p[n:]
}
return res
}
// HkdfExpandLabel HKDF expands a label
func HkdfExpandLabel(hash crypto.Hash, secret []byte, label string, length int) []byte {
const prefix = "quic "
qlabel := make([]byte, 2 /* length */ +1 /* length of label */ +len(prefix)+len(label)+1 /* length of context (empty) */)
binary.BigEndian.PutUint16(qlabel[0:2], uint16(length))
qlabel[2] = uint8(len(prefix) + len(label))
copy(qlabel[3:], []byte(prefix+label))
return hkdfExpand(hash, secret, qlabel, length)
}
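
A short sketch of the label encoding that HkdfExpandLabel builds before calling hkdfExpand; for the label "key" and an output length of 16 the encoded structure is twelve bytes: a 2-byte output length, a 1-byte label length, "quic " plus the label, and a zero byte for the empty context.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const prefix = "quic "
	label := "key"
	length := 16

	qlabel := make([]byte, 2+1+len(prefix)+len(label)+1)
	binary.BigEndian.PutUint16(qlabel[0:2], uint16(length))
	qlabel[2] = uint8(len(prefix) + len(label))
	copy(qlabel[3:], prefix+label)

	fmt.Printf("%x\n", qlabel) // 00100871756963206b657900
}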

View File

@ -1,41 +0,0 @@
package crypto
import (
"crypto"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
var quicVersion1Salt = []byte{0x9c, 0x10, 0x8f, 0x98, 0x52, 0x0a, 0x5c, 0x5c, 0x32, 0x96, 0x8e, 0x95, 0x0e, 0x8a, 0x2c, 0x5f, 0xe0, 0x6d, 0x6c, 0x38}
// NewNullAEAD creates a NullAEAD
func NewNullAEAD(connectionID protocol.ConnectionID, pers protocol.Perspective) (AEAD, error) {
clientSecret, serverSecret := computeSecrets(connectionID)
var mySecret, otherSecret []byte
if pers == protocol.PerspectiveClient {
mySecret = clientSecret
otherSecret = serverSecret
} else {
mySecret = serverSecret
otherSecret = clientSecret
}
myKey, myIV := computeNullAEADKeyAndIV(mySecret)
otherKey, otherIV := computeNullAEADKeyAndIV(otherSecret)
return NewAEADAESGCM(otherKey, myKey, otherIV, myIV)
}
func computeSecrets(connID protocol.ConnectionID) (clientSecret, serverSecret []byte) {
initialSecret := hkdfExtract(crypto.SHA256, connID, quicVersion1Salt)
clientSecret = HkdfExpandLabel(crypto.SHA256, initialSecret, "client in", crypto.SHA256.Size())
serverSecret = HkdfExpandLabel(crypto.SHA256, initialSecret, "server in", crypto.SHA256.Size())
return
}
func computeNullAEADKeyAndIV(secret []byte) (key, iv []byte) {
key = HkdfExpandLabel(crypto.SHA256, secret, "key", 16)
iv = HkdfExpandLabel(crypto.SHA256, secret, "iv", 12)
return
}

View File

@ -1,122 +0,0 @@
package flowcontrol
import (
"sync"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/congestion"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
type baseFlowController struct {
// for sending data
bytesSent protocol.ByteCount
sendWindow protocol.ByteCount
lastBlockedAt protocol.ByteCount
// for receiving data
mutex sync.RWMutex
bytesRead protocol.ByteCount
highestReceived protocol.ByteCount
receiveWindow protocol.ByteCount
receiveWindowSize protocol.ByteCount
maxReceiveWindowSize protocol.ByteCount
epochStartTime time.Time
epochStartOffset protocol.ByteCount
rttStats *congestion.RTTStats
logger utils.Logger
}
// IsNewlyBlocked says if it is newly blocked by flow control.
// For every offset, it only returns true once.
// If it is blocked, the offset is returned.
func (c *baseFlowController) IsNewlyBlocked() (bool, protocol.ByteCount) {
if c.sendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt {
return false, 0
}
c.lastBlockedAt = c.sendWindow
return true, c.sendWindow
}
func (c *baseFlowController) AddBytesSent(n protocol.ByteCount) {
c.bytesSent += n
}
// UpdateSendWindow should be called after receiving a WindowUpdateFrame.
// The send window is only ever increased; an offset smaller than the current window is ignored.
func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) {
if offset > c.sendWindow {
c.sendWindow = offset
}
}
func (c *baseFlowController) sendWindowSize() protocol.ByteCount {
// this only happens during connection establishment, when data is sent before we receive the peer's transport parameters
if c.bytesSent > c.sendWindow {
return 0
}
return c.sendWindow - c.bytesSent
}
func (c *baseFlowController) AddBytesRead(n protocol.ByteCount) {
c.mutex.Lock()
defer c.mutex.Unlock()
// pretend we sent a WindowUpdate when reading the first byte
// this way auto-tuning of the window size already works for the first WindowUpdate
if c.bytesRead == 0 {
c.startNewAutoTuningEpoch()
}
c.bytesRead += n
}
func (c *baseFlowController) hasWindowUpdate() bool {
bytesRemaining := c.receiveWindow - c.bytesRead
// update the window when more than the threshold was consumed
return bytesRemaining <= protocol.ByteCount(float64(c.receiveWindowSize) * float64(1-protocol.WindowUpdateThreshold))
}
// getWindowUpdate updates the receive window, if necessary
// it returns the new offset
func (c *baseFlowController) getWindowUpdate() protocol.ByteCount {
if !c.hasWindowUpdate() {
return 0
}
c.maybeAdjustWindowSize()
c.receiveWindow = c.bytesRead + c.receiveWindowSize
return c.receiveWindow
}
// maybeAdjustWindowSize increases the receiveWindowSize if we're sending updates too often.
// For details about auto-tuning, see https://docs.google.com/document/d/1SExkMmGiz8VYzV3s9E35JQlJ73vhzCekKkDi85F1qCE/edit?usp=sharing.
func (c *baseFlowController) maybeAdjustWindowSize() {
bytesReadInEpoch := c.bytesRead - c.epochStartOffset
// don't do anything if less than half the window has been consumed
if bytesReadInEpoch <= c.receiveWindowSize/2 {
return
}
rtt := c.rttStats.SmoothedRTT()
if rtt == 0 {
return
}
fraction := float64(bytesReadInEpoch) / float64(c.receiveWindowSize)
if time.Since(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) {
// window is consumed too fast, try to increase the window size
c.receiveWindowSize = utils.MinByteCount(2*c.receiveWindowSize, c.maxReceiveWindowSize)
}
c.startNewAutoTuningEpoch()
}
func (c *baseFlowController) startNewAutoTuningEpoch() {
c.epochStartTime = time.Now()
c.epochStartOffset = c.bytesRead
}
func (c *baseFlowController) checkFlowControlViolation() bool {
return c.highestReceived > c.receiveWindow
}
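
The auto-tuning rule in maybeAdjustWindowSize can be read in isolation: once at least half of the window has been consumed, the window is doubled (up to the maximum) if consuming it at the current rate would take less than 4 · fraction · RTT. A minimal sketch of just that rule, with the locking and the protocol types stripped out; the function name and values are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

// adjustWindow mirrors the auto-tuning condition from maybeAdjustWindowSize above.
func adjustWindow(windowSize, maxWindowSize, bytesReadInEpoch uint64, epochStart time.Time, rtt time.Duration) uint64 {
	if bytesReadInEpoch <= windowSize/2 || rtt == 0 {
		return windowSize
	}
	fraction := float64(bytesReadInEpoch) / float64(windowSize)
	if time.Since(epochStart) < time.Duration(4*fraction*float64(rtt)) {
		// window is consumed too fast: double it, capped at the maximum
		windowSize *= 2
		if windowSize > maxWindowSize {
			windowSize = maxWindowSize
		}
	}
	return windowSize
}

func main() {
	// the receiver read 3/4 of a 32 kB window within a quarter of an RTT: the window doubles
	epochStart := time.Now().Add(-25 * time.Millisecond)
	fmt.Println(adjustWindow(32<<10, 1<<20, 24<<10, epochStart, 100*time.Millisecond))
}
```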


@ -1,87 +0,0 @@
package flowcontrol
import (
"fmt"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/congestion"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
type connectionFlowController struct {
baseFlowController
queueWindowUpdate func()
}
var _ ConnectionFlowController = &connectionFlowController{}
// NewConnectionFlowController gets a new flow controller for the connection
// It is created before we receive the peer's transport parameters, thus it starts with a sendWindow of 0.
func NewConnectionFlowController(
receiveWindow protocol.ByteCount,
maxReceiveWindow protocol.ByteCount,
queueWindowUpdate func(),
rttStats *congestion.RTTStats,
logger utils.Logger,
) ConnectionFlowController {
return &connectionFlowController{
baseFlowController: baseFlowController{
rttStats: rttStats,
receiveWindow: receiveWindow,
receiveWindowSize: receiveWindow,
maxReceiveWindowSize: maxReceiveWindow,
logger: logger,
},
queueWindowUpdate: queueWindowUpdate,
}
}
func (c *connectionFlowController) SendWindowSize() protocol.ByteCount {
return c.baseFlowController.sendWindowSize()
}
// IncrementHighestReceived adds an increment to the highestReceived value
func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount) error {
c.mutex.Lock()
defer c.mutex.Unlock()
c.highestReceived += increment
if c.checkFlowControlViolation() {
return qerr.Error(qerr.FlowControlReceivedTooMuchData, fmt.Sprintf("Received %d bytes for the connection, allowed %d bytes", c.highestReceived, c.receiveWindow))
}
return nil
}
func (c *connectionFlowController) MaybeQueueWindowUpdate() {
c.mutex.Lock()
hasWindowUpdate := c.hasWindowUpdate()
c.mutex.Unlock()
if hasWindowUpdate {
c.queueWindowUpdate()
}
}
func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount {
c.mutex.Lock()
oldWindowSize := c.receiveWindowSize
offset := c.baseFlowController.getWindowUpdate()
if oldWindowSize < c.receiveWindowSize {
c.logger.Debugf("Increasing receive flow control window for the connection to %d kB", c.receiveWindowSize/(1<<10))
}
c.mutex.Unlock()
return offset
}
// EnsureMinimumWindowSize sets a minimum window size
// it should make sure that the connection-level window is increased when a stream-level window grows
func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) {
c.mutex.Lock()
if inc > c.receiveWindowSize {
c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10))
c.receiveWindowSize = utils.MinByteCount(inc, c.maxReceiveWindowSize)
c.startNewAutoTuningEpoch()
}
c.mutex.Unlock()
}


@ -1,38 +0,0 @@
package flowcontrol
import "v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
type flowController interface {
// for sending
SendWindowSize() protocol.ByteCount
UpdateSendWindow(protocol.ByteCount)
AddBytesSent(protocol.ByteCount)
// for receiving
AddBytesRead(protocol.ByteCount)
GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary
MaybeQueueWindowUpdate() // queues a window update, if necessary
IsNewlyBlocked() (bool, protocol.ByteCount)
}
// A StreamFlowController is a flow controller for a QUIC stream.
type StreamFlowController interface {
flowController
// for receiving
// UpdateHighestReceived should be called when a new highest offset is received
// final has to be true if this is the final offset of the stream, as contained in a STREAM frame with the FIN bit set, or in a RESET_STREAM frame
UpdateHighestReceived(offset protocol.ByteCount, final bool) error
}
// The ConnectionFlowController is the flow controller for the connection.
type ConnectionFlowController interface {
flowController
}
type connectionFlowControllerI interface {
ConnectionFlowController
// The following two methods are not supposed to be called from outside this package, but are needed internally
// for sending
EnsureMinimumWindowSize(protocol.ByteCount)
// for receiving
IncrementHighestReceived(protocol.ByteCount) error
}


@ -1,131 +0,0 @@
package flowcontrol
import (
"fmt"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/congestion"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
type streamFlowController struct {
baseFlowController
streamID protocol.StreamID
queueWindowUpdate func()
connection connectionFlowControllerI
receivedFinalOffset bool
}
var _ StreamFlowController = &streamFlowController{}
// NewStreamFlowController gets a new flow controller for a stream
func NewStreamFlowController(
streamID protocol.StreamID,
cfc ConnectionFlowController,
receiveWindow protocol.ByteCount,
maxReceiveWindow protocol.ByteCount,
initialSendWindow protocol.ByteCount,
queueWindowUpdate func(protocol.StreamID),
rttStats *congestion.RTTStats,
logger utils.Logger,
) StreamFlowController {
return &streamFlowController{
streamID: streamID,
connection: cfc.(connectionFlowControllerI),
queueWindowUpdate: func() { queueWindowUpdate(streamID) },
baseFlowController: baseFlowController{
rttStats: rttStats,
receiveWindow: receiveWindow,
receiveWindowSize: receiveWindow,
maxReceiveWindowSize: maxReceiveWindow,
sendWindow: initialSendWindow,
logger: logger,
},
}
}
// UpdateHighestReceived updates the highestReceived value, if the byteOffset is higher.
// It returns an error if a final offset is inconsistent with what was received before, or if the flow control limit is exceeded.
func (c *streamFlowController) UpdateHighestReceived(byteOffset protocol.ByteCount, final bool) error {
c.mutex.Lock()
defer c.mutex.Unlock()
// when receiving a final offset, check that this final offset is consistent with a final offset we might have received earlier
if final && c.receivedFinalOffset && byteOffset != c.highestReceived {
return qerr.Error(qerr.StreamDataAfterTermination, fmt.Sprintf("Received inconsistent final offset for stream %d (old: %d, new: %d bytes)", c.streamID, c.highestReceived, byteOffset))
}
// if we already received a final offset, check that the offset in the STREAM frames is below the final offset
if c.receivedFinalOffset && byteOffset > c.highestReceived {
return qerr.StreamDataAfterTermination
}
if final {
c.receivedFinalOffset = true
}
if byteOffset == c.highestReceived {
return nil
}
if byteOffset <= c.highestReceived {
// a STREAM_FRAME with a higher offset was received before.
if final {
// If the current byteOffset is smaller than the offset in that STREAM_FRAME, this STREAM_FRAME contained data after the end of the stream
return qerr.StreamDataAfterTermination
}
// this is a reordered STREAM_FRAME
return nil
}
increment := byteOffset - c.highestReceived
c.highestReceived = byteOffset
if c.checkFlowControlViolation() {
return qerr.Error(qerr.FlowControlReceivedTooMuchData, fmt.Sprintf("Received %d bytes on stream %d, allowed %d bytes", byteOffset, c.streamID, c.receiveWindow))
}
return c.connection.IncrementHighestReceived(increment)
}
func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) {
c.baseFlowController.AddBytesRead(n)
c.connection.AddBytesRead(n)
}
func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) {
c.baseFlowController.AddBytesSent(n)
c.connection.AddBytesSent(n)
}
func (c *streamFlowController) SendWindowSize() protocol.ByteCount {
return utils.MinByteCount(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize())
}
func (c *streamFlowController) MaybeQueueWindowUpdate() {
c.mutex.Lock()
hasWindowUpdate := !c.receivedFinalOffset && c.hasWindowUpdate()
c.mutex.Unlock()
if hasWindowUpdate {
c.queueWindowUpdate()
}
c.connection.MaybeQueueWindowUpdate()
}
func (c *streamFlowController) GetWindowUpdate() protocol.ByteCount {
// don't use defer for unlocking the mutex here, GetWindowUpdate() is called frequently and defer shows up in the profiler
c.mutex.Lock()
// if we already received the final offset for this stream, the peer won't need any additional flow control credit
if c.receivedFinalOffset {
c.mutex.Unlock()
return 0
}
oldWindowSize := c.receiveWindowSize
offset := c.baseFlowController.getWindowUpdate()
if c.receiveWindowSize > oldWindowSize { // auto-tuning enlarged the window size
c.logger.Debugf("Increasing receive flow control window for stream %d to %d kB", c.streamID, c.receiveWindowSize/(1<<10))
c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize) * protocol.ConnectionFlowControlMultiplier))
}
c.mutex.Unlock()
return offset
}


@ -1,104 +0,0 @@
package handshake
import (
"crypto/cipher"
"encoding/binary"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
type sealer struct {
aead cipher.AEAD
hpEncrypter cipher.Block
// use a single slice to avoid allocations
nonceBuf []byte
hpMask []byte
// short headers protect 5 bits in the first byte, long headers only 4
is1RTT bool
}
var _ Sealer = &sealer{}
func newSealer(aead cipher.AEAD, hpEncrypter cipher.Block, is1RTT bool) Sealer {
return &sealer{
aead: aead,
nonceBuf: make([]byte, aead.NonceSize()),
is1RTT: is1RTT,
hpEncrypter: hpEncrypter,
hpMask: make([]byte, hpEncrypter.BlockSize()),
}
}
func (s *sealer) Seal(dst, src []byte, pn protocol.PacketNumber, ad []byte) []byte {
binary.BigEndian.PutUint64(s.nonceBuf[len(s.nonceBuf)-8:], uint64(pn))
// The AEAD we're using here will be the qtls.aeadAESGCM13.
// It uses the nonce provided here and XORs it with the IV.
return s.aead.Seal(dst, s.nonceBuf, src, ad)
}
func (s *sealer) EncryptHeader(sample []byte, firstByte *byte, pnBytes []byte) {
if len(sample) != s.hpEncrypter.BlockSize() {
panic("invalid sample size")
}
s.hpEncrypter.Encrypt(s.hpMask, sample)
if s.is1RTT {
*firstByte ^= s.hpMask[0] & 0x1f
} else {
*firstByte ^= s.hpMask[0] & 0xf
}
for i := range pnBytes {
pnBytes[i] ^= s.hpMask[i+1]
}
}
func (s *sealer) Overhead() int {
return s.aead.Overhead()
}
type opener struct {
aead cipher.AEAD
pnDecrypter cipher.Block
// use a single slice to avoid allocations
nonceBuf []byte
hpMask []byte
// short headers protect 5 bits in the first byte, long headers only 4
is1RTT bool
}
var _ Opener = &opener{}
func newOpener(aead cipher.AEAD, pnDecrypter cipher.Block, is1RTT bool) Opener {
return &opener{
aead: aead,
nonceBuf: make([]byte, aead.NonceSize()),
is1RTT: is1RTT,
pnDecrypter: pnDecrypter,
hpMask: make([]byte, pnDecrypter.BlockSize()),
}
}
func (o *opener) Open(dst, src []byte, pn protocol.PacketNumber, ad []byte) ([]byte, error) {
binary.BigEndian.PutUint64(o.nonceBuf[len(o.nonceBuf)-8:], uint64(pn))
// The AEAD we're using here will be the qtls.aeadAESGCM13.
// It uses the nonce provided here and XORs it with the IV.
return o.aead.Open(dst, o.nonceBuf, src, ad)
}
func (o *opener) DecryptHeader(sample []byte, firstByte *byte, pnBytes []byte) {
if len(sample) != o.pnDecrypter.BlockSize() {
panic("invalid sample size")
}
o.pnDecrypter.Encrypt(o.hpMask, sample)
if o.is1RTT {
*firstByte ^= o.hpMask[0] & 0x1f
} else {
*firstByte ^= o.hpMask[0] & 0xf
}
for i := range pnBytes {
pnBytes[i] ^= o.hpMask[i+1]
}
}
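
Header protection above is a plain XOR against a mask obtained by encrypting one ciphertext sample with AES, so applying the same operation twice restores the header. A self-contained sketch of that masking with the standard library; the key, sample and function name are placeholders for illustration:

```go
package main

import (
	"crypto/aes"
	"fmt"
)

// applyHeaderProtection mirrors EncryptHeader/DecryptHeader above: the mask is one AES
// block encryption of the sample, and the XOR is its own inverse.
func applyHeaderProtection(hpKey, sample []byte, firstByte *byte, pnBytes []byte, is1RTT bool) {
	block, err := aes.NewCipher(hpKey)
	if err != nil {
		panic(err)
	}
	mask := make([]byte, block.BlockSize())
	block.Encrypt(mask, sample)
	if is1RTT {
		*firstByte ^= mask[0] & 0x1f // short header: 5 protected bits
	} else {
		*firstByte ^= mask[0] & 0x0f // long header: 4 protected bits
	}
	for i := range pnBytes {
		pnBytes[i] ^= mask[i+1]
	}
}

func main() {
	hpKey := make([]byte, 16)  // placeholder header protection key
	sample := make([]byte, 16) // placeholder ciphertext sample
	firstByte := byte(0x43)
	pn := []byte{0x00, 0x2a}
	applyHeaderProtection(hpKey, sample, &firstByte, pn, true)
	applyHeaderProtection(hpKey, sample, &firstByte, pn, true) // undo
	fmt.Printf("%#x %x\n", firstByte, pn)                      // back to 0x43 002a
}
```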


@ -1,109 +0,0 @@
package handshake
import (
"encoding/asn1"
"fmt"
"net"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
)
const (
cookiePrefixIP byte = iota
cookiePrefixString
)
// A Cookie is derived from the client address and can be used to verify the ownership of this address.
type Cookie struct {
RemoteAddr string
OriginalDestConnectionID protocol.ConnectionID
// The time that the Cookie was issued (resolution 1 second)
SentTime time.Time
}
// token is the struct that is used for ASN1 serialization and deserialization
type token struct {
RemoteAddr []byte
OriginalDestConnectionID []byte
Timestamp int64
}
// A CookieGenerator generates Cookies
type CookieGenerator struct {
cookieProtector cookieProtector
}
// NewCookieGenerator initializes a new CookieGenerator
func NewCookieGenerator() (*CookieGenerator, error) {
cookieProtector, err := newCookieProtector()
if err != nil {
return nil, err
}
return &CookieGenerator{
cookieProtector: cookieProtector,
}, nil
}
// NewToken generates a new Cookie for a given source address
func (g *CookieGenerator) NewToken(raddr net.Addr, origConnID protocol.ConnectionID) ([]byte, error) {
data, err := asn1.Marshal(token{
RemoteAddr: encodeRemoteAddr(raddr),
OriginalDestConnectionID: origConnID,
Timestamp: time.Now().Unix(),
})
if err != nil {
return nil, err
}
return g.cookieProtector.NewToken(data)
}
// DecodeToken decodes a Cookie
func (g *CookieGenerator) DecodeToken(encrypted []byte) (*Cookie, error) {
// if the client didn't send any Cookie, DecodeToken will be called with a nil-slice
if len(encrypted) == 0 {
return nil, nil
}
data, err := g.cookieProtector.DecodeToken(encrypted)
if err != nil {
return nil, err
}
t := &token{}
rest, err := asn1.Unmarshal(data, t)
if err != nil {
return nil, err
}
if len(rest) != 0 {
return nil, fmt.Errorf("rest when unpacking token: %d", len(rest))
}
cookie := &Cookie{
RemoteAddr: decodeRemoteAddr(t.RemoteAddr),
SentTime: time.Unix(t.Timestamp, 0),
}
if len(t.OriginalDestConnectionID) > 0 {
cookie.OriginalDestConnectionID = protocol.ConnectionID(t.OriginalDestConnectionID)
}
return cookie, nil
}
// encodeRemoteAddr encodes a remote address such that it can be saved in the Cookie
func encodeRemoteAddr(remoteAddr net.Addr) []byte {
if udpAddr, ok := remoteAddr.(*net.UDPAddr); ok {
return append([]byte{cookiePrefixIP}, udpAddr.IP...)
}
return append([]byte{cookiePrefixString}, []byte(remoteAddr.String())...)
}
// decodeRemoteAddr decodes the remote address saved in the Cookie
func decodeRemoteAddr(data []byte) string {
// data will never be empty for a Cookie that we generated. Check it to be on the safe side
if len(data) == 0 {
return ""
}
if data[0] == cookiePrefixIP {
return net.IP(data[1:]).String()
}
return string(data[1:])
}
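
The token payload itself is just the ASN.1 structure above. A standalone sketch of the marshal/unmarshal round trip with the standard library, leaving out the encryption layer; the address, connection ID and prefix byte are example values:

```go
package main

import (
	"encoding/asn1"
	"fmt"
	"net"
	"time"
)

// token mirrors the ASN.1 struct above.
type token struct {
	RemoteAddr               []byte
	OriginalDestConnectionID []byte
	Timestamp                int64
}

func main() {
	addr := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 4433}
	data, err := asn1.Marshal(token{
		RemoteAddr:               append([]byte{0 /* cookiePrefixIP */}, addr.IP...),
		OriginalDestConnectionID: []byte{0xde, 0xad, 0xbe, 0xef},
		Timestamp:                time.Now().Unix(),
	})
	if err != nil {
		panic(err)
	}
	var t token
	if _, err := asn1.Unmarshal(data, &t); err != nil {
		panic(err)
	}
	fmt.Println(net.IP(t.RemoteAddr[1:]).String(), time.Unix(t.Timestamp, 0))
}
```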


@ -1,86 +0,0 @@
package handshake
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"golang.org/x/crypto/hkdf"
)
// CookieProtector is used to create and verify a cookie
type cookieProtector interface {
// NewToken creates a new token
NewToken([]byte) ([]byte, error)
// DecodeToken decodes a token
DecodeToken([]byte) ([]byte, error)
}
const (
cookieSecretSize = 32
cookieNonceSize = 32
)
// cookieProtector is used to create and verify a cookie
type cookieProtectorImpl struct {
secret []byte
}
// newCookieProtector creates a source for source address tokens
func newCookieProtector() (cookieProtector, error) {
secret := make([]byte, cookieSecretSize)
if _, err := rand.Read(secret); err != nil {
return nil, err
}
return &cookieProtectorImpl{secret: secret}, nil
}
// NewToken encodes data into a new token.
func (s *cookieProtectorImpl) NewToken(data []byte) ([]byte, error) {
nonce := make([]byte, cookieNonceSize)
if _, err := rand.Read(nonce); err != nil {
return nil, err
}
aead, aeadNonce, err := s.createAEAD(nonce)
if err != nil {
return nil, err
}
return append(nonce, aead.Seal(nil, aeadNonce, data, nil)...), nil
}
// DecodeToken decodes a token.
func (s *cookieProtectorImpl) DecodeToken(p []byte) ([]byte, error) {
if len(p) < cookieNonceSize {
return nil, fmt.Errorf("Token too short: %d", len(p))
}
nonce := p[:cookieNonceSize]
aead, aeadNonce, err := s.createAEAD(nonce)
if err != nil {
return nil, err
}
return aead.Open(nil, aeadNonce, p[cookieNonceSize:], nil)
}
func (s *cookieProtectorImpl) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) {
h := hkdf.New(sha256.New, s.secret, nonce, []byte("quic-go cookie source"))
key := make([]byte, 32) // use a 32 byte key, in order to select AES-256
if _, err := io.ReadFull(h, key); err != nil {
return nil, nil, err
}
aeadNonce := make([]byte, 12)
if _, err := io.ReadFull(h, aeadNonce); err != nil {
return nil, nil, err
}
c, err := aes.NewCipher(key)
if err != nil {
return nil, nil, err
}
aead, err := cipher.NewGCM(c)
if err != nil {
return nil, nil, err
}
return aead, aeadNonce, nil
}
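
A self-contained sketch of the same construction: a 32-byte AES-256 key and a 12-byte GCM nonce are read from one HKDF stream keyed with the long-lived secret and salted with the fresh per-token nonce, and that nonce is prepended to the sealed token so the receiver can re-derive the AEAD. The info label and helper name below are illustrative, not the ones used above:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveAEAD mirrors createAEAD above: key and AEAD nonce come from a single HKDF stream.
func deriveAEAD(secret, nonce []byte) (cipher.AEAD, []byte, error) {
	h := hkdf.New(sha256.New, secret, nonce, []byte("example cookie source"))
	key := make([]byte, 32) // 32 bytes selects AES-256
	if _, err := io.ReadFull(h, key); err != nil {
		return nil, nil, err
	}
	aeadNonce := make([]byte, 12)
	if _, err := io.ReadFull(h, aeadNonce); err != nil {
		return nil, nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}
	return aead, aeadNonce, nil
}

func main() {
	secret := make([]byte, 32) // long-lived secret, as created by newCookieProtector
	nonce := make([]byte, 32)  // fresh per-token nonce
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	aead, aeadNonce, err := deriveAEAD(secret, nonce)
	if err != nil {
		panic(err)
	}
	sealed := append(append([]byte{}, nonce...), aead.Seal(nil, aeadNonce, []byte("token payload"), nil)...)

	// the receiver re-derives the same AEAD from the nonce prefix and opens the rest
	aead2, aeadNonce2, err := deriveAEAD(secret, sealed[:32])
	if err != nil {
		panic(err)
	}
	plain, err := aead2.Open(nil, aeadNonce2, sealed[32:], nil)
	fmt.Println(string(plain), err)
}
```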


@ -1,537 +0,0 @@
package handshake
import (
"crypto/aes"
"crypto/tls"
"errors"
"fmt"
"io"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/marten-seemann/qtls"
)
type messageType uint8
// TLS handshake message types.
const (
typeClientHello messageType = 1
typeServerHello messageType = 2
typeEncryptedExtensions messageType = 8
typeCertificate messageType = 11
typeCertificateRequest messageType = 13
typeCertificateVerify messageType = 15
typeFinished messageType = 20
)
func (m messageType) String() string {
switch m {
case typeClientHello:
return "ClientHello"
case typeServerHello:
return "ServerHello"
case typeEncryptedExtensions:
return "EncryptedExtensions"
case typeCertificate:
return "Certificate"
case typeCertificateRequest:
return "CertificateRequest"
case typeCertificateVerify:
return "CertificateVerify"
case typeFinished:
return "Finished"
default:
return fmt.Sprintf("unknown message type: %d", m)
}
}
// ErrOpenerNotYetAvailable is returned when an opener is requested for an encryption level,
// but the corresponding opener has not yet been initialized
// This can happen when packets arrive out of order.
var ErrOpenerNotYetAvailable = errors.New("CryptoSetup: opener at this encryption level not yet available")
type cryptoSetup struct {
tlsConf *qtls.Config
conn *qtls.Conn
messageChan chan []byte
readEncLevel protocol.EncryptionLevel
writeEncLevel protocol.EncryptionLevel
handleParamsCallback func(*TransportParameters)
// There are two ways that an error can occur during the handshake:
// 1. as a return value from qtls.Handshake()
// 2. when new data is passed to the crypto setup via HandleData()
// handshakeErrChan is closed when qtls.Handshake() errors
handshakeErrChan chan struct{}
// HandleData() sends errors on the messageErrChan
messageErrChan chan error
// handshakeDone is closed as soon as the go routine running qtls.Handshake() returns
handshakeDone chan struct{}
// transport parameters are sent on the receivedTransportParams, as soon as they are received
receivedTransportParams <-chan TransportParameters
// is closed when Close() is called
closeChan chan struct{}
clientHelloWritten bool
clientHelloWrittenChan chan struct{}
initialStream io.Writer
initialOpener Opener
initialSealer Sealer
handshakeStream io.Writer
handshakeOpener Opener
handshakeSealer Sealer
opener Opener
sealer Sealer
// TODO: add a 1-RTT stream (used for session tickets)
receivedWriteKey chan struct{}
receivedReadKey chan struct{}
logger utils.Logger
perspective protocol.Perspective
}
var _ qtls.RecordLayer = &cryptoSetup{}
var _ CryptoSetup = &cryptoSetup{}
// NewCryptoSetupClient creates a new crypto setup for the client
func NewCryptoSetupClient(
initialStream io.Writer,
handshakeStream io.Writer,
origConnID protocol.ConnectionID,
connID protocol.ConnectionID,
params *TransportParameters,
handleParams func(*TransportParameters),
tlsConf *tls.Config,
initialVersion protocol.VersionNumber,
supportedVersions []protocol.VersionNumber,
currentVersion protocol.VersionNumber,
logger utils.Logger,
perspective protocol.Perspective,
) (CryptoSetup, <-chan struct{} /* ClientHello written */, error) {
extHandler, receivedTransportParams := newExtensionHandlerClient(
params,
origConnID,
initialVersion,
supportedVersions,
currentVersion,
logger,
)
cs, clientHelloWritten, err := newCryptoSetup(
initialStream,
handshakeStream,
connID,
extHandler,
receivedTransportParams,
handleParams,
tlsConf,
logger,
perspective,
)
if err != nil {
return nil, nil, err
}
cs.conn = qtls.Client(nil, cs.tlsConf)
return cs, clientHelloWritten, nil
}
// NewCryptoSetupServer creates a new crypto setup for the server
func NewCryptoSetupServer(
initialStream io.Writer,
handshakeStream io.Writer,
connID protocol.ConnectionID,
params *TransportParameters,
handleParams func(*TransportParameters),
tlsConf *tls.Config,
supportedVersions []protocol.VersionNumber,
currentVersion protocol.VersionNumber,
logger utils.Logger,
perspective protocol.Perspective,
) (CryptoSetup, error) {
extHandler, receivedTransportParams := newExtensionHandlerServer(
params,
supportedVersions,
currentVersion,
logger,
)
cs, _, err := newCryptoSetup(
initialStream,
handshakeStream,
connID,
extHandler,
receivedTransportParams,
handleParams,
tlsConf,
logger,
perspective,
)
if err != nil {
return nil, err
}
cs.conn = qtls.Server(nil, cs.tlsConf)
return cs, nil
}
func newCryptoSetup(
initialStream io.Writer,
handshakeStream io.Writer,
connID protocol.ConnectionID,
extHandler tlsExtensionHandler,
transportParamChan <-chan TransportParameters,
handleParams func(*TransportParameters),
tlsConf *tls.Config,
logger utils.Logger,
perspective protocol.Perspective,
) (*cryptoSetup, <-chan struct{} /* ClientHello written */, error) {
initialSealer, initialOpener, err := NewInitialAEAD(connID, perspective)
if err != nil {
return nil, nil, err
}
cs := &cryptoSetup{
initialStream: initialStream,
initialSealer: initialSealer,
initialOpener: initialOpener,
handshakeStream: handshakeStream,
readEncLevel: protocol.EncryptionInitial,
writeEncLevel: protocol.EncryptionInitial,
handleParamsCallback: handleParams,
receivedTransportParams: transportParamChan,
logger: logger,
perspective: perspective,
handshakeDone: make(chan struct{}),
handshakeErrChan: make(chan struct{}),
messageErrChan: make(chan error, 1),
clientHelloWrittenChan: make(chan struct{}),
messageChan: make(chan []byte, 100),
receivedReadKey: make(chan struct{}),
receivedWriteKey: make(chan struct{}),
closeChan: make(chan struct{}),
}
qtlsConf := tlsConfigToQtlsConfig(tlsConf)
qtlsConf.AlternativeRecordLayer = cs
qtlsConf.GetExtensions = extHandler.GetExtensions
qtlsConf.ReceivedExtensions = extHandler.ReceivedExtensions
cs.tlsConf = qtlsConf
return cs, cs.clientHelloWrittenChan, nil
}
func (h *cryptoSetup) RunHandshake() error {
// Handle errors that might occur when HandleData() is called.
handshakeErrChan := make(chan error, 1)
handshakeComplete := make(chan struct{})
go func() {
defer close(h.handshakeDone)
if err := h.conn.Handshake(); err != nil {
handshakeErrChan <- err
return
}
close(handshakeComplete)
}()
select {
case <-h.closeChan:
close(h.messageChan)
// wait until the Handshake() go routine has returned
<-handshakeErrChan
return errors.New("Handshake aborted")
case <-handshakeComplete: // return when the handshake is done
return nil
case err := <-handshakeErrChan:
// if handleMessageFor{server,client} are waiting for some qtls action, make them return
close(h.handshakeErrChan)
return err
case err := <-h.messageErrChan:
// If the handshake errored because of an error that occurred during HandleData(),
// that error message will be more useful than the error message generated by Handshake().
// Close the message chan that qtls is receiving messages from.
// This will make qtls.Handshake() return.
// Thereby the go routine running qtls.Handshake() will return.
close(h.messageChan)
return err
}
}
func (h *cryptoSetup) Close() error {
close(h.closeChan)
// wait until qtls.Handshake() actually returned
<-h.handshakeDone
return nil
}
// handleMessage handles a TLS handshake message.
// It is called by the crypto streams when a new message is available.
// It returns whether it is done with messages at this encryption level (i.e. whether the crypto stream is finished).
func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLevel) bool /* stream finished */ {
msgType := messageType(data[0])
h.logger.Debugf("Received %s message (%d bytes, encryption level: %s)", msgType, len(data), encLevel)
if err := h.checkEncryptionLevel(msgType, encLevel); err != nil {
h.messageErrChan <- err
return false
}
h.messageChan <- data
switch h.perspective {
case protocol.PerspectiveClient:
return h.handleMessageForClient(msgType)
case protocol.PerspectiveServer:
return h.handleMessageForServer(msgType)
default:
panic("unexpected perspective")
}
}
func (h *cryptoSetup) checkEncryptionLevel(msgType messageType, encLevel protocol.EncryptionLevel) error {
var expected protocol.EncryptionLevel
switch msgType {
case typeClientHello,
typeServerHello:
expected = protocol.EncryptionInitial
case typeEncryptedExtensions,
typeCertificate,
typeCertificateRequest,
typeCertificateVerify,
typeFinished:
expected = protocol.EncryptionHandshake
default:
return fmt.Errorf("unexpected handshake message: %d", msgType)
}
if encLevel != expected {
return fmt.Errorf("expected handshake message %s to have encryption level %s, has %s", msgType, expected, encLevel)
}
return nil
}
func (h *cryptoSetup) handleMessageForServer(msgType messageType) bool {
switch msgType {
case typeClientHello:
select {
case params := <-h.receivedTransportParams:
h.handleParamsCallback(&params)
case <-h.handshakeErrChan:
return false
}
// get the handshake write key
select {
case <-h.receivedWriteKey:
case <-h.handshakeErrChan:
return false
}
// get the 1-RTT write key
select {
case <-h.receivedWriteKey:
case <-h.handshakeErrChan:
return false
}
// get the handshake read key
select {
case <-h.receivedReadKey:
case <-h.handshakeErrChan:
return false
}
return true
case typeCertificate, typeCertificateVerify:
// nothing to do
return false
case typeFinished:
// get the 1-RTT read key
select {
case <-h.receivedReadKey:
case <-h.handshakeErrChan:
return false
}
return true
default:
panic("unexpected handshake message")
}
}
func (h *cryptoSetup) handleMessageForClient(msgType messageType) bool {
switch msgType {
case typeServerHello:
// get the handshake read key
select {
case <-h.receivedReadKey:
case <-h.handshakeErrChan:
return false
}
// get the handshake write key
select {
case <-h.receivedWriteKey:
case <-h.handshakeErrChan:
return false
}
return true
case typeEncryptedExtensions:
select {
case params := <-h.receivedTransportParams:
h.handleParamsCallback(&params)
case <-h.handshakeErrChan:
return false
}
return false
case typeCertificateRequest, typeCertificate, typeCertificateVerify:
// nothing to do
return false
case typeFinished:
// While the order of these two is not defined by the TLS spec,
// we have to process them in the same order as our TLS library does.
// get the handshake write key
select {
case <-h.receivedWriteKey:
case <-h.handshakeErrChan:
return false
}
// get the 1-RTT read key
select {
case <-h.receivedReadKey:
case <-h.handshakeErrChan:
return false
}
return true
default:
panic("unexpected handshake message")
}
}
// ReadHandshakeMessage is called by TLS.
// It blocks until a new handshake message is available.
func (h *cryptoSetup) ReadHandshakeMessage() ([]byte, error) {
// TODO: add some error handling here (when the session is closed)
msg, ok := <-h.messageChan
if !ok {
return nil, errors.New("error while handling the handshake message")
}
return msg, nil
}
func (h *cryptoSetup) SetReadKey(suite *qtls.CipherSuite, trafficSecret []byte) {
key := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic key", suite.KeyLen())
iv := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic iv", suite.IVLen())
hpKey := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic hp", suite.KeyLen())
hpDecrypter, err := aes.NewCipher(hpKey)
if err != nil {
panic(fmt.Sprintf("error creating new AES cipher: %s", err))
}
switch h.readEncLevel {
case protocol.EncryptionInitial:
h.readEncLevel = protocol.EncryptionHandshake
h.handshakeOpener = newOpener(suite.AEAD(key, iv), hpDecrypter, false)
h.logger.Debugf("Installed Handshake Read keys")
case protocol.EncryptionHandshake:
h.readEncLevel = protocol.Encryption1RTT
h.opener = newOpener(suite.AEAD(key, iv), hpDecrypter, true)
h.logger.Debugf("Installed 1-RTT Read keys")
default:
panic("unexpected read encryption level")
}
h.receivedReadKey <- struct{}{}
}
func (h *cryptoSetup) SetWriteKey(suite *qtls.CipherSuite, trafficSecret []byte) {
key := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic key", suite.KeyLen())
iv := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic iv", suite.IVLen())
hpKey := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic hp", suite.KeyLen())
hpEncrypter, err := aes.NewCipher(hpKey)
if err != nil {
panic(fmt.Sprintf("error creating new AES cipher: %s", err))
}
switch h.writeEncLevel {
case protocol.EncryptionInitial:
h.writeEncLevel = protocol.EncryptionHandshake
h.handshakeSealer = newSealer(suite.AEAD(key, iv), hpEncrypter, false)
h.logger.Debugf("Installed Handshake Write keys")
case protocol.EncryptionHandshake:
h.writeEncLevel = protocol.Encryption1RTT
h.sealer = newSealer(suite.AEAD(key, iv), hpEncrypter, true)
h.logger.Debugf("Installed 1-RTT Write keys")
default:
panic("unexpected write encryption level")
}
h.receivedWriteKey <- struct{}{}
}
// WriteRecord is called when TLS writes data
func (h *cryptoSetup) WriteRecord(p []byte) (int, error) {
switch h.writeEncLevel {
case protocol.EncryptionInitial:
// assume that the first WriteRecord call contains the ClientHello
n, err := h.initialStream.Write(p)
if !h.clientHelloWritten && h.perspective == protocol.PerspectiveClient {
h.clientHelloWritten = true
close(h.clientHelloWrittenChan)
}
return n, err
case protocol.EncryptionHandshake:
return h.handshakeStream.Write(p)
default:
return 0, fmt.Errorf("unexpected write encryption level: %s", h.writeEncLevel)
}
}
func (h *cryptoSetup) GetSealer() (protocol.EncryptionLevel, Sealer) {
if h.sealer != nil {
return protocol.Encryption1RTT, h.sealer
}
if h.handshakeSealer != nil {
return protocol.EncryptionHandshake, h.handshakeSealer
}
return protocol.EncryptionInitial, h.initialSealer
}
func (h *cryptoSetup) GetSealerWithEncryptionLevel(level protocol.EncryptionLevel) (Sealer, error) {
errNoSealer := fmt.Errorf("CryptoSetup: no sealer with encryption level %s", level.String())
switch level {
case protocol.EncryptionInitial:
return h.initialSealer, nil
case protocol.EncryptionHandshake:
if h.handshakeSealer == nil {
return nil, errNoSealer
}
return h.handshakeSealer, nil
case protocol.Encryption1RTT:
if h.sealer == nil {
return nil, errNoSealer
}
return h.sealer, nil
default:
return nil, errNoSealer
}
}
func (h *cryptoSetup) GetOpener(level protocol.EncryptionLevel) (Opener, error) {
switch level {
case protocol.EncryptionInitial:
return h.initialOpener, nil
case protocol.EncryptionHandshake:
if h.handshakeOpener == nil {
return nil, ErrOpenerNotYetAvailable
}
return h.handshakeOpener, nil
case protocol.Encryption1RTT:
if h.opener == nil {
return nil, ErrOpenerNotYetAvailable
}
return h.opener, nil
default:
return nil, fmt.Errorf("CryptoSetup: no opener with encryption level %s", level)
}
}
func (h *cryptoSetup) ConnectionState() ConnectionState {
connState := h.conn.ConnectionState()
return ConnectionState{
HandshakeComplete: connState.HandshakeComplete,
ServerName: connState.ServerName,
PeerCertificates: connState.PeerCertificates,
}
}
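
handleMessageForServer and handleMessageForClient above repeat one pattern: block until the next key has been installed, unless the handshake has already failed. A simplified standalone sketch of that select; here the key channel is buffered so the demo is self-contained, whereas the key channels above are unbuffered and the sender blocks:

```go
package main

import "fmt"

// waitForKey mirrors the select blocks above: true once a key is installed,
// false if the handshake errored first (the error channel is closed).
func waitForKey(receivedKey <-chan struct{}, handshakeErr <-chan struct{}) bool {
	select {
	case <-receivedKey:
		return true
	case <-handshakeErr:
		return false
	}
}

func main() {
	receivedKey := make(chan struct{}, 1)
	handshakeErr := make(chan struct{})

	receivedKey <- struct{}{}
	fmt.Println(waitForKey(receivedKey, handshakeErr)) // true: the key was installed

	close(handshakeErr) // qtls.Handshake() returned an error; every waiter is released
	fmt.Println(waitForKey(receivedKey, handshakeErr)) // false: stop waiting
}
```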


@ -1,52 +0,0 @@
package handshake
import (
"crypto"
"crypto/aes"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/marten-seemann/qtls"
)
var quicVersion1Salt = []byte{0xef, 0x4f, 0xb0, 0xab, 0xb4, 0x74, 0x70, 0xc4, 0x1b, 0xef, 0xcf, 0x80, 0x31, 0x33, 0x4f, 0xae, 0x48, 0x5e, 0x09, 0xa0}
// NewInitialAEAD creates a new AEAD for Initial encryption / decryption.
func NewInitialAEAD(connID protocol.ConnectionID, pers protocol.Perspective) (Sealer, Opener, error) {
clientSecret, serverSecret := computeSecrets(connID)
var mySecret, otherSecret []byte
if pers == protocol.PerspectiveClient {
mySecret = clientSecret
otherSecret = serverSecret
} else {
mySecret = serverSecret
otherSecret = clientSecret
}
myKey, myHPKey, myIV := computeInitialKeyAndIV(mySecret)
otherKey, otherHPKey, otherIV := computeInitialKeyAndIV(otherSecret)
encrypter := qtls.AEADAESGCM13(myKey, myIV)
hpEncrypter, err := aes.NewCipher(myHPKey)
if err != nil {
return nil, nil, err
}
decrypter := qtls.AEADAESGCM13(otherKey, otherIV)
hpDecrypter, err := aes.NewCipher(otherHPKey)
if err != nil {
return nil, nil, err
}
return newSealer(encrypter, hpEncrypter, false), newOpener(decrypter, hpDecrypter, false), nil
}
func computeSecrets(connID protocol.ConnectionID) (clientSecret, serverSecret []byte) {
initialSecret := qtls.HkdfExtract(crypto.SHA256, connID, quicVersion1Salt)
clientSecret = qtls.HkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "client in", crypto.SHA256.Size())
serverSecret = qtls.HkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "server in", crypto.SHA256.Size())
return
}
func computeInitialKeyAndIV(secret []byte) (key, hpKey, iv []byte) {
key = qtls.HkdfExpandLabel(crypto.SHA256, secret, []byte{}, "quic key", 16)
hpKey = qtls.HkdfExpandLabel(crypto.SHA256, secret, []byte{}, "quic hp", 16)
iv = qtls.HkdfExpandLabel(crypto.SHA256, secret, []byte{}, "quic iv", 12)
return
}


@ -1,49 +0,0 @@
package handshake
import (
"crypto/x509"
"io"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/marten-seemann/qtls"
)
// Opener opens a packet
type Opener interface {
Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, error)
DecryptHeader(sample []byte, firstByte *byte, pnBytes []byte)
}
// Sealer seals a packet
type Sealer interface {
Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte
EncryptHeader(sample []byte, firstByte *byte, pnBytes []byte)
Overhead() int
}
// A tlsExtensionHandler sends and receives the QUIC TLS extension.
type tlsExtensionHandler interface {
GetExtensions(msgType uint8) []qtls.Extension
ReceivedExtensions(msgType uint8, exts []qtls.Extension) error
}
// CryptoSetup handles the handshake and protecting / unprotecting packets
type CryptoSetup interface {
RunHandshake() error
io.Closer
HandleMessage([]byte, protocol.EncryptionLevel) bool
ConnectionState() ConnectionState
GetSealer() (protocol.EncryptionLevel, Sealer)
GetSealerWithEncryptionLevel(protocol.EncryptionLevel) (Sealer, error)
GetOpener(protocol.EncryptionLevel) (Opener, error)
}
// ConnectionState records basic details about the QUIC connection.
// Warning: This API should not be considered stable and might change soon.
type ConnectionState struct {
HandshakeComplete bool // handshake is complete
ServerName string // server name requested by client, if any (server side only)
PeerCertificates []*x509.Certificate // certificate chain presented by remote peer
}


@ -1,50 +0,0 @@
package handshake
import (
"crypto/tls"
"v2ray.com/core/external/github.com/marten-seemann/qtls"
)
func tlsConfigToQtlsConfig(c *tls.Config) *qtls.Config {
if c == nil {
c = &tls.Config{}
}
// QUIC requires TLS 1.3 or newer
minVersion := c.MinVersion
if minVersion < qtls.VersionTLS13 {
minVersion = qtls.VersionTLS13
}
maxVersion := c.MaxVersion
if maxVersion < qtls.VersionTLS13 {
maxVersion = qtls.VersionTLS13
}
return &qtls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
// TODO: make GetCertificate work
// GetCertificate: c.GetCertificate,
GetClientCertificate: c.GetClientCertificate,
// TODO: make GetConfigForClient work
// GetConfigForClient: c.GetConfigForClient,
VerifyPeerCertificate: c.VerifyPeerCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
MinVersion: minVersion,
MaxVersion: maxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
KeyLogWriter: c.KeyLogWriter,
}
}
}
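
The only semantic adjustment in this copy is clamping the version range to TLS 1.3, which QUIC requires. A small sketch of just that clamping on a plain *tls.Config for illustration (the helper name is made up):

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// clampToTLS13 applies the same MinVersion/MaxVersion adjustment as tlsConfigToQtlsConfig above.
func clampToTLS13(c *tls.Config) *tls.Config {
	if c == nil {
		c = &tls.Config{}
	}
	out := c.Clone()
	if out.MinVersion < tls.VersionTLS13 {
		out.MinVersion = tls.VersionTLS13
	}
	if out.MaxVersion < tls.VersionTLS13 {
		out.MaxVersion = tls.VersionTLS13
	}
	return out
}

func main() {
	cfg := clampToTLS13(&tls.Config{MinVersion: tls.VersionTLS12})
	fmt.Printf("min=%#x max=%#x\n", cfg.MinVersion, cfg.MaxVersion) // both forced to TLS 1.3 (0x304)
}
```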


@ -1,90 +0,0 @@
package handshake
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
const quicTLSExtensionType = 0xffa5
type clientHelloTransportParameters struct {
InitialVersion protocol.VersionNumber
Parameters TransportParameters
}
func (p *clientHelloTransportParameters) Marshal() []byte {
const lenOffset = 4
b := &bytes.Buffer{}
utils.BigEndian.WriteUint32(b, uint32(p.InitialVersion))
b.Write([]byte{0, 0}) // length. Will be replaced later
p.Parameters.marshal(b)
data := b.Bytes()
binary.BigEndian.PutUint16(data[lenOffset:lenOffset+2], uint16(len(data)-lenOffset-2))
return data
}
func (p *clientHelloTransportParameters) Unmarshal(data []byte) error {
if len(data) < 6 {
return errors.New("transport parameter data too short")
}
p.InitialVersion = protocol.VersionNumber(binary.BigEndian.Uint32(data[:4]))
paramsLen := int(binary.BigEndian.Uint16(data[4:6]))
data = data[6:]
if len(data) != paramsLen {
return fmt.Errorf("expected transport parameters to be %d bytes long, have %d", paramsLen, len(data))
}
return p.Parameters.unmarshal(data, protocol.PerspectiveClient)
}
type encryptedExtensionsTransportParameters struct {
NegotiatedVersion protocol.VersionNumber
SupportedVersions []protocol.VersionNumber
Parameters TransportParameters
}
func (p *encryptedExtensionsTransportParameters) Marshal() []byte {
b := &bytes.Buffer{}
utils.BigEndian.WriteUint32(b, uint32(p.NegotiatedVersion))
b.WriteByte(uint8(4 * len(p.SupportedVersions)))
for _, v := range p.SupportedVersions {
utils.BigEndian.WriteUint32(b, uint32(v))
}
lenOffset := b.Len()
b.Write([]byte{0, 0}) // length. Will be replaced later
p.Parameters.marshal(b)
data := b.Bytes()
binary.BigEndian.PutUint16(data[lenOffset:lenOffset+2], uint16(len(data)-lenOffset-2))
return data
}
func (p *encryptedExtensionsTransportParameters) Unmarshal(data []byte) error {
if len(data) < 5 {
return errors.New("transport parameter data too short")
}
p.NegotiatedVersion = protocol.VersionNumber(binary.BigEndian.Uint32(data[:4]))
numVersions := int(data[4])
if numVersions%4 != 0 {
return fmt.Errorf("invalid length for version list: %d", numVersions)
}
numVersions /= 4
data = data[5:]
if len(data) < 4*numVersions+2 /*length field for the parameter list */ {
return errors.New("transport parameter data too short")
}
p.SupportedVersions = make([]protocol.VersionNumber, numVersions)
for i := 0; i < numVersions; i++ {
p.SupportedVersions[i] = protocol.VersionNumber(binary.BigEndian.Uint32(data[:4]))
data = data[4:]
}
paramsLen := int(binary.BigEndian.Uint16(data[:2]))
data = data[2:]
if len(data) != paramsLen {
return fmt.Errorf("expected transport parameters to be %d bytes long, have %d", paramsLen, len(data))
}
return p.Parameters.unmarshal(data, protocol.PerspectiveServer)
}
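
Both Marshal functions above use the same trick: write the fixed-size prefix, reserve two zero bytes for the length, serialize the parameters, then back-patch the length into the buffer's backing slice. A minimal standalone sketch of that technique with placeholder field values:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	b := &bytes.Buffer{}
	binary.Write(b, binary.BigEndian, uint32(0x00000001)) // stand-in for the version field
	lenOffset := b.Len()
	b.Write([]byte{0, 0})                   // length placeholder, patched below
	b.Write([]byte{0xca, 0xfe, 0xba, 0xbe}) // stand-in for p.Parameters.marshal(b)

	data := b.Bytes()
	binary.BigEndian.PutUint16(data[lenOffset:lenOffset+2], uint16(len(data)-lenOffset-2))
	fmt.Printf("% x\n", data) // 00 00 00 01 00 04 ca fe ba be
}
```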


@ -1,113 +0,0 @@
package handshake
import (
"errors"
"fmt"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/marten-seemann/qtls"
)
type extensionHandlerClient struct {
ourParams *TransportParameters
paramsChan chan<- TransportParameters
origConnID protocol.ConnectionID
initialVersion protocol.VersionNumber
supportedVersions []protocol.VersionNumber
version protocol.VersionNumber
logger utils.Logger
}
var _ tlsExtensionHandler = &extensionHandlerClient{}
// newExtensionHandlerClient creates a new extension handler for the client.
func newExtensionHandlerClient(
params *TransportParameters,
origConnID protocol.ConnectionID,
initialVersion protocol.VersionNumber,
supportedVersions []protocol.VersionNumber,
version protocol.VersionNumber,
logger utils.Logger,
) (tlsExtensionHandler, <-chan TransportParameters) {
// The client reads the transport parameters from the Encrypted Extensions message.
// The paramsChan is used in the session's run loop's select statement.
// We have to use an unbuffered channel here to make sure that the session actually processes the transport parameters immediately.
paramsChan := make(chan TransportParameters)
return &extensionHandlerClient{
ourParams: params,
paramsChan: paramsChan,
origConnID: origConnID,
initialVersion: initialVersion,
supportedVersions: supportedVersions,
version: version,
logger: logger,
}, paramsChan
}
func (h *extensionHandlerClient) GetExtensions(msgType uint8) []qtls.Extension {
if messageType(msgType) != typeClientHello {
return nil
}
h.logger.Debugf("Sending Transport Parameters: %s", h.ourParams)
return []qtls.Extension{{
Type: quicTLSExtensionType,
Data: (&clientHelloTransportParameters{
InitialVersion: h.initialVersion,
Parameters: *h.ourParams,
}).Marshal(),
}}
}
func (h *extensionHandlerClient) ReceivedExtensions(msgType uint8, exts []qtls.Extension) error {
if messageType(msgType) != typeEncryptedExtensions {
return nil
}
var found bool
eetp := &encryptedExtensionsTransportParameters{}
for _, ext := range exts {
if ext.Type != quicTLSExtensionType {
continue
}
if err := eetp.Unmarshal(ext.Data); err != nil {
return err
}
found = true
}
if !found {
return errors.New("EncryptedExtensions message didn't contain a QUIC extension")
}
// check that the negotiated_version is the current version
if eetp.NegotiatedVersion != h.version {
return qerr.Error(qerr.VersionNegotiationMismatch, "current version doesn't match negotiated_version")
}
// check that the current version is included in the supported versions
if !protocol.IsSupportedVersion(eetp.SupportedVersions, h.version) {
return qerr.Error(qerr.VersionNegotiationMismatch, "current version not included in the supported versions")
}
// if version negotiation was performed, check that we would have selected the current version based on the supported versions sent by the server
if h.version != h.initialVersion {
negotiatedVersion, ok := protocol.ChooseSupportedVersion(h.supportedVersions, eetp.SupportedVersions)
if !ok || h.version != negotiatedVersion {
return qerr.Error(qerr.VersionNegotiationMismatch, "would have picked a different version")
}
}
params := eetp.Parameters
// check that the server sent a stateless reset token
if len(params.StatelessResetToken) == 0 {
return errors.New("server didn't send a stateless_reset_token")
}
// check the Retry token
if !h.origConnID.Equal(params.OriginalConnectionID) {
return fmt.Errorf("expected original_connection_id to equal %s, is %s", h.origConnID, params.OriginalConnectionID)
}
h.logger.Debugf("Received Transport Parameters: %s", &params)
h.paramsChan <- params
return nil
}


@ -1,86 +0,0 @@
package handshake
import (
"errors"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/qerr"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
"v2ray.com/core/external/github.com/marten-seemann/qtls"
)
type extensionHandlerServer struct {
ourParams *TransportParameters
paramsChan chan<- TransportParameters
version protocol.VersionNumber
supportedVersions []protocol.VersionNumber
logger utils.Logger
}
var _ tlsExtensionHandler = &extensionHandlerServer{}
// newExtensionHandlerServer creates a new extension handler for the server
func newExtensionHandlerServer(
params *TransportParameters,
supportedVersions []protocol.VersionNumber,
version protocol.VersionNumber,
logger utils.Logger,
) (tlsExtensionHandler, <-chan TransportParameters) {
// Processing the ClientHello is performed statelessly (and from a single go-routine).
// Therefore, we have to use a buffered chan to pass the transport parameters to that go routine.
paramsChan := make(chan TransportParameters, 1)
return &extensionHandlerServer{
ourParams: params,
paramsChan: paramsChan,
supportedVersions: supportedVersions,
version: version,
logger: logger,
}, paramsChan
}
func (h *extensionHandlerServer) GetExtensions(msgType uint8) []qtls.Extension {
if messageType(msgType) != typeEncryptedExtensions {
return nil
}
h.logger.Debugf("Sending Transport Parameters: %s", h.ourParams)
return []qtls.Extension{{
Type: quicTLSExtensionType,
Data: (&encryptedExtensionsTransportParameters{
NegotiatedVersion: h.version,
SupportedVersions: protocol.GetGreasedVersions(h.supportedVersions),
Parameters: *h.ourParams,
}).Marshal(),
}}
}
func (h *extensionHandlerServer) ReceivedExtensions(msgType uint8, exts []qtls.Extension) error {
if messageType(msgType) != typeClientHello {
return nil
}
var found bool
chtp := &clientHelloTransportParameters{}
for _, ext := range exts {
if ext.Type != quicTLSExtensionType {
continue
}
if err := chtp.Unmarshal(ext.Data); err != nil {
return err
}
found = true
}
if !found {
return errors.New("ClientHello didn't contain a QUIC extension")
}
// perform the stateless version negotiation validation:
// make sure that we would have sent a Version Negotiation Packet if the client offered the initial version
// this is the case if and only if the initial version is not contained in the supported versions
if chtp.InitialVersion != h.version && protocol.IsSupportedVersion(h.supportedVersions, chtp.InitialVersion) {
return qerr.Error(qerr.VersionNegotiationMismatch, "Client should have used the initial version")
}
h.logger.Debugf("Received Transport Parameters: %s", &chtp.Parameters)
h.paramsChan <- chtp.Parameters
return nil
}


@ -1,210 +0,0 @@
package handshake
import (
"bytes"
"errors"
"fmt"
"io"
"sort"
"time"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/protocol"
"v2ray.com/core/external/github.com/lucas-clemente/quic-go/internal/utils"
)
type transportParameterID uint16
const (
originalConnectionIDParameterID transportParameterID = 0x0
idleTimeoutParameterID transportParameterID = 0x1
statelessResetTokenParameterID transportParameterID = 0x2
maxPacketSizeParameterID transportParameterID = 0x3
initialMaxDataParameterID transportParameterID = 0x4
initialMaxStreamDataBidiLocalParameterID transportParameterID = 0x5
initialMaxStreamDataBidiRemoteParameterID transportParameterID = 0x6
initialMaxStreamDataUniParameterID transportParameterID = 0x7
initialMaxStreamsBidiParameterID transportParameterID = 0x8
initialMaxStreamsUniParameterID transportParameterID = 0x9
disableMigrationParameterID transportParameterID = 0xc
)
// TransportParameters are parameters sent to the peer during the handshake
type TransportParameters struct {
InitialMaxStreamDataBidiLocal protocol.ByteCount
InitialMaxStreamDataBidiRemote protocol.ByteCount
InitialMaxStreamDataUni protocol.ByteCount
InitialMaxData protocol.ByteCount
MaxPacketSize protocol.ByteCount
MaxUniStreams uint64
MaxBidiStreams uint64
IdleTimeout time.Duration
DisableMigration bool
StatelessResetToken []byte
OriginalConnectionID protocol.ConnectionID
}
func (p *TransportParameters) unmarshal(data []byte, sentBy protocol.Perspective) error {
// needed to check that every parameter is only sent at most once
var parameterIDs []transportParameterID
r := bytes.NewReader(data)
for r.Len() >= 4 {
paramIDInt, _ := utils.BigEndian.ReadUint16(r)
paramID := transportParameterID(paramIDInt)
paramLen, _ := utils.BigEndian.ReadUint16(r)
parameterIDs = append(parameterIDs, paramID)
switch paramID {
case initialMaxStreamDataBidiLocalParameterID,
initialMaxStreamDataBidiRemoteParameterID,
initialMaxStreamDataUniParameterID,
initialMaxDataParameterID,
initialMaxStreamsBidiParameterID,
initialMaxStreamsUniParameterID,
idleTimeoutParameterID,
maxPacketSizeParameterID:
if err := p.readNumericTransportParameter(r, paramID, int(paramLen)); err != nil {
return err
}
default:
if r.Len() < int(paramLen) {
return fmt.Errorf("remaining length (%d) smaller than parameter length (%d)", r.Len(), paramLen)
}
switch paramID {
case disableMigrationParameterID:
if paramLen != 0 {
return fmt.Errorf("wrong length for disable_migration: %d (expected empty)", paramLen)
}
p.DisableMigration = true
case statelessResetTokenParameterID:
if sentBy == protocol.PerspectiveClient {
return errors.New("client sent a stateless_reset_token")
}
if paramLen != 16 {
return fmt.Errorf("wrong length for stateless_reset_token: %d (expected 16)", paramLen)
}
b := make([]byte, 16)
r.Read(b)
p.StatelessResetToken = b
case originalConnectionIDParameterID:
if sentBy == protocol.PerspectiveClient {
return errors.New("client sent an original_connection_id")
}
p.OriginalConnectionID, _ = protocol.ReadConnectionID(r, int(paramLen))
default:
r.Seek(int64(paramLen), io.SeekCurrent)
}
}
}
// check that every transport parameter was sent at most once
sort.Slice(parameterIDs, func(i, j int) bool { return parameterIDs[i] < parameterIDs[j] })
for i := 0; i < len(parameterIDs)-1; i++ {
if parameterIDs[i] == parameterIDs[i+1] {
return fmt.Errorf("received duplicate transport parameter %#x", parameterIDs[i])
}
}
if r.Len() != 0 {
return fmt.Errorf("should have read all data. Still have %d bytes", r.Len())
}
return nil
}
func (p *TransportParameters) readNumericTransportParameter(
r *bytes.Reader,
paramID transportParameterID,
expectedLen int,
) error {
remainingLen := r.Len()
val, err := utils.ReadVarInt(r)
if err != nil {
return fmt.Errorf("error while reading transport parameter %d: %s", paramID, err)
}
if remainingLen-r.Len() != expectedLen {
return fmt.Errorf("inconsistent transport parameter length for %d", paramID)
}
switch paramID {
case initialMaxStreamDataBidiLocalParameterID:
p.InitialMaxStreamDataBidiLocal = protocol.ByteCount(val)
case initialMaxStreamDataBidiRemoteParameterID:
p.InitialMaxStreamDataBidiRemote = protocol.ByteCount(val)
case initialMaxStreamDataUniParameterID:
p.InitialMaxStreamDataUni = protocol.ByteCount(val)
case initialMaxDataParameterID:
p.InitialMaxData = protocol.ByteCount(val)
case initialMaxStreamsBidiParameterID:
p.MaxBidiStreams = val
case initialMaxStreamsUniParameterID:
p.MaxUniStreams = val
case idleTimeoutParameterID:
p.IdleTimeout = utils.MaxDuration(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Second)
case maxPacketSizeParameterID:
if val < 1200 {
return fmt.Errorf("invalid value for max_packet_size: %d (minimum 1200)", val)
}
p.MaxPacketSize = protocol.ByteCount(val)
default:
return fmt.Errorf("TransportParameter BUG: transport parameter %d not found", paramID)
}
return nil
}
func (p *TransportParameters) marshal(b *bytes.Buffer) {
// initial_max_stream_data_bidi_local
utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamDataBidiLocalParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxStreamDataBidiLocal))))
utils.WriteVarInt(b, uint64(p.InitialMaxStreamDataBidiLocal))
// initial_max_stream_data_bidi_remote
utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamDataBidiRemoteParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxStreamDataBidiRemote))))
utils.WriteVarInt(b, uint64(p.InitialMaxStreamDataBidiRemote))
// initial_max_stream_data_uni
utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamDataUniParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxStreamDataUni))))
utils.WriteVarInt(b, uint64(p.InitialMaxStreamDataUni))
// initial_max_data
utils.BigEndian.WriteUint16(b, uint16(initialMaxDataParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxData))))
utils.WriteVarInt(b, uint64(p.InitialMaxData))
// initial_max_bidi_streams
utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamsBidiParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(p.MaxBidiStreams)))
utils.WriteVarInt(b, p.MaxBidiStreams)
// initial_max_uni_streams
utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamsUniParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(p.MaxUniStreams)))
utils.WriteVarInt(b, p.MaxUniStreams)
// idle_timeout
utils.BigEndian.WriteUint16(b, uint16(idleTimeoutParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.IdleTimeout/time.Second))))
utils.WriteVarInt(b, uint64(p.IdleTimeout/time.Second))
// max_packet_size
utils.BigEndian.WriteUint16(b, uint16(maxPacketSizeParameterID))
utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(protocol.MaxReceivePacketSize))))
utils.WriteVarInt(b, uint64(protocol.MaxReceivePacketSize))
// disable_migration
if p.DisableMigration {
utils.BigEndian.WriteUint16(b, uint16(disableMigrationParameterID))
utils.BigEndian.WriteUint16(b, 0)
}
if len(p.StatelessResetToken) > 0 {
utils.BigEndian.WriteUint16(b, uint16(statelessResetTokenParameterID))
utils.BigEndian.WriteUint16(b, uint16(len(p.StatelessResetToken))) // should always be 16 bytes
b.Write(p.StatelessResetToken)
}
// original_connection_id
if p.OriginalConnectionID.Len() > 0 {
utils.BigEndian.WriteUint16(b, uint16(originalConnectionIDParameterID))
utils.BigEndian.WriteUint16(b, uint16(p.OriginalConnectionID.Len()))
b.Write(p.OriginalConnectionID.Bytes())
}
}
// String returns a string representation, intended for logging.
func (p *TransportParameters) String() string {
return fmt.Sprintf("&handshake.TransportParameters{OriginalConnectionID: %s, InitialMaxStreamDataBidiLocal: %#x, InitialMaxStreamDataBidiRemote: %#x, InitialMaxStreamDataUni: %#x, InitialMaxData: %#x, MaxBidiStreams: %d, MaxUniStreams: %d, IdleTimeout: %s}", p.OriginalConnectionID, p.InitialMaxStreamDataBidiLocal, p.InitialMaxStreamDataBidiRemote, p.InitialMaxStreamDataUni, p.InitialMaxData, p.MaxBidiStreams, p.MaxUniStreams, p.IdleTimeout)
}
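
For reference, the wire layout produced by marshal above is a flat sequence of (parameter ID, value length, value) entries, with the ID and length as big-endian uint16 and the value varint-encoded. Below is a minimal standalone sketch of a single entry; the parameter ID value and the appendVarInt helper are illustrative assumptions, not taken from this file:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendVarInt encodes v as a QUIC varint (only the 1- and 2-byte forms,
// which is enough for this illustration).
func appendVarInt(b []byte, v uint64) []byte {
	if v < 64 {
		return append(b, byte(v)) // 1-byte form, prefix 0b00
	}
	return append(b, 0x40|byte(v>>8), byte(v)) // 2-byte form, prefix 0b01
}

func main() {
	const paramID = 0x0001           // stands in for e.g. idleTimeoutParameterID; the real IDs are defined earlier in the file
	val := appendVarInt(nil, 30)     // e.g. an idle timeout of 30 seconds

	var buf []byte
	buf = binary.BigEndian.AppendUint16(buf, paramID)          // parameter ID
	buf = binary.BigEndian.AppendUint16(buf, uint16(len(val))) // value length
	buf = append(buf, val...)                                  // varint-encoded value

	fmt.Printf("% x\n", buf) // 00 01 00 01 1e
}
```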

@ -1,36 +0,0 @@
#!/bin/bash
# Mockgen refuses to generate mocks for internal packages.
# This script copies the internal directory and renames it to internalpackage.
# It also creates a public alias for private types.
# It then creates a mock for this public (alias) type.
# Afterwards, it corrects the import paths (replaces internalpackage back to internal).
TEMP_DIR=$(mktemp -d)
mkdir -p $TEMP_DIR/src/github.com/lucas-clemente/quic-go/internalpackage
# uppercase the name of the interface (only has an effect for private interfaces)
INTERFACE_NAME="$(tr '[:lower:]' '[:upper:]' <<< ${4:0:1})${4:1}"
PACKAGE_NAME=`echo $3 | sed 's/.*\///'`
cp -r $GOPATH/src/github.com/lucas-clemente/quic-go/internal/* $TEMP_DIR/src/github.com/lucas-clemente/quic-go/internalpackage
find $TEMP_DIR -type f -name "*.go" -exec sed -i '' 's/internal/internalpackage/g' {} \;
export GOPATH="$TEMP_DIR:$GOPATH"
PACKAGE_PATH=${3/internal/internalpackage}
# if we're mocking a private interface, we need to add a public alias
if [ "$INTERFACE_NAME" != "$4" ]; then
# create a public alias for the interface, so that mockgen can process it
echo -e "package $PACKAGE_NAME\n" > $TEMP_DIR/src/$PACKAGE_PATH/mockgen_interface.go
echo "type $INTERFACE_NAME = $4" >> $TEMP_DIR/src/$PACKAGE_PATH/mockgen_interface.go
fi
mockgen -package $1 -self_package $1 -destination $2 $PACKAGE_PATH $INTERFACE_NAME
sed -i '' 's/internalpackage/internal/g' $2
# mockgen imports the package we're generating a mock for
sed -i '' "s/$1\.//g" $2
goimports -w $2
rm -r "$TEMP_DIR"

@ -1,69 +0,0 @@
package protocol
import (
"bytes"
"crypto/rand"
"fmt"
"io"
)
// A ConnectionID in QUIC
type ConnectionID []byte
const maxConnectionIDLen = 18
// GenerateConnectionID generates a connection ID using cryptographic random
func GenerateConnectionID(len int) (ConnectionID, error) {
b := make([]byte, len)
if _, err := rand.Read(b); err != nil {
return nil, err
}
return ConnectionID(b), nil
}
// GenerateConnectionIDForInitial generates a connection ID for the Initial packet.
// It uses a length randomly chosen between 8 and 18 bytes.
func GenerateConnectionIDForInitial() (ConnectionID, error) {
r := make([]byte, 1)
if _, err := rand.Read(r); err != nil {
return nil, err
}
len := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1)
return GenerateConnectionID(len)
}
// ReadConnectionID reads a connection ID of length len from the given io.Reader.
// It returns io.EOF if there are not enough bytes to read.
func ReadConnectionID(r io.Reader, len int) (ConnectionID, error) {
if len == 0 {
return nil, nil
}
c := make(ConnectionID, len)
_, err := io.ReadFull(r, c)
if err == io.ErrUnexpectedEOF {
return nil, io.EOF
}
return c, err
}
// Equal says if two connection IDs are equal
func (c ConnectionID) Equal(other ConnectionID) bool {
return bytes.Equal(c, other)
}
// Len returns the length of the connection ID in bytes
func (c ConnectionID) Len() int {
return len(c)
}
// Bytes returns the byte representation
func (c ConnectionID) Bytes() []byte {
return []byte(c)
}
func (c ConnectionID) String() string {
if c.Len() == 0 {
return "(empty)"
}
return fmt.Sprintf("%#x", c.Bytes())
}
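
A test-style sketch of how the helpers above compose; it would have to live inside the protocol package itself, since internal packages cannot be imported from outside quic-go:

```go
package protocol

import (
	"bytes"
	"testing"
)

// TestConnectionIDRoundTrip generates a random connection ID, serializes its
// raw bytes, and reads it back with ReadConnectionID.
func TestConnectionIDRoundTrip(t *testing.T) {
	id, err := GenerateConnectionIDForInitial() // between 8 and 18 random bytes
	if err != nil {
		t.Fatal(err)
	}
	parsed, err := ReadConnectionID(bytes.NewReader(id.Bytes()), id.Len())
	if err != nil {
		t.Fatal(err)
	}
	if !parsed.Equal(id) {
		t.Fatalf("got %s, want %s", parsed, id)
	}
}
```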

@ -1,28 +0,0 @@
package protocol
// EncryptionLevel is the encryption level
// Default value is Unencrypted
type EncryptionLevel int
const (
// EncryptionUnspecified is a not specified encryption level
EncryptionUnspecified EncryptionLevel = iota
// EncryptionInitial is the Initial encryption level
EncryptionInitial
// EncryptionHandshake is the Handshake encryption level
EncryptionHandshake
// Encryption1RTT is the 1-RTT encryption level
Encryption1RTT
)
func (e EncryptionLevel) String() string {
switch e {
case EncryptionInitial:
return "Initial"
case EncryptionHandshake:
return "Handshake"
case Encryption1RTT:
return "1-RTT"
}
return "unknown"
}

@ -1,85 +0,0 @@
package protocol
// PacketNumberLen is the length of the packet number in bytes
type PacketNumberLen uint8
const (
// PacketNumberLenInvalid is the default value and not a valid length for a packet number
PacketNumberLenInvalid PacketNumberLen = 0
// PacketNumberLen1 is a packet number length of 1 byte
PacketNumberLen1 PacketNumberLen = 1
// PacketNumberLen2 is a packet number length of 2 bytes
PacketNumberLen2 PacketNumberLen = 2
// PacketNumberLen3 is a packet number length of 3 bytes
PacketNumberLen3 PacketNumberLen = 3
// PacketNumberLen4 is a packet number length of 4 bytes
PacketNumberLen4 PacketNumberLen = 4
)
// DecodePacketNumber calculates the packet number based on the received packet number, its length and the last seen packet number
func DecodePacketNumber(
packetNumberLength PacketNumberLen,
lastPacketNumber PacketNumber,
wirePacketNumber PacketNumber,
) PacketNumber {
var epochDelta PacketNumber
switch packetNumberLength {
case PacketNumberLen1:
epochDelta = PacketNumber(1) << 8
case PacketNumberLen2:
epochDelta = PacketNumber(1) << 16
case PacketNumberLen3:
epochDelta = PacketNumber(1) << 24
case PacketNumberLen4:
epochDelta = PacketNumber(1) << 32
}
epoch := lastPacketNumber & ^(epochDelta - 1)
prevEpochBegin := epoch - epochDelta
nextEpochBegin := epoch + epochDelta
return closestTo(
lastPacketNumber+1,
epoch+wirePacketNumber,
closestTo(lastPacketNumber+1, prevEpochBegin+wirePacketNumber, nextEpochBegin+wirePacketNumber),
)
}
func closestTo(target, a, b PacketNumber) PacketNumber {
if delta(target, a) < delta(target, b) {
return a
}
return b
}
func delta(a, b PacketNumber) PacketNumber {
if a < b {
return b - a
}
return a - b
}
// GetPacketNumberLengthForHeader gets the length of the packet number for the public header
// it never chooses a PacketNumberLen of 1 byte, since this is too short under certain circumstances
func GetPacketNumberLengthForHeader(packetNumber, leastUnacked PacketNumber) PacketNumberLen {
diff := uint64(packetNumber - leastUnacked)
if diff < (1 << (16 - 1)) {
return PacketNumberLen2
}
if diff < (1 << (24 - 1)) {
return PacketNumberLen3
}
return PacketNumberLen4
}
// GetPacketNumberLength gets the minimum length needed to fully represent the packet number
func GetPacketNumberLength(packetNumber PacketNumber) PacketNumberLen {
if packetNumber < (1 << (uint8(PacketNumberLen1) * 8)) {
return PacketNumberLen1
}
if packetNumber < (1 << (uint8(PacketNumberLen2) * 8)) {
return PacketNumberLen2
}
if packetNumber < (1 << (uint8(PacketNumberLen3) * 8)) {
return PacketNumberLen3
}
return PacketNumberLen4
}
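
DecodePacketNumber picks, among the candidates in the current, previous and next packet-number epochs, the value closest to the next expected packet number. A test-style sketch using the sample values from the packet number decoding example in the IETF QUIC transport drafts:

```go
package protocol

import "testing"

// TestDecodePacketNumberExample: with 0xa82f30ea as the last seen packet
// number, the 2-byte truncated number 0x9b32 decodes to 0xa82f9b32.
func TestDecodePacketNumberExample(t *testing.T) {
	got := DecodePacketNumber(PacketNumberLen2, 0xa82f30ea, 0x9b32)
	if want := PacketNumber(0xa82f9b32); got != want {
		t.Fatalf("got %#x, want %#x", got, want)
	}
}
```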

@ -1,119 +0,0 @@
package protocol
import "time"
// MaxPacketSizeIPv4 is the maximum packet size that we use for sending IPv4 packets.
const MaxPacketSizeIPv4 = 1252
// MaxPacketSizeIPv6 is the maximum packet size that we use for sending IPv6 packets.
const MaxPacketSizeIPv6 = 1232
const defaultMaxCongestionWindowPackets = 1000
// DefaultMaxCongestionWindow is the default for the max congestion window
const DefaultMaxCongestionWindow ByteCount = defaultMaxCongestionWindowPackets * DefaultTCPMSS
// InitialCongestionWindow is the initial congestion window in QUIC packets
const InitialCongestionWindow ByteCount = 32 * DefaultTCPMSS
// MaxUndecryptablePackets limits the number of undecryptable packets that are queued in the session.
const MaxUndecryptablePackets = 10
// ConnectionFlowControlMultiplier determines how much larger the connection flow control windows needs to be relative to any stream's flow control window
// This is the value that Chromium is using
const ConnectionFlowControlMultiplier = 1.5
// InitialMaxStreamData is the stream-level flow control window for receiving data
const InitialMaxStreamData = (1 << 10) * 512 // 512 KB
// InitialMaxData is the connection-level flow control window for receiving data
const InitialMaxData = ConnectionFlowControlMultiplier * InitialMaxStreamData
// DefaultMaxReceiveStreamFlowControlWindow is the default maximum stream-level flow control window for receiving data, for the server
const DefaultMaxReceiveStreamFlowControlWindow = 6 * (1 << 20) // 6 MB
// DefaultMaxReceiveConnectionFlowControlWindow is the default connection-level flow control window for receiving data, for the server
const DefaultMaxReceiveConnectionFlowControlWindow = 15 * (1 << 20) // 15 MB
// WindowUpdateThreshold is the fraction of the receive window that has to be consumed before a higher offset is advertised to the client
const WindowUpdateThreshold = 0.25
// DefaultMaxIncomingStreams is the maximum number of streams that a peer may open
const DefaultMaxIncomingStreams = 100
// DefaultMaxIncomingUniStreams is the maximum number of unidirectional streams that a peer may open
const DefaultMaxIncomingUniStreams = 100
// MaxSessionUnprocessedPackets is the max number of packets stored in each session that are not yet processed.
const MaxSessionUnprocessedPackets = defaultMaxCongestionWindowPackets
// SkipPacketAveragePeriodLength is the average period length in which one packet number is skipped to prevent an Optimistic ACK attack
const SkipPacketAveragePeriodLength PacketNumber = 500
// MaxTrackedSkippedPackets is the maximum number of skipped packet numbers the SentPacketHandler keeps track of for Optimistic ACK attack mitigation
const MaxTrackedSkippedPackets = 10
// MaxAcceptQueueSize is the maximum number of sessions that the server queues for accepting.
// If the queue is full, new connection attempts will be rejected.
const MaxAcceptQueueSize = 32
// CookieExpiryTime is the valid time of a cookie
const CookieExpiryTime = 24 * time.Hour
// MaxOutstandingSentPackets is the maximum number of packets saved for retransmission.
// When reached, it imposes a soft limit on sending new packets:
// Sending ACKs and retransmissions is still allowed, but no new regular packets can be sent.
const MaxOutstandingSentPackets = 2 * defaultMaxCongestionWindowPackets
// MaxTrackedSentPackets is the maximum number of sent packets saved for retransmission.
// When reached, no more packets will be sent.
// This value *must* be larger than MaxOutstandingSentPackets.
const MaxTrackedSentPackets = MaxOutstandingSentPackets * 5 / 4
// MaxTrackedReceivedAckRanges is the maximum number of ACK ranges tracked
const MaxTrackedReceivedAckRanges = defaultMaxCongestionWindowPackets
// MaxNonRetransmittableAcks is the maximum number of packets containing an ACK, but no retransmittable frames, that we send in a row
const MaxNonRetransmittableAcks = 19
// MaxStreamFrameSorterGaps is the maximum number of gaps between received StreamFrames
// prevents DoS attacks against the streamFrameSorter
const MaxStreamFrameSorterGaps = 1000
// MaxCryptoStreamOffset is the maximum offset allowed on any of the crypto streams.
// This limits the size of the ClientHello and Certificates that can be received.
const MaxCryptoStreamOffset = 16 * (1 << 10)
// MinRemoteIdleTimeout is the minimum value that we accept for the remote idle timeout
const MinRemoteIdleTimeout = 5 * time.Second
// DefaultIdleTimeout is the default idle timeout
const DefaultIdleTimeout = 30 * time.Second
// DefaultHandshakeTimeout is the default timeout for a connection until the crypto handshake succeeds.
const DefaultHandshakeTimeout = 10 * time.Second
// RetiredConnectionIDDeleteTimeout is the time we keep closed sessions around in order to retransmit the CONNECTION_CLOSE.
// after this time all information about the old connection will be deleted
const RetiredConnectionIDDeleteTimeout = 5 * time.Second
// MinStreamFrameSize is the minimum size that has to be left in a packet, so that we add another STREAM frame.
// This avoids splitting up STREAM frames into small pieces, which has 2 advantages:
// 1. it reduces the framing overhead
// 2. it reduces the head-of-line blocking, when a packet is lost
const MinStreamFrameSize ByteCount = 128
// MaxAckFrameSize is the maximum size for an ACK frame that we write
// Due to the varint encoding, ACK frames can grow (almost) indefinitely large.
// The MaxAckFrameSize should be large enough to encode many ACK ranges,
// but must ensure that a maximum size ACK frame fits into one packet.
const MaxAckFrameSize ByteCount = 1000
// MinPacingDelay is the minimum duration that is used for packet pacing
// If the packet pacing frequency is higher, multiple packets might be sent at once.
// Example: For a packet pacing delay of 20 microseconds, we would send 5 packets at once, wait for 100 microseconds, and so forth.
const MinPacingDelay time.Duration = 100 * time.Microsecond
// DefaultConnectionIDLength is the connection ID length that is used for multiplexed connections
// if no other value is configured.
const DefaultConnectionIDLength = 4

@ -1,26 +0,0 @@
package protocol
// Perspective determines if we're acting as a server or a client
type Perspective int
// the perspectives
const (
PerspectiveServer Perspective = 1
PerspectiveClient Perspective = 2
)
// Opposite returns the perspective of the peer
func (p Perspective) Opposite() Perspective {
return 3 - p
}
func (p Perspective) String() string {
switch p {
case PerspectiveServer:
return "Server"
case PerspectiveClient:
return "Client"
default:
return "invalid perspective"
}
}

@ -1,65 +0,0 @@
package protocol
import (
"fmt"
)
// A PacketNumber in QUIC
type PacketNumber uint64
// The PacketType is the Long Header Type
type PacketType uint8
const (
// PacketTypeInitial is the packet type of an Initial packet
PacketTypeInitial PacketType = 1 + iota
// PacketTypeRetry is the packet type of a Retry packet
PacketTypeRetry
// PacketTypeHandshake is the packet type of a Handshake packet
PacketTypeHandshake
// PacketType0RTT is the packet type of a 0-RTT packet
PacketType0RTT
)
func (t PacketType) String() string {
switch t {
case PacketTypeInitial:
return "Initial"
case PacketTypeRetry:
return "Retry"
case PacketTypeHandshake:
return "Handshake"
case PacketType0RTT:
return "0-RTT Protected"
default:
return fmt.Sprintf("unknown packet type: %d", t)
}
}
// A ByteCount in QUIC
type ByteCount uint64
// MaxByteCount is the maximum value of a ByteCount
const MaxByteCount = ByteCount(1<<62 - 1)
// An ApplicationErrorCode is an application-defined error code.
type ApplicationErrorCode uint16
// MaxReceivePacketSize is the maximum packet size of any QUIC packet, based on
// ethernet's max size, minus the IP and UDP headers. IPv6 has a 40 byte header,
// UDP adds an additional 8 bytes. This is a total overhead of 48 bytes.
// Ethernet's max packet size is 1500 bytes, 1500 - 48 = 1452.
const MaxReceivePacketSize ByteCount = 1452 - 64
// DefaultTCPMSS is the default maximum packet size used in the Linux TCP implementation.
// Used in QUIC for congestion window computations in bytes.
const DefaultTCPMSS ByteCount = 1460
// MinInitialPacketSize is the minimum size an Initial packet is required to have.
const MinInitialPacketSize = 1200
// MinStatelessResetSize is the minimum size of a stateless reset packet
const MinStatelessResetSize = 1 /* first byte */ + 22 /* random bytes */ + 16 /* token */
// MinConnectionIDLenInitial is the minimum length of the destination connection ID on an Initial packet.
const MinConnectionIDLenInitial = 8

@ -1,67 +0,0 @@
package protocol
// A StreamID in QUIC
type StreamID uint64
// StreamType encodes if this is a unidirectional or bidirectional stream
type StreamType uint8
const (
// StreamTypeUni is a unidirectional stream
StreamTypeUni StreamType = iota
// StreamTypeBidi is a bidirectional stream
StreamTypeBidi
)
// InitiatedBy says if the stream was initiated by the client or by the server
func (s StreamID) InitiatedBy() Perspective {
if s%2 == 0 {
return PerspectiveClient
}
return PerspectiveServer
}
// Type says if this is a unidirectional or bidirectional stream
func (s StreamID) Type() StreamType {
if s%4 >= 2 {
return StreamTypeUni
}
return StreamTypeBidi
}
// StreamNum returns how many streams in total are below this
// Example: for stream 9 it returns 3 (i.e. streams 1, 5 and 9)
func (s StreamID) StreamNum() uint64 {
return uint64(s/4) + 1
}
// MaxStreamID is the highest stream ID that a peer is allowed to open,
// when it is allowed to open numStreams.
func MaxStreamID(stype StreamType, numStreams uint64, pers Perspective) StreamID {
if numStreams == 0 {
return 0
}
var first StreamID
switch stype {
case StreamTypeBidi:
switch pers {
case PerspectiveClient:
first = 0
case PerspectiveServer:
first = 1
}
case StreamTypeUni:
switch pers {
case PerspectiveClient:
first = 2
case PerspectiveServer:
first = 3
}
}
return first + 4*StreamID(numStreams-1)
}
// FirstStream returns the first valid stream ID
func FirstStream(stype StreamType, pers Perspective) StreamID {
return MaxStreamID(stype, 1, pers)
}
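
Stream IDs encode both the initiator (even IDs are client-initiated, odd IDs server-initiated) and the stream type (IDs with s%4 >= 2 are unidirectional), advancing in steps of four. A test-style sketch of the helpers above:

```go
package protocol

import "testing"

// TestStreamIDExample: stream 9 is the third server-initiated bidirectional
// stream, and the third client-initiated bidirectional stream is stream 8.
func TestStreamIDExample(t *testing.T) {
	s := StreamID(9)
	if s.InitiatedBy() != PerspectiveServer || s.Type() != StreamTypeBidi || s.StreamNum() != 3 {
		t.Fatalf("unexpected properties for stream %d", s)
	}
	if got := MaxStreamID(StreamTypeBidi, 3, PerspectiveClient); got != 8 {
		t.Fatalf("MaxStreamID = %d, want 8", got)
	}
}
```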

@ -1,117 +0,0 @@
package protocol
import (
"crypto/rand"
"encoding/binary"
"fmt"
"math"
)
// VersionNumber is a version number as int
type VersionNumber uint32
// gQUIC version range as defined in the wiki: https://github.com/quicwg/base-drafts/wiki/QUIC-Versions
const (
gquicVersion0 = 0x51303030
maxGquicVersion = 0x51303439
)
// The version numbers, making grepping easier
const (
VersionTLS VersionNumber = 0x51474fff
VersionWhatever VersionNumber = 1 // for when the version doesn't matter
VersionUnknown VersionNumber = math.MaxUint32
)
// SupportedVersions lists the versions that the server supports
// must be in sorted descending order
var SupportedVersions = []VersionNumber{VersionTLS}
// IsValidVersion says if the version is known to quic-go
func IsValidVersion(v VersionNumber) bool {
return v == VersionTLS || IsSupportedVersion(SupportedVersions, v)
}
func (vn VersionNumber) String() string {
switch vn {
case VersionWhatever:
return "whatever"
case VersionUnknown:
return "unknown"
case VersionTLS:
return "TLS dev version (WIP)"
default:
if vn.isGQUIC() {
return fmt.Sprintf("gQUIC %d", vn.toGQUICVersion())
}
return fmt.Sprintf("%#x", uint32(vn))
}
}
// ToAltSvc returns the representation of the version for the H2 Alt-Svc parameters
func (vn VersionNumber) ToAltSvc() string {
return fmt.Sprintf("%d", vn)
}
func (vn VersionNumber) isGQUIC() bool {
return vn > gquicVersion0 && vn <= maxGquicVersion
}
func (vn VersionNumber) toGQUICVersion() int {
return int(10*(vn-gquicVersion0)/0x100) + int(vn%0x10)
}
// IsSupportedVersion returns true if the server supports this version
func IsSupportedVersion(supported []VersionNumber, v VersionNumber) bool {
for _, t := range supported {
if t == v {
return true
}
}
return false
}
// ChooseSupportedVersion finds the best version in the overlap of ours and theirs
// ours is a slice of versions that we support, sorted by our preference (descending)
// theirs is a slice of versions offered by the peer. The order does not matter.
// The bool returned indicates if a matching version was found.
func ChooseSupportedVersion(ours, theirs []VersionNumber) (VersionNumber, bool) {
for _, ourVer := range ours {
for _, theirVer := range theirs {
if ourVer == theirVer {
return ourVer, true
}
}
}
return 0, false
}
// generateReservedVersion generates a reserved version number (v & 0x0f0f0f0f == 0x0a0a0a0a)
func generateReservedVersion() VersionNumber {
b := make([]byte, 4)
_, _ = rand.Read(b) // ignore the error here. Failure to read random data doesn't break anything
return VersionNumber((binary.BigEndian.Uint32(b) | 0x0a0a0a0a) & 0xfafafafa)
}
// GetGreasedVersions adds one reserved version number to a slice of version numbers, at a random position
func GetGreasedVersions(supported []VersionNumber) []VersionNumber {
b := make([]byte, 1)
_, _ = rand.Read(b) // ignore the error here. Failure to read random data doesn't break anything
randPos := int(b[0]) % (len(supported) + 1)
greased := make([]VersionNumber, len(supported)+1)
copy(greased, supported[:randPos])
greased[randPos] = generateReservedVersion()
copy(greased[randPos+1:], supported[randPos:])
return greased
}
// StripGreasedVersions strips all greased versions from a slice of versions
func StripGreasedVersions(versions []VersionNumber) []VersionNumber {
realVersions := make([]VersionNumber, 0, len(versions))
for _, v := range versions {
if v&0x0f0f0f0f != 0x0a0a0a0a {
realVersions = append(realVersions, v)
}
}
return realVersions
}
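
Reserved ("greased") versions always satisfy v & 0x0f0f0f0f == 0x0a0a0a0a, so a peer can strip them before matching; ChooseSupportedVersion then walks our preference list. A test-style sketch:

```go
package protocol

import "testing"

// TestVersionGreasingSketch greases the supported-version list, strips the
// grease again, and checks that version matching still finds VersionTLS.
func TestVersionGreasingSketch(t *testing.T) {
	ours := []VersionNumber{VersionTLS}
	greased := GetGreasedVersions(ours)
	if len(greased) != len(ours)+1 {
		t.Fatal("expected exactly one reserved version to be added")
	}
	stripped := StripGreasedVersions(greased)
	if len(stripped) != 1 || stripped[0] != VersionTLS {
		t.Fatal("stripping the grease should leave only the original versions")
	}
	if v, ok := ChooseSupportedVersion(ours, []VersionNumber{VersionWhatever, VersionTLS}); !ok || v != VersionTLS {
		t.Fatal("expected VersionTLS to be chosen")
	}
}
```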

@ -1,193 +0,0 @@
package qerr
// The error codes defined by QUIC
// Remember to run `go generate ./...` whenever the error codes change.
//go:generate stringer -type=ErrorCode
const (
InternalError ErrorCode = 1
// There were data frames after a fin or reset.
StreamDataAfterTermination ErrorCode = 2
// Control frame is malformed.
InvalidPacketHeader ErrorCode = 3
// Frame data is malformed.
InvalidFrameData ErrorCode = 4
// The packet contained no payload.
MissingPayload ErrorCode = 48
// FEC data is malformed.
InvalidFecData ErrorCode = 5
// STREAM frame data is malformed.
InvalidStreamData ErrorCode = 46
// STREAM frame data overlaps with buffered data.
OverlappingStreamData ErrorCode = 87
// Received STREAM frame data is not encrypted.
UnencryptedStreamData ErrorCode = 61
// Attempt to send unencrypted STREAM frame.
AttemptToSendUnencryptedStreamData ErrorCode = 88
// FEC frame data is not encrypted.
UnencryptedFecData ErrorCode = 77
// RST_STREAM frame data is malformed.
InvalidRstStreamData ErrorCode = 6
// CONNECTION_CLOSE frame data is malformed.
InvalidConnectionCloseData ErrorCode = 7
// GOAWAY frame data is malformed.
InvalidGoawayData ErrorCode = 8
// WINDOW_UPDATE frame data is malformed.
InvalidWindowUpdateData ErrorCode = 57
// BLOCKED frame data is malformed.
InvalidBlockedData ErrorCode = 58
// STOP_WAITING frame data is malformed.
InvalidStopWaitingData ErrorCode = 60
// PATH_CLOSE frame data is malformed.
InvalidPathCloseData ErrorCode = 78
// ACK frame data is malformed.
InvalidAckData ErrorCode = 9
// Version negotiation packet is malformed.
InvalidVersionNegotiationPacket ErrorCode = 10
// Public RST packet is malformed.
InvalidPublicRstPacket ErrorCode = 11
// There was an error decrypting.
DecryptionFailure ErrorCode = 12
// There was an error encrypting.
EncryptionFailure ErrorCode = 13
// The packet exceeded kMaxPacketSize.
PacketTooLarge ErrorCode = 14
// The peer is going away. May be a client or server.
PeerGoingAway ErrorCode = 16
// A stream ID was invalid.
InvalidStreamID ErrorCode = 17
// A priority was invalid.
InvalidPriority ErrorCode = 49
// Too many streams already open.
TooManyOpenStreams ErrorCode = 18
// The peer created too many available streams.
TooManyAvailableStreams ErrorCode = 76
// Received public reset for this connection.
PublicReset ErrorCode = 19
// Invalid protocol version.
InvalidVersion ErrorCode = 20
// The Header ID for a stream was too far from the previous.
InvalidHeaderID ErrorCode = 22
// Negotiable parameter received during handshake had invalid value.
InvalidNegotiatedValue ErrorCode = 23
// There was an error decompressing data.
DecompressionFailure ErrorCode = 24
// The connection timed out due to no network activity.
NetworkIdleTimeout ErrorCode = 25
// The connection timed out waiting for the handshake to complete.
HandshakeTimeout ErrorCode = 67
// There was an error encountered migrating addresses.
ErrorMigratingAddress ErrorCode = 26
// There was an error encountered migrating port only.
ErrorMigratingPort ErrorCode = 86
// There was an error while writing to the socket.
PacketWriteError ErrorCode = 27
// There was an error while reading from the socket.
PacketReadError ErrorCode = 51
// We received a STREAM_FRAME with no data and no fin flag set.
EmptyStreamFrameNoFin ErrorCode = 50
// We received invalid data on the headers stream.
InvalidHeadersStreamData ErrorCode = 56
// Invalid data on the headers stream received because of decompression
// failure.
HeadersStreamDataDecompressFailure ErrorCode = 97
// The peer received too much data, violating flow control.
FlowControlReceivedTooMuchData ErrorCode = 59
// The peer sent too much data, violating flow control.
FlowControlSentTooMuchData ErrorCode = 63
// The peer received an invalid flow control window.
FlowControlInvalidWindow ErrorCode = 64
// The connection has been IP pooled into an existing connection.
ConnectionIPPooled ErrorCode = 62
// The connection has too many outstanding sent packets.
TooManyOutstandingSentPackets ErrorCode = 68
// The connection has too many outstanding received packets.
TooManyOutstandingReceivedPackets ErrorCode = 69
// The quic connection has been cancelled.
ConnectionCancelled ErrorCode = 70
// Disabled QUIC because of high packet loss rate.
BadPacketLossRate ErrorCode = 71
// Disabled QUIC because of too many PUBLIC_RESETs post handshake.
PublicResetsPostHandshake ErrorCode = 73
// Disabled QUIC because of too many timeouts with streams open.
TimeoutsWithOpenStreams ErrorCode = 74
// Closed because we failed to serialize a packet.
FailedToSerializePacket ErrorCode = 75
// QUIC timed out after too many RTOs.
TooManyRtos ErrorCode = 85
// Crypto errors.
// Handshake failed.
HandshakeFailed ErrorCode = 28
// Handshake message contained out of order tags.
CryptoTagsOutOfOrder ErrorCode = 29
// Handshake message contained too many entries.
CryptoTooManyEntries ErrorCode = 30
// Handshake message contained an invalid value length.
CryptoInvalidValueLength ErrorCode = 31
// A crypto message was received after the handshake was complete.
CryptoMessageAfterHandshakeComplete ErrorCode = 32
// A crypto message was received with an illegal message tag.
InvalidCryptoMessageType ErrorCode = 33
// A crypto message was received with an illegal parameter.
InvalidCryptoMessageParameter ErrorCode = 34
// An invalid channel id signature was supplied.
InvalidChannelIDSignature ErrorCode = 52
// A crypto message was received with a mandatory parameter missing.
CryptoMessageParameterNotFound ErrorCode = 35
// A crypto message was received with a parameter that has no overlap
// with the local parameter.
CryptoMessageParameterNoOverlap ErrorCode = 36
// A crypto message was received that contained a parameter with too few
// values.
CryptoMessageIndexNotFound ErrorCode = 37
// An internal error occurred in crypto processing.
CryptoInternalError ErrorCode = 38
// A crypto handshake message specified an unsupported version.
CryptoVersionNotSupported ErrorCode = 39
// A crypto handshake message resulted in a stateless reject.
CryptoHandshakeStatelessReject ErrorCode = 72
// There was no intersection between the crypto primitives supported by the
// peer and ourselves.
CryptoNoSupport ErrorCode = 40
// The server rejected our client hello messages too many times.
CryptoTooManyRejects ErrorCode = 41
// The client rejected the server's certificate chain or signature.
ProofInvalid ErrorCode = 42
// A crypto message was received with a duplicate tag.
CryptoDuplicateTag ErrorCode = 43
// A crypto message was received with the wrong encryption level (i.e. it
// should have been encrypted but was not.)
CryptoEncryptionLevelIncorrect ErrorCode = 44
// The server config for a server has expired.
CryptoServerConfigExpired ErrorCode = 45
// We failed to setup the symmetric keys for a connection.
CryptoSymmetricKeySetupFailed ErrorCode = 53
// A handshake message arrived, but we are still validating the
// previous handshake message.
CryptoMessageWhileValidatingClientHello ErrorCode = 54
// A server config update arrived before the handshake is complete.
CryptoUpdateBeforeHandshakeComplete ErrorCode = 65
// This connection involved a version negotiation which appears to have been
// tampered with.
VersionNegotiationMismatch ErrorCode = 55
// Multipath is not enabled, but a packet with multipath flag on is received.
BadMultipathFlag ErrorCode = 79
// IP address changed causing connection close.
IPAddressChanged ErrorCode = 80
// Connection migration errors.
// Network changed, but connection had no migratable streams.
ConnectionMigrationNoMigratableStreams ErrorCode = 81
// Connection changed networks too many times.
ConnectionMigrationTooManyChanges ErrorCode = 82
// Connection migration was attempted, but there was no new network to
// migrate to.
ConnectionMigrationNoNewNetwork ErrorCode = 83
// Network changed, but connection had one or more non-migratable streams.
ConnectionMigrationNonMigratableStream ErrorCode = 84
)

@ -1,46 +0,0 @@
// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.
package qerr
import "strconv"
const (
_ErrorCode_name_0 = "InternalErrorStreamDataAfterTerminationInvalidPacketHeaderInvalidFrameDataInvalidFecDataInvalidRstStreamDataInvalidConnectionCloseDataInvalidGoawayDataInvalidAckDataInvalidVersionNegotiationPacketInvalidPublicRstPacketDecryptionFailureEncryptionFailurePacketTooLarge"
_ErrorCode_name_1 = "PeerGoingAwayInvalidStreamIDTooManyOpenStreamsPublicResetInvalidVersion"
_ErrorCode_name_2 = "InvalidHeaderIDInvalidNegotiatedValueDecompressionFailureNetworkIdleTimeoutErrorMigratingAddressPacketWriteErrorHandshakeFailedCryptoTagsOutOfOrderCryptoTooManyEntriesCryptoInvalidValueLengthCryptoMessageAfterHandshakeCompleteInvalidCryptoMessageTypeInvalidCryptoMessageParameterCryptoMessageParameterNotFoundCryptoMessageParameterNoOverlapCryptoMessageIndexNotFoundCryptoInternalErrorCryptoVersionNotSupportedCryptoNoSupportCryptoTooManyRejectsProofInvalidCryptoDuplicateTagCryptoEncryptionLevelIncorrectCryptoServerConfigExpiredInvalidStreamData"
_ErrorCode_name_3 = "MissingPayloadInvalidPriorityEmptyStreamFrameNoFinPacketReadErrorInvalidChannelIDSignatureCryptoSymmetricKeySetupFailedCryptoMessageWhileValidatingClientHelloVersionNegotiationMismatchInvalidHeadersStreamDataInvalidWindowUpdateDataInvalidBlockedDataFlowControlReceivedTooMuchDataInvalidStopWaitingDataUnencryptedStreamDataConnectionIPPooledFlowControlSentTooMuchDataFlowControlInvalidWindowCryptoUpdateBeforeHandshakeComplete"
_ErrorCode_name_4 = "HandshakeTimeoutTooManyOutstandingSentPacketsTooManyOutstandingReceivedPacketsConnectionCancelledBadPacketLossRateCryptoHandshakeStatelessRejectPublicResetsPostHandshakeTimeoutsWithOpenStreamsFailedToSerializePacketTooManyAvailableStreamsUnencryptedFecDataInvalidPathCloseDataBadMultipathFlagIPAddressChangedConnectionMigrationNoMigratableStreamsConnectionMigrationTooManyChangesConnectionMigrationNoNewNetworkConnectionMigrationNonMigratableStreamTooManyRtosErrorMigratingPortOverlappingStreamDataAttemptToSendUnencryptedStreamData"
_ErrorCode_name_5 = "HeadersStreamDataDecompressFailure"
)
var (
_ErrorCode_index_0 = [...]uint16{0, 13, 39, 58, 74, 88, 108, 134, 151, 165, 196, 218, 235, 252, 266}
_ErrorCode_index_1 = [...]uint8{0, 13, 28, 46, 57, 71}
_ErrorCode_index_2 = [...]uint16{0, 15, 37, 57, 75, 96, 112, 127, 147, 167, 191, 226, 250, 279, 309, 340, 366, 385, 410, 425, 445, 457, 475, 505, 530, 547}
_ErrorCode_index_3 = [...]uint16{0, 14, 29, 50, 65, 90, 119, 158, 184, 208, 231, 249, 279, 301, 322, 340, 366, 390, 425}
_ErrorCode_index_4 = [...]uint16{0, 16, 45, 78, 97, 114, 144, 169, 192, 215, 238, 256, 276, 292, 308, 346, 379, 410, 448, 459, 477, 498, 532}
)
func (i ErrorCode) String() string {
switch {
case 1 <= i && i <= 14:
i -= 1
return _ErrorCode_name_0[_ErrorCode_index_0[i]:_ErrorCode_index_0[i+1]]
case 16 <= i && i <= 20:
i -= 16
return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
case 22 <= i && i <= 46:
i -= 22
return _ErrorCode_name_2[_ErrorCode_index_2[i]:_ErrorCode_index_2[i+1]]
case 48 <= i && i <= 65:
i -= 48
return _ErrorCode_name_3[_ErrorCode_index_3[i]:_ErrorCode_index_3[i+1]]
case 67 <= i && i <= 88:
i -= 67
return _ErrorCode_name_4[_ErrorCode_index_4[i]:_ErrorCode_index_4[i+1]]
case i == 97:
return _ErrorCode_name_5
default:
return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

@ -1,53 +0,0 @@
package qerr
import (
"fmt"
)
// ErrorCode can be used as a normal error without reason.
type ErrorCode uint16
func (e ErrorCode) Error() string {
return e.String()
}
// A QuicError consists of an error code plus a error reason
type QuicError struct {
ErrorCode ErrorCode
ErrorMessage string
}
// Error creates a new QuicError instance
func Error(errorCode ErrorCode, errorMessage string) *QuicError {
return &QuicError{
ErrorCode: errorCode,
ErrorMessage: errorMessage,
}
}
func (e *QuicError) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorCode.String(), e.ErrorMessage)
}
// Timeout says if this error is a timeout.
func (e *QuicError) Timeout() bool {
switch e.ErrorCode {
case NetworkIdleTimeout,
HandshakeTimeout,
TimeoutsWithOpenStreams:
return true
}
return false
}
// ToQuicError converts an arbitrary error to a QuicError. It leaves QuicErrors
// unchanged, and properly handles `ErrorCode`s.
func ToQuicError(err error) *QuicError {
switch e := err.(type) {
case *QuicError:
return e
case ErrorCode:
return Error(e, "")
}
return Error(InternalError, err.Error())
}
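
A test-style sketch of the typical call pattern: wrap an error code with a reason, check Timeout, and normalize foreign errors via ToQuicError:

```go
package qerr

import (
	"errors"
	"testing"
)

func TestQuicErrorSketch(t *testing.T) {
	qe := Error(HandshakeTimeout, "handshake did not complete in time")
	if qe.Error() != "HandshakeTimeout: handshake did not complete in time" {
		t.Fatalf("unexpected message: %s", qe)
	}
	if !qe.Timeout() {
		t.Fatal("HandshakeTimeout should report Timeout() == true")
	}
	if ToQuicError(errors.New("boom")).ErrorCode != InternalError {
		t.Fatal("unknown errors should map to InternalError")
	}
}
```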

@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC0DCCAbgCCQCmiwJpSoekpDANBgkqhkiG9w0BAQsFADAqMRMwEQYDVQQKDApx
dWljLWdvIENBMRMwEQYDVQQLDApxdWljLWdvIENBMB4XDTE4MTIwODA2NDIyMVoX
DTI4MTIwNTA2NDIyMVowKjETMBEGA1UECgwKcXVpYy1nbyBDQTETMBEGA1UECwwK
cXVpYy1nbyBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN5MxI09
i01xRON732BFIuxO2SGjA9jYkvUvNXK886gifp2BfWLcOW1DHkXxBnhWMqfpcIWM
GviF4G2Mp0HEJDMe+4LBxje/1e2WA+nzQlIZD6LaDi98nXJaAcCMM4a64Vm0i8Z3
+4c+O93+5TekPn507nl7QA1IaEEtoek7w7wDw4ZF3ET+nns2HwVpV/ugfuYOQbTJ
8Np+zO8EfPMTUjEpKdl4bp/yqcouWD+oIhoxmx1V+LxshcpSwtzHIAi6gjHUDCEe
bk5Y2GBT4VR5WKmNGvlfe9L0Gn0ZLJoeXDshrunF0xEmSv8MxlHcKH/u4IHiO+6x
+5sdslqY7uEPEhkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAhvXUMiatkgsnoRHc
UobKraGttETivxvtKpc48o1TSkR+kCKbMnygmrvc5niEqc9iDg8JI6HjBKJ3/hfA
uKdyiR8cQNcQRgJ/3FVx0n3KGDUbHJSuIQzFvXom2ZPdlAHFqAT+8AVrz42v8gct
gyiGdFCSNisDbevOiRHuJtZ0m8YsGgtfU48wqGOaSSsRz4mYD6kqBFd0+Ja3/EGv
vl24L5xMCy1zGGl6wKPa7TT7ok4TfD1YmIXOfmWYop6cTLwePLj1nHrLi0AlsSn1
2pFlosc9/qEbO5drqNoxUZfeF0L9RUSuArHRSO779dW/AmOtFdK3yaBGqflg0r7p
lYombA==
-----END CERTIFICATE-----

@ -1,57 +0,0 @@
package testdata
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io/ioutil"
"path"
"runtime"
)
var certPath string
func init() {
_, filename, _, ok := runtime.Caller(0)
if !ok {
panic("Failed to get current frame")
}
certPath = path.Dir(filename)
}
// GetCertificatePaths returns the paths to certificate and key
func GetCertificatePaths() (string, string) {
return path.Join(certPath, "cert.pem"), path.Join(certPath, "priv.key")
}
// GetTLSConfig returns a tls config for quic.clemente.io
func GetTLSConfig() *tls.Config {
cert, err := tls.LoadX509KeyPair(GetCertificatePaths())
if err != nil {
panic(err)
}
return &tls.Config{
Certificates: []tls.Certificate{cert},
}
}
// GetRootCA returns an x509.CertPool containing the CA certificate
func GetRootCA() *x509.CertPool {
caCertPath := path.Join(certPath, "ca.pem")
caCertRaw, err := ioutil.ReadFile(caCertPath)
if err != nil {
panic(err)
}
p, _ := pem.Decode(caCertRaw)
if p.Type != "CERTIFICATE" {
panic("expected a certificate")
}
caCert, err := x509.ParseCertificate(p.Bytes)
if err != nil {
panic(err)
}
certPool := x509.NewCertPool()
certPool.AddCert(caCert)
return certPool
}
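
A test-style sketch of how these helpers pair up: the server side uses GetTLSConfig (certificate plus key), the client side trusts the CA returned by GetRootCA. The ServerName below is taken from the GetTLSConfig comment above and may need to be adjusted to the certificate's actual subject:

```go
package testdata

import (
	"crypto/tls"
	"testing"
)

func TestTLSConfigSketch(t *testing.T) {
	serverConf := GetTLSConfig()
	if len(serverConf.Certificates) != 1 {
		t.Fatal("expected exactly one certificate")
	}
	clientConf := &tls.Config{
		RootCAs:    GetRootCA(),
		ServerName: "quic.clemente.io", // see the GetTLSConfig comment; adjust to the cert's real name
	}
	if clientConf.RootCAs == nil {
		t.Fatal("expected a CA pool")
	}
}
```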

@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC3jCCAcYCCQCV4BOv+SRo4zANBgkqhkiG9w0BAQUFADAqMRMwEQYDVQQKDApx
dWljLWdvIENBMRMwEQYDVQQLDApxdWljLWdvIENBMB4XDTE4MTIwODA2NDMwMloX
DTI4MTIwNTA2NDMwMlowODEQMA4GA1UECgwHcXVpYy1nbzEQMA4GA1UECwwHcXVp
Yy1nbzESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAyc/hS8XHkOJaLrdPOSTZFUBVyHNSfQUX/3dEpmccPlLQLgopYZZO
W/cVhkxAfQ3e68xKkuZKfZN5Hytn5V/AOSk281BqxFxpfCcKVYqVpDZH99+jaVfG
ImPp5Y22qCnbSEwYrMTcLiK8PVa4MkpKf1KNacVlqawU+ZWI5fevAFGTtmrMJ4S+
qZY7tAaVkax+OiKWWfhLQjJCsN3IIDysTfbWao6cYKgtTfqVChEddzS7LRJVRaB+
+huUbB87tRBJbCuJX65yB7Fw77YiKoFjc5r2845fcS2Ew4+w29mbXoj7M7g6eup5
SnCydsCvyNy6VkgaSlWS0DXvxuzWshwUrwIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
AQBWgmFunf44X3/NIjNvVLeQsfGW+4L/lCi2F5tqa70Hkda+xhKACnQQGB2qCSCF
Jfxj4iKrFJ7+JB8GnribWthLuDq49PQrTI+1wKFd9c2b8DXzJLz4Onw+mPX97pZm
TflQSIxXRaFAIQuUWNTArZZEe1ESSlnaBuE5w77LMf4GMFD3P3jzSHKUyM1sF97j
gRbIt8Jw7Uyd8vlXk6m2wvO5H3hZrrhJUJH3WW13a7wLJRnff2meKU90hkLQwuxO
kyh0k/h158/r2ibiahTmQEgHs9vQaCM+HXuk5P+Tzq5Zl/n0dMFZMfkqNkD4nym/
nu7zfdwMlcBjKt9g3BGw+KE3
-----END CERTIFICATE-----

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAyc/hS8XHkOJaLrdPOSTZFUBVyHNSfQUX/3dEpmccPlLQLgop
YZZOW/cVhkxAfQ3e68xKkuZKfZN5Hytn5V/AOSk281BqxFxpfCcKVYqVpDZH99+j
aVfGImPp5Y22qCnbSEwYrMTcLiK8PVa4MkpKf1KNacVlqawU+ZWI5fevAFGTtmrM
J4S+qZY7tAaVkax+OiKWWfhLQjJCsN3IIDysTfbWao6cYKgtTfqVChEddzS7LRJV
RaB++huUbB87tRBJbCuJX65yB7Fw77YiKoFjc5r2845fcS2Ew4+w29mbXoj7M7g6
eup5SnCydsCvyNy6VkgaSlWS0DXvxuzWshwUrwIDAQABAoIBADunQwVO1Qqync2p
SbWueqyZc8HotL1XwBw3eQdm+yZA/GBfiJPcBhWRF7+20mkkrHwuyuxZPjOYX/ki
r3dRslQzJpcNckHQvy1/rMJUUJ9VnDhc1sTQuTR5LC46kX9rv/HC7JhFKIBKrDHF
bHURGKxCDqLxQnfA8gJEfU7cw9HnxMxmKv7qJ3O7EHYMuTQstkYsGOr60zX/C+Zm
7YA+d7nx1LpL0m2lKs70iz5MzGg+KgKyrkMWQ30gpxILBxNzzuQr7Kv/+63/3+G9
nfCGeLmwGakPFpm6/GwiABE0yGa71YNAQs18iUTZwP/ZEDw3KB2SoG8wcqWjNAd+
cUF2PgECgYEA5Xe/OZouw9h0NBo0Zut+HC0YOuUfY72Ug9Fm8bAS6wDuPiO3jIvK
J40d+ZHNp4AakfTuugiqEDJRlV7T/F2K/KHDWvXTg5ZpAC8dsZKJMxyyAp8EniYQ
vsoFWeHBfsD83rCVKLcjDB3hbQH+MSoT3lsqjZRNiNUMK13gyuX7k28CgYEA4SWF
ySRXUqUezX5D8kV5rQVYLcw6WVB3czYd7cKf8zHy4xJX0ZicyZjohknMmKCkdx+M
1mrxlqUO7EBGokM8vs87m/4rz6bjgZffpWzUmP/x1+3f3j/wIZeqNilW8NqY5nLi
tj3JxMwaesU86rOekSy27BlX4sjQ8NRs7Z2d8sECgYBKAD8kBWwVbqWy88x4cHOA
BK7ut1tTIB1YEVzgjobbULaERaJ46c/sx16mUHYBEZf///xI9Ghbxs52nFlC5qve
4xAMMoDey8/a5lbuIDKs0BE8NSoZEm+OB7qIDP0IspYZ/tprgfwEeVJshBsEoew8
Ziwn8m66tPIyvhizdk2WcwKBgH2M8RgDffaGQbESEk3N1FZZvpx7YKZhqtrCeNoX
SB7T4cAigHpPAk+hRzlref46xrvvChiftmztSm8QQNNHb15wLauFh2Taic/Ao2Sa
VcukHnbtHYPQX9Y7vx1I3ESfgdgwhKBfwF5P+wwvZRL0ax5FsxPh5hJ/LZS+wKeY
13WBAoGAXSqG3ANmCyvSLVmAXGIbr0Tuixf/a25sPrlq7Im1H1OnqLrcyxWCLV3E
6gprhG5An0Zlr/FFRxVojf0TKmtJZs9B70/6WPwVvFtBduCM1zuUuCQYU9opTJQL
ElMIP4VfjABm4tm1fqGIy1PQP0Osb6/qb2DPPJqsFiW0oRByyMA=
-----END RSA PRIVATE KEY-----

@ -1,22 +0,0 @@
package utils
import "sync/atomic"
// An AtomicBool is an atomic bool
type AtomicBool struct {
v int32
}
// Set sets the value
func (a *AtomicBool) Set(value bool) {
var n int32
if value {
n = 1
}
atomic.StoreInt32(&a.v, n)
}
// Get gets the value
func (a *AtomicBool) Get() bool {
return atomic.LoadInt32(&a.v) != 0
}

@ -1,217 +0,0 @@
// This file was automatically generated by genny.
// Any changes will be lost if this file is regenerated.
// see https://github.com/cheekybits/genny
package utils
// Linked list implementation from the Go standard library.
// ByteIntervalElement is an element of a linked list.
type ByteIntervalElement struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *ByteIntervalElement
// The list to which this element belongs.
list *ByteIntervalList
// The value stored with this element.
Value ByteInterval
}
// Next returns the next list element or nil.
func (e *ByteIntervalElement) Next() *ByteIntervalElement {
if p := e.next; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// Prev returns the previous list element or nil.
func (e *ByteIntervalElement) Prev() *ByteIntervalElement {
if p := e.prev; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// ByteIntervalList is a linked list of ByteIntervals.
type ByteIntervalList struct {
root ByteIntervalElement // sentinel list element, only &root, root.prev, and root.next are used
len int // current list length excluding (this) sentinel element
}
// Init initializes or clears list l.
func (l *ByteIntervalList) Init() *ByteIntervalList {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}
// NewByteIntervalList returns an initialized list.
func NewByteIntervalList() *ByteIntervalList { return new(ByteIntervalList).Init() }
// Len returns the number of elements of list l.
// The complexity is O(1).
func (l *ByteIntervalList) Len() int { return l.len }
// Front returns the first element of list l or nil if the list is empty.
func (l *ByteIntervalList) Front() *ByteIntervalElement {
if l.len == 0 {
return nil
}
return l.root.next
}
// Back returns the last element of list l or nil if the list is empty.
func (l *ByteIntervalList) Back() *ByteIntervalElement {
if l.len == 0 {
return nil
}
return l.root.prev
}
// lazyInit lazily initializes a zero List value.
func (l *ByteIntervalList) lazyInit() {
if l.root.next == nil {
l.Init()
}
}
// insert inserts e after at, increments l.len, and returns e.
func (l *ByteIntervalList) insert(e, at *ByteIntervalElement) *ByteIntervalElement {
n := at.next
at.next = e
e.prev = at
e.next = n
n.prev = e
e.list = l
l.len++
return e
}
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *ByteIntervalList) insertValue(v ByteInterval, at *ByteIntervalElement) *ByteIntervalElement {
return l.insert(&ByteIntervalElement{Value: v}, at)
}
// remove removes e from its list, decrements l.len, and returns e.
func (l *ByteIntervalList) remove(e *ByteIntervalElement) *ByteIntervalElement {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--
return e
}
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
func (l *ByteIntervalList) Remove(e *ByteIntervalElement) ByteInterval {
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash
l.remove(e)
}
return e.Value
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *ByteIntervalList) PushFront(v ByteInterval) *ByteIntervalElement {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
func (l *ByteIntervalList) PushBack(v ByteInterval) *ByteIntervalElement {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *ByteIntervalList) InsertBefore(v ByteInterval, mark *ByteIntervalElement) *ByteIntervalElement {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark.prev)
}
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *ByteIntervalList) InsertAfter(v ByteInterval, mark *ByteIntervalElement) *ByteIntervalElement {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark)
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *ByteIntervalList) MoveToFront(e *ByteIntervalElement) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.insert(l.remove(e), &l.root)
}
// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *ByteIntervalList) MoveToBack(e *ByteIntervalElement) {
if e.list != l || l.root.prev == e {
return
}
// see comment in List.Remove about initialization of l
l.insert(l.remove(e), l.root.prev)
}
// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *ByteIntervalList) MoveBefore(e, mark *ByteIntervalElement) {
if e.list != l || e == mark || mark.list != l {
return
}
l.insert(l.remove(e), mark.prev)
}
// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *ByteIntervalList) MoveAfter(e, mark *ByteIntervalElement) {
if e.list != l || e == mark || mark.list != l {
return
}
l.insert(l.remove(e), mark)
}
// PushBackList inserts a copy of another list at the back of list l.
// The lists l and other may be the same. They must not be nil.
func (l *ByteIntervalList) PushBackList(other *ByteIntervalList) {
l.lazyInit()
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
l.insertValue(e.Value, l.root.prev)
}
}
// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
func (l *ByteIntervalList) PushFrontList(other *ByteIntervalList) {
l.lazyInit()
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
l.insertValue(e.Value, &l.root)
}
}

@ -1,17 +0,0 @@
package utils
import (
"bytes"
"io"
)
// A ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit unsigned integers.
type ByteOrder interface {
ReadUintN(b io.ByteReader, length uint8) (uint64, error)
ReadUint32(io.ByteReader) (uint32, error)
ReadUint16(io.ByteReader) (uint16, error)
WriteUintN(b *bytes.Buffer, length uint8, value uint64)
WriteUint32(*bytes.Buffer, uint32)
WriteUint16(*bytes.Buffer, uint16)
}

@ -1,74 +0,0 @@
package utils
import (
"bytes"
"io"
)
// BigEndian is the big-endian implementation of ByteOrder.
var BigEndian ByteOrder = bigEndian{}
type bigEndian struct{}
var _ ByteOrder = &bigEndian{}
// ReadUintN reads N bytes
func (bigEndian) ReadUintN(b io.ByteReader, length uint8) (uint64, error) {
var res uint64
for i := uint8(0); i < length; i++ {
bt, err := b.ReadByte()
if err != nil {
return 0, err
}
res ^= uint64(bt) << ((length - 1 - i) * 8)
}
return res, nil
}
// ReadUint32 reads a uint32
func (bigEndian) ReadUint32(b io.ByteReader) (uint32, error) {
var b1, b2, b3, b4 uint8
var err error
if b4, err = b.ReadByte(); err != nil {
return 0, err
}
if b3, err = b.ReadByte(); err != nil {
return 0, err
}
if b2, err = b.ReadByte(); err != nil {
return 0, err
}
if b1, err = b.ReadByte(); err != nil {
return 0, err
}
return uint32(b1) + uint32(b2)<<8 + uint32(b3)<<16 + uint32(b4)<<24, nil
}
// ReadUint16 reads a uint16
func (bigEndian) ReadUint16(b io.ByteReader) (uint16, error) {
var b1, b2 uint8
var err error
if b2, err = b.ReadByte(); err != nil {
return 0, err
}
if b1, err = b.ReadByte(); err != nil {
return 0, err
}
return uint16(b1) + uint16(b2)<<8, nil
}
func (bigEndian) WriteUintN(b *bytes.Buffer, length uint8, i uint64) {
for j := length; j > 0; j-- {
b.WriteByte(uint8(i >> (8 * (j - 1))))
}
}
// WriteUint32 writes a uint32
func (bigEndian) WriteUint32(b *bytes.Buffer, i uint32) {
b.Write([]byte{uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i)})
}
// WriteUint16 writes a uint16
func (bigEndian) WriteUint16(b *bytes.Buffer, i uint16) {
b.Write([]byte{uint8(i >> 8), uint8(i)})
}
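
A test-style round-trip sketch, confirming that the most significant byte is written first:

```go
package utils

import (
	"bytes"
	"testing"
)

func TestBigEndianRoundTrip(t *testing.T) {
	buf := &bytes.Buffer{}
	BigEndian.WriteUint32(buf, 0xdeadbeef)
	BigEndian.WriteUintN(buf, 3, 0x0a0b0c)
	if buf.Bytes()[0] != 0xde {
		t.Fatal("the most significant byte should be written first")
	}
	if v, err := BigEndian.ReadUint32(buf); err != nil || v != 0xdeadbeef {
		t.Fatalf("got %#x, %v", v, err)
	}
	if v, err := BigEndian.ReadUintN(buf, 3); err != nil || v != 0x0a0b0c {
		t.Fatalf("got %#x, %v", v, err)
	}
}
```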

@ -1,4 +0,0 @@
package utils
//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out byteinterval_linkedlist.go gen Item=ByteInterval
//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out packetinterval_linkedlist.go gen Item=PacketInterval

@ -1,27 +0,0 @@
package utils
import (
"net/url"
"strings"
)
// HostnameFromAddr determines the hostname in an address string
func HostnameFromAddr(addr string) (string, error) {
p, err := url.Parse(addr)
if err != nil {
return "", err
}
h := p.Host
// copied from https://golang.org/src/net/http/transport.go
if hasPort(h) {
h = h[:strings.LastIndex(h, ":")]
}
return h, nil
}
// copied from https://golang.org/src/net/http/http.go
func hasPort(s string) bool {
return strings.LastIndex(s, ":") > strings.LastIndex(s, "]")
}
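
A test-style sketch; the URL is only an example (the host name appears in the testdata comments elsewhere in this diff):

```go
package utils

import "testing"

// TestHostnameFromAddr strips the scheme, port and path from an address.
func TestHostnameFromAddr(t *testing.T) {
	h, err := HostnameFromAddr("https://quic.clemente.io:4433/index.html")
	if err != nil {
		t.Fatal(err)
	}
	if h != "quic.clemente.io" {
		t.Fatalf("got %q, want quic.clemente.io", h)
	}
}
```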

@ -1,11 +0,0 @@
# Usage
This is the Go standard library implementation of a linked list
(https://golang.org/src/container/list/list.go), modified such that genny
(https://github.com/cheekybits/genny) can be used to generate a typed linked
list.
To generate, run
```
genny -pkg $PACKAGE -in linkedlist.go -out $OUTFILE gen Item=$TYPE
```

Some files were not shown because too many files have changed in this diff.