v2fly/transport/internet/kcp/kcp.go
// Package kcp - A Fast and Reliable ARQ Protocol
//
// Acknowledgement:
// skywind3000@github for inventing the KCP protocol
// xtaci@github for translating to Golang
package kcp
import (
"encoding/binary"
"github.com/v2ray/v2ray-core/common/alloc"
)
const (
IKCP_RTO_NDL = 30 // no delay min rto
IKCP_RTO_MIN = 100 // normal min rto
IKCP_RTO_DEF = 200
IKCP_RTO_MAX = 60000
IKCP_CMD_PUSH = 81 // cmd: push data
IKCP_CMD_ACK = 82 // cmd: ack
IKCP_WND_SND = 32
IKCP_WND_RCV = 32
IKCP_MTU_DEF = 1350
IKCP_ACK_FAST = 3
IKCP_INTERVAL = 100
IKCP_OVERHEAD = 24
IKCP_DEADLINK = 20
IKCP_THRESH_INIT = 2
IKCP_THRESH_MIN = 2
IKCP_PROBE_INIT = 7000 // 7 secs to probe window size
IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// Output is a closure which captures conn and calls conn.Write
type Output func(buf []byte)
/* encode 8 bits unsigned int */
func ikcp_encode8u(p []byte, c byte) []byte {
p[0] = c
return p[1:]
}
/* decode 8 bits unsigned int */
func ikcp_decode8u(p []byte, c *byte) []byte {
*c = p[0]
return p[1:]
}
/* encode 16 bits unsigned int (lsb) */
func ikcp_encode16u(p []byte, w uint16) []byte {
binary.LittleEndian.PutUint16(p, w)
return p[2:]
}
/* decode 16 bits unsigned int (lsb) */
func ikcp_decode16u(p []byte, w *uint16) []byte {
*w = binary.LittleEndian.Uint16(p)
return p[2:]
}
/* encode 32 bits unsigned int (lsb) */
func ikcp_encode32u(p []byte, l uint32) []byte {
binary.LittleEndian.PutUint32(p, l)
return p[4:]
}
/* decode 32 bits unsigned int (lsb) */
func ikcp_decode32u(p []byte, l *uint32) []byte {
*l = binary.LittleEndian.Uint32(p)
return p[4:]
}
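// The helpers above advance the slice as they encode/decode, so consecutive
// fields can be written and read back in sequence. An illustrative sketch
// (not part of the original source):
//
//	buf := make([]byte, 8)
//	rest := ikcp_encode32u(buf, 42) // rest now points past the first field
//	ikcp_encode32u(rest, 7)
//	var a, b uint32
//	rest = ikcp_decode32u(buf, &a) // a == 42
//	ikcp_decode32u(rest, &b)       // b == 7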
func _imin_(a, b uint32) uint32 {
if a <= b {
return a
}
return b
}
func _imax_(a, b uint32) uint32 {
if a >= b {
return a
}
return b
}
func _itimediff(later, earlier uint32) int32 {
return (int32)(later - earlier)
}
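// The unsigned subtraction followed by the signed cast makes this comparison
// robust to 32-bit wraparound: with later = 2 and earlier = 0xFFFFFFFF,
// _itimediff returns 3, correctly treating 'later' as ahead of 'earlier'
// even though it is numerically smaller.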
// Segment defines a KCP segment
type Segment struct {
conv uint32 // conversation id
cmd uint32 // command
frg uint32 // fragment number
wnd uint32 // window size
ts uint32 // timestamp
sn uint32 // sequence number
una uint32 // first unacknowledged sequence number
resendts uint32 // timestamp for the next resend
fastack uint32 // skip-ACK counter for fast retransmit
xmit uint32 // number of transmissions
data *alloc.Buffer
}
// encode a segment into buffer
func (seg *Segment) encode(ptr []byte) []byte {
ptr = ikcp_encode32u(ptr, seg.conv)
ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
ptr = ikcp_encode8u(ptr, uint8(seg.frg))
ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
ptr = ikcp_encode32u(ptr, seg.ts)
ptr = ikcp_encode32u(ptr, seg.sn)
ptr = ikcp_encode32u(ptr, seg.una)
ptr = ikcp_encode16u(ptr, uint16(seg.data.Len()))
return ptr
}
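// As written by encode above, each segment header is laid out on the wire as:
//
//	conv uint32 (4 bytes, little-endian)
//	cmd  uint8  (1 byte)
//	frg  uint8  (1 byte)
//	wnd  uint16 (2 bytes, little-endian)
//	ts   uint32 (4 bytes, little-endian)
//	sn   uint32 (4 bytes, little-endian)
//	una  uint32 (4 bytes, little-endian)
//	len  uint16 (2 bytes, little-endian) - payload length
//
// The payload bytes themselves are copied in after the header by the caller
// (see flush).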
// Release releases the segment's data buffer.
func (seg *Segment) Release() {
seg.data.Release()
seg.data = nil
}
// NewSegment creates a KCP segment
func NewSegment() *Segment {
return &Segment{
data: alloc.NewSmallBuffer().Clear(),
}
}
// KCP defines a single KCP connection
type KCP struct {
conv, mtu, mss, state uint32
snd_una, snd_nxt, rcv_nxt uint32
ts_recent, ts_lastack, ssthresh uint32
rx_rttvar, rx_srtt, rx_rto uint32
snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32
current, interval, ts_flush, xmit uint32
updated bool
ts_probe, probe_wait uint32
dead_link, incr uint32
snd_queue *SendingQueue
rcv_queue []*Segment
snd_buf []*Segment
rcv_buf *ReceivingWindow
acklist []uint32
buffer []byte
fastresend int32
congestionControl bool
output Output
}
// NewKCP creates a new KCP control object. 'conv' must be identical on both
// endpoints of the same connection.
func NewKCP(conv uint32, mtu uint32, sendingWindowSize uint32, receivingWindowSize uint32, sendingQueueSize uint32, output Output) *KCP {
kcp := new(KCP)
kcp.conv = conv
kcp.snd_wnd = sendingWindowSize
kcp.rcv_wnd = receivingWindowSize
kcp.rmt_wnd = IKCP_WND_RCV
kcp.mtu = mtu
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
kcp.rx_rto = IKCP_RTO_DEF
kcp.interval = IKCP_INTERVAL
kcp.ts_flush = IKCP_INTERVAL
kcp.ssthresh = IKCP_THRESH_INIT
kcp.dead_link = IKCP_DEADLINK
kcp.output = output
kcp.rcv_buf = NewReceivingWindow(receivingWindowSize)
kcp.snd_queue = NewSendingQueue(sendingQueueSize)
return kcp
}
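// A minimal usage sketch (illustrative only; the address, conv value and
// queue size below are assumptions, not values mandated by this package):
//
//	conn, _ := net.Dial("udp", "example.com:9000")
//	kcp := NewKCP(1, IKCP_MTU_DEF, IKCP_WND_SND, IKCP_WND_RCV, 128, func(buf []byte) {
//		conn.Write(buf)
//	})
//	kcp.Send([]byte("hello"))
//	kcp.Update(nowMillis) // drive the state machine with a millisecond clock
//	// feed each received UDP payload back in with kcp.Input(packet)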
// Recv is the user/upper level receive function: it returns the number of
// bytes copied into buffer, or a value below zero for EAGAIN.
func (kcp *KCP) Recv(buffer []byte) (n int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
// merge fragment
count := 0
for _, seg := range kcp.rcv_queue {
dataLen := seg.data.Len()
if dataLen > len(buffer) {
break
}
copy(buffer, seg.data.Value)
seg.Release()
buffer = buffer[dataLen:]
n += dataLen
count++
}
kcp.rcv_queue = kcp.rcv_queue[count:]
kcp.DumpReceivingBuf()
return
}
// DumpReceivingBuf moves available data from rcv_buf -> rcv_queue
// @Private
func (kcp *KCP) DumpReceivingBuf() {
for {
seg := kcp.rcv_buf.RemoveFirst()
if seg == nil {
break
}
kcp.rcv_queue = append(kcp.rcv_queue, seg)
kcp.rcv_buf.Advance()
kcp.rcv_nxt++
}
}
// Send is the user/upper level send function. It splits buffer into MSS-sized
// segments and queues them, returning the number of bytes accepted, which may
// be less than len(buffer) if the sending queue fills up.
func (kcp *KCP) Send(buffer []byte) int {
nBytes := 0
for len(buffer) > 0 && !kcp.snd_queue.IsFull() {
var size int
if len(buffer) > int(kcp.mss) {
size = int(kcp.mss)
} else {
size = len(buffer)
}
seg := NewSegment()
seg.data.Append(buffer[:size])
kcp.snd_queue.Push(seg)
buffer = buffer[size:]
nBytes += size
}
return nBytes
}
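// Since Send may accept only part of the buffer once the queue is full,
// callers that must deliver everything can loop. A hedged sketch:
//
//	for len(data) > 0 {
//		n := kcp.Send(data)
//		if n == 0 {
//			break // queue full; retry after the next Update/flush
//		}
//		data = data[n:]
//	}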
// update_ack updates the smoothed RTT estimate and the retransmission
// timeout, following https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
var rto uint32
if kcp.rx_srtt == 0 {
kcp.rx_srtt = uint32(rtt)
kcp.rx_rttvar = uint32(rtt) / 2
} else {
delta := rtt - int32(kcp.rx_srtt)
if delta < 0 {
delta = -delta
}
kcp.rx_rttvar = (3*kcp.rx_rttvar + uint32(delta)) / 4
kcp.rx_srtt = (7*kcp.rx_srtt + uint32(rtt)) / 8
if kcp.rx_srtt < kcp.interval {
kcp.rx_srtt = kcp.interval
}
}
rto = kcp.rx_srtt + _imax_(kcp.interval, 4*kcp.rx_rttvar)
if rto > IKCP_RTO_MAX {
rto = IKCP_RTO_MAX
}
kcp.rx_rto = rto
}
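// In RFC 6298 notation, the code above maintains:
//
//	RTTVAR = (3*RTTVAR + |SRTT - RTT|) / 4
//	SRTT   = (7*SRTT + RTT) / 8
//	RTO    = SRTT + max(interval, 4*RTTVAR), capped at IKCP_RTO_MAX
//
// with SRTT additionally floored at the update interval.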
func (kcp *KCP) shrink_buf() {
if len(kcp.snd_buf) > 0 {
seg := kcp.snd_buf[0]
kcp.snd_una = seg.sn
} else {
kcp.snd_una = kcp.snd_nxt
}
}
func (kcp *KCP) parse_ack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k, seg := range kcp.snd_buf {
if sn == seg.sn {
kcp.snd_buf = append(kcp.snd_buf[:k], kcp.snd_buf[k+1:]...)
seg.Release()
break
}
if _itimediff(sn, seg.sn) < 0 {
break
}
}
}
func (kcp *KCP) parse_fastack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for _, seg := range kcp.snd_buf {
if _itimediff(sn, seg.sn) < 0 {
break
} else if sn != seg.sn {
seg.fastack++
}
}
}
func (kcp *KCP) parse_una(una uint32) {
count := 0
for _, seg := range kcp.snd_buf {
if _itimediff(una, seg.sn) > 0 {
seg.Release()
count++
} else {
break
}
}
kcp.snd_buf = kcp.snd_buf[count:]
}
// ack_push appends a pending acknowledgement as an (sn, ts) pair on the flat acklist.
func (kcp *KCP) ack_push(sn, ts uint32) {
kcp.acklist = append(kcp.acklist, sn, ts)
}
func (kcp *KCP) ack_get(p int) (sn, ts uint32) {
return kcp.acklist[p*2+0], kcp.acklist[p*2+1]
}
func (kcp *KCP) parse_data(newseg *Segment) {
sn := newseg.sn
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
_itimediff(sn, kcp.rcv_nxt) < 0 {
return
}
idx := sn - kcp.rcv_nxt
if !kcp.rcv_buf.Set(idx, newseg) {
newseg.Release()
}
kcp.DumpReceivingBuf()
}
// Input is to be called when a low-level packet (e.g. a UDP packet) is received.
func (kcp *KCP) Input(data []byte) int {
//una := kcp.snd_una
if len(data) < IKCP_OVERHEAD {
return -1
}
var maxack uint32
var flag int
for {
var ts, sn, una, conv uint32
var wnd, length uint16
var cmd, frg uint8
if len(data) < int(IKCP_OVERHEAD) {
break
}
data = ikcp_decode32u(data, &conv)
if conv != kcp.conv {
return -1
}
data = ikcp_decode8u(data, &cmd)
data = ikcp_decode8u(data, &frg)
data = ikcp_decode16u(data, &wnd)
data = ikcp_decode32u(data, &ts)
data = ikcp_decode32u(data, &sn)
data = ikcp_decode32u(data, &una)
data = ikcp_decode16u(data, &length)
if len(data) < int(length) {
return -2
}
if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK {
return -3
}
if kcp.rmt_wnd < uint32(wnd) {
kcp.rmt_wnd = uint32(wnd)
}
kcp.parse_una(una)
kcp.shrink_buf()
if cmd == IKCP_CMD_ACK {
if _itimediff(kcp.current, ts) >= 0 {
kcp.update_ack(_itimediff(kcp.current, ts))
}
kcp.parse_ack(sn)
kcp.shrink_buf()
if flag == 0 {
flag = 1
maxack = sn
} else if _itimediff(sn, maxack) > 0 {
maxack = sn
}
} else if cmd == IKCP_CMD_PUSH {
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
kcp.ack_push(sn, ts)
if _itimediff(sn, kcp.rcv_nxt) >= 0 {
seg := NewSegment()
seg.conv = conv
seg.cmd = uint32(cmd)
seg.frg = uint32(frg)
seg.wnd = uint32(wnd)
seg.ts = ts
seg.sn = sn
seg.una = una
seg.data.Append(data[:length])
kcp.parse_data(seg)
}
}
} else {
return -3
}
data = data[length:]
}
if flag != 0 {
kcp.parse_fastack(maxack)
}
/*
if _itimediff(kcp.snd_una, una) > 0 {
if kcp.cwnd < kcp.rmt_wnd {
mss := kcp.mss
if kcp.cwnd < kcp.ssthresh {
kcp.cwnd++
kcp.incr += mss
} else {
if kcp.incr < mss {
kcp.incr = mss
}
kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
if (kcp.cwnd+1)*mss <= kcp.incr {
kcp.cwnd++
}
}
if kcp.cwnd > kcp.rmt_wnd {
kcp.cwnd = kcp.rmt_wnd
kcp.incr = kcp.rmt_wnd * mss
}
}
}*/
return 0
}
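// A hedged sketch of feeding datagrams into Input (the conn and the buffer
// size are assumptions for illustration). Input returns 0 on success, -1 on
// a short packet or conv mismatch, -2 on a truncated payload, and -3 on an
// unknown command:
//
//	pkt := make([]byte, 2048)
//	for {
//		n, err := conn.Read(pkt)
//		if err != nil {
//			break
//		}
//		kcp.Input(pkt[:n])
//	}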
// flush pending data
func (kcp *KCP) flush() {
current := kcp.current
buffer := kcp.buffer
change := 0
//lost := false
if !kcp.updated {
return
}
var seg Segment
seg.conv = kcp.conv
seg.cmd = IKCP_CMD_ACK
seg.wnd = uint32(kcp.rcv_nxt + kcp.rcv_wnd)
seg.una = kcp.rcv_nxt
// flush acknowledges
count := len(kcp.acklist) / 2
ptr := buffer
for i := 0; i < count; i++ {
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer[:size])
ptr = buffer
}
seg.sn, seg.ts = kcp.ack_get(i)
ptr = seg.encode(ptr)
}
kcp.acklist = nil
// calculate window size
cwnd := _imin_(kcp.snd_una+kcp.snd_wnd, kcp.rmt_wnd)
if kcp.congestionControl {
cwnd = _imin_(kcp.cwnd, cwnd)
}
for !kcp.snd_queue.IsEmpty() && _itimediff(kcp.snd_nxt, cwnd) < 0 {
newseg := kcp.snd_queue.Pop()
newseg.conv = kcp.conv
newseg.cmd = IKCP_CMD_PUSH
newseg.wnd = seg.wnd
newseg.ts = current
newseg.sn = kcp.snd_nxt
newseg.una = kcp.rcv_nxt
newseg.resendts = current
newseg.fastack = 0
newseg.xmit = 0
kcp.snd_buf = append(kcp.snd_buf, newseg)
kcp.snd_nxt++
}
// calculate resent
resent := uint32(kcp.fastresend)
if kcp.fastresend <= 0 {
resent = 0xffffffff
}
// flush data segments
for _, segment := range kcp.snd_buf {
needsend := false
if segment.xmit == 0 {
needsend = true
segment.xmit++
segment.resendts = current + (kcp.rx_rto * 3 / 2) + kcp.interval
} else if _itimediff(current, segment.resendts) >= 0 {
needsend = true
segment.xmit++
kcp.xmit++
segment.resendts = current + (kcp.rx_rto * 3 / 2) + kcp.interval
//lost = true
} else if segment.fastack >= resent {
needsend = true
segment.xmit++
segment.fastack = 0
segment.resendts = current + (kcp.rx_rto * 3 / 2) + kcp.interval
change++
}
if needsend {
segment.ts = current
segment.wnd = seg.wnd
segment.una = kcp.rcv_nxt
size := len(buffer) - len(ptr)
need := IKCP_OVERHEAD + segment.data.Len()
if size+need >= int(kcp.mtu) {
kcp.output(buffer[:size])
ptr = buffer
}
ptr = segment.encode(ptr)
copy(ptr, segment.data.Value)
ptr = ptr[segment.data.Len():]
if segment.xmit >= kcp.dead_link {
kcp.state = 0xFFFFFFFF
}
}
}
// flush remaining segments
size := len(buffer) - len(ptr)
if size > 0 {
kcp.output(buffer[:size])
}
// update ssthresh
// rate halving, https://tools.ietf.org/html/rfc6937
/*
if change != 0 {
inflight := kcp.snd_nxt - kcp.snd_una
kcp.ssthresh = inflight / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = kcp.ssthresh + resent
kcp.incr = kcp.cwnd * kcp.mss
}*/
// congestion control, https://tools.ietf.org/html/rfc5681
/*
if lost {
kcp.ssthresh = cwnd / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = 1
kcp.incr = kcp.mss
}
if kcp.cwnd < 1 {
kcp.cwnd = 1
kcp.incr = kcp.mss
}*/
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// Check when to call it again (in the absence of Input/Send calls).
// 'current' is the current timestamp in milliseconds.
func (kcp *KCP) Update(current uint32) {
var slap int32
kcp.current = current
if !kcp.updated {
kcp.updated = true
kcp.ts_flush = kcp.current
}
slap = _itimediff(kcp.current, kcp.ts_flush)
if slap >= 10000 || slap < -10000 {
kcp.ts_flush = kcp.current
slap = 0
}
if slap >= 0 {
kcp.ts_flush += kcp.interval
if _itimediff(kcp.current, kcp.ts_flush) >= 0 {
kcp.ts_flush = kcp.current + kcp.interval
}
kcp.flush()
}
}
// Check determines when you should next invoke Update: it returns the
// timestamp, in milliseconds, at which Update should be called if there are
// no intervening Input/Send calls. You can call Update at that time instead
// of calling it repeatedly; this avoids unnecessary Update invocations.
// Use it to schedule Update (e.g. when implementing an epoll-like mechanism,
// or to optimize Update when handling massive numbers of KCP connections).
func (kcp *KCP) Check(current uint32) uint32 {
ts_flush := kcp.ts_flush
tm_flush := int32(0x7fffffff)
tm_packet := int32(0x7fffffff)
minimal := uint32(0)
if !kcp.updated {
return current
}
if _itimediff(current, ts_flush) >= 10000 ||
_itimediff(current, ts_flush) < -10000 {
ts_flush = current
}
if _itimediff(current, ts_flush) >= 0 {
return current
}
tm_flush = _itimediff(ts_flush, current)
for _, seg := range kcp.snd_buf {
diff := _itimediff(seg.resendts, current)
if diff <= 0 {
return current
}
if diff < tm_packet {
tm_packet = diff
}
}
minimal = uint32(tm_packet)
if tm_packet >= tm_flush {
minimal = uint32(tm_flush)
}
if minimal >= kcp.interval {
minimal = kcp.interval
}
return current + minimal
}
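// A hedged scheduling sketch that uses Check to avoid busy polling (the
// millisecond clock source is an assumption for illustration):
//
//	now := nowMillis()
//	kcp.Update(now)
//	next := kcp.Check(now)
//	time.Sleep(time.Duration(next-now) * time.Millisecond)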
// NoDelay sets tuning options.
// interval: internal update timer interval in milliseconds; the default is 100ms.
// resend: 0 disables fast resend (the default); a positive value triggers fast
// resend after that many duplicate ACKs.
// congestionControl: whether congestion control is applied.
func (kcp *KCP) NoDelay(interval uint32, resend int, congestionControl bool) int {
kcp.interval = interval
if resend >= 0 {
kcp.fastresend = int32(resend)
}
kcp.congestionControl = congestionControl
return 0
}
// WaitSnd returns the number of packets waiting to be sent.
func (kcp *KCP) WaitSnd() uint32 {
return uint32(len(kcp.snd_buf)) + kcp.snd_queue.Len()
}
// ClearSendQueue drops all pending and in-flight outbound segments.
func (kcp *KCP) ClearSendQueue() {
kcp.snd_queue.Clear()
for _, seg := range kcp.snd_buf {
seg.Release()
}
kcp.snd_buf = nil
}