package mux

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/v2fly/v2ray-core/v4/common"
	"github.com/v2fly/v2ray-core/v4/common/buf"
	"github.com/v2fly/v2ray-core/v4/common/errors"
	"github.com/v2fly/v2ray-core/v4/common/net"
	"github.com/v2fly/v2ray-core/v4/common/protocol"
	"github.com/v2fly/v2ray-core/v4/common/session"
	"github.com/v2fly/v2ray-core/v4/common/signal/done"
	"github.com/v2fly/v2ray-core/v4/common/task"
	"github.com/v2fly/v2ray-core/v4/proxy"
	"github.com/v2fly/v2ray-core/v4/transport"
	"github.com/v2fly/v2ray-core/v4/transport/internet"
	"github.com/v2fly/v2ray-core/v4/transport/pipe"
)
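
// ClientManager dispatches outbound links onto mux client workers chosen by
// its WorkerPicker.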
type ClientManager struct {
	Enabled bool // whether mux is enabled from user config
	Picker  WorkerPicker
}
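
// Dispatch tries up to 16 times to hand the link to an available worker and
// returns an error if no worker can accept it.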
func (m *ClientManager) Dispatch(ctx context.Context, link *transport.Link) error {
	for i := 0; i < 16; i++ {
		worker, err := m.Picker.PickAvailable()
		if err != nil {
			return err
		}
		if worker.Dispatch(ctx, link) {
			return nil
		}
	}

	return newError("unable to find an available mux client").AtWarning()
}
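
// WorkerPicker selects a ClientWorker that can carry one more sub-connection.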
type WorkerPicker interface {
	PickAvailable() (*ClientWorker, error)
}
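
// IncrementalWorkerPicker reuses existing workers while they have spare
// capacity and asks its Factory for a new one only when all of them are full.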
type IncrementalWorkerPicker struct {
	Factory ClientWorkerFactory

	access      sync.Mutex
	workers     []*ClientWorker
	cleanupTask *task.Periodic
}
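
// cleanupFunc runs as the periodic cleanup task: it prunes closed workers
// from the pool.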
func (p *IncrementalWorkerPicker) cleanupFunc() error {
	p.access.Lock()
	defer p.access.Unlock()

	if len(p.workers) == 0 {
		return newError("no worker")
	}

	p.cleanup()
	return nil
}
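
// cleanup drops closed workers. The caller must hold p.access.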
func (p *IncrementalWorkerPicker) cleanup() {
	var activeWorkers []*ClientWorker
	for _, w := range p.workers {
		if !w.Closed() {
			activeWorkers = append(activeWorkers, w)
		}
	}
	p.workers = activeWorkers
}
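
// findAvailable returns the index of the first worker that is not full, or -1
// if every worker is full. The caller must hold p.access.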
func (p *IncrementalWorkerPicker) findAvailable() int {
	for idx, w := range p.workers {
		if !w.IsFull() {
			return idx
		}
	}

	return -1
}
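
// pickInternal returns a worker with spare capacity, creating one through the
// Factory when necessary. The boolean reports whether a new worker was
// created, so the caller can make sure the cleanup task is running.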
func (p *IncrementalWorkerPicker) pickInternal() (*ClientWorker, bool, error) {
	p.access.Lock()
	defer p.access.Unlock()

	idx := p.findAvailable()
	if idx >= 0 {
		n := len(p.workers)
		if n > 1 && idx != n-1 {
			p.workers[n-1], p.workers[idx] = p.workers[idx], p.workers[n-1]
		}
		return p.workers[idx], false, nil
	}

	p.cleanup()

	worker, err := p.Factory.Create()
	if err != nil {
		return nil, false, err
	}
	p.workers = append(p.workers, worker)

	if p.cleanupTask == nil {
		p.cleanupTask = &task.Periodic{
			Interval: time.Second * 30,
			Execute:  p.cleanupFunc,
		}
	}

	return worker, true, nil
}
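
// PickAvailable implements WorkerPicker.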
func (p *IncrementalWorkerPicker) PickAvailable() (*ClientWorker, error) {
	worker, start, err := p.pickInternal()
	if start {
		common.Must(p.cleanupTask.Start())
	}

	return worker, err
}
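
// ClientWorkerFactory creates ClientWorkers.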
type ClientWorkerFactory interface {
	Create() (*ClientWorker, error)
}
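
// DialingWorkerFactory creates workers that reach the remote mux server
// through the configured outbound proxy and dialer.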
type DialingWorkerFactory struct {
	Proxy    proxy.Outbound
	Dialer   internet.Dialer
	Strategy ClientStrategy
}
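
// Create implements ClientWorkerFactory. It connects a new ClientWorker to
// the outbound proxy through a pair of in-memory pipes and keeps the proxy
// running in a background goroutine until the worker is closed.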
func (f *DialingWorkerFactory) Create() (*ClientWorker, error) {
	opts := []pipe.Option{pipe.WithSizeLimit(64 * 1024)}
	uplinkReader, uplinkWriter := pipe.New(opts...)
	downlinkReader, downlinkWriter := pipe.New(opts...)

	c, err := NewClientWorker(transport.Link{
		Reader: downlinkReader,
		Writer: uplinkWriter,
	}, f.Strategy)
	if err != nil {
		return nil, err
	}

	go func(p proxy.Outbound, d internet.Dialer, c common.Closable) {
		ctx := session.ContextWithOutbound(context.Background(), &session.Outbound{
			Target: net.TCPDestination(muxCoolAddress, muxCoolPort),
		})
		ctx, cancel := context.WithCancel(ctx)

		if err := p.Process(ctx, &transport.Link{Reader: uplinkReader, Writer: downlinkWriter}, d); err != nil {
			errors.New("failed to handle mux client connection").Base(err).WriteToLog()
		}
		common.Must(c.Close())
		cancel()
	}(f.Proxy, f.Dialer, c.done)

	return c, nil
}
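
// ClientStrategy limits how much a single worker may carry: MaxConcurrency
// caps the number of simultaneously active sub-connections, and MaxConnection
// caps the total number of sub-connections over the worker's lifetime.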
type ClientStrategy struct {
	MaxConcurrency uint32
	MaxConnection  uint32
}
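
// ClientWorker multiplexes sub-connections onto a single outbound link.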
type ClientWorker struct {
	sessionManager *SessionManager
	link           transport.Link
	done           *done.Instance
	strategy       ClientStrategy
}

var muxCoolAddress = net.DomainAddress("v1.mux.cool")
var muxCoolPort = net.Port(9527)

// NewClientWorker creates a new mux ClientWorker on top of the given link.
func NewClientWorker(stream transport.Link, s ClientStrategy) (*ClientWorker, error) {
	c := &ClientWorker{
		sessionManager: NewSessionManager(),
		link:           stream,
		done:           done.New(),
		strategy:       s,
	}

	go c.fetchOutput()
	go c.monitor()

	return c, nil
}
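
// TotalConnections returns the total number of sub-connections this worker
// has ever carried.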
func (m *ClientWorker) TotalConnections() uint32 {
	return uint32(m.sessionManager.Count())
}
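
// ActiveConnections returns the number of currently active sub-connections.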
func (m *ClientWorker) ActiveConnections() uint32 {
	return uint32(m.sessionManager.Size())
}

// Closed returns true if this ClientWorker is closed.
func (m *ClientWorker) Closed() bool {
	return m.done.Done()
}
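
// monitor tears down the link once the worker is done, and periodically
// closes the worker when it has no sessions left and accepts no more.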
func (m *ClientWorker) monitor() {
	timer := time.NewTicker(time.Second * 16)
	defer timer.Stop()

	for {
		select {
		case <-m.done.Wait():
			m.sessionManager.Close()
			common.Close(m.link.Writer)
			common.Interrupt(m.link.Reader)
			return
		case <-timer.C:
			size := m.sessionManager.Size()
			if size == 0 && m.sessionManager.CloseIfNoSession() {
				common.Must(m.done.Close())
			}
		}
	}
}
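
// writeFirstPayload waits up to 100ms for an initial payload from the reader;
// if nothing arrives in time (or the reader does not support read timeouts),
// it writes an empty MultiBuffer so the session's first frame is still sent.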
func writeFirstPayload(reader buf.Reader, writer *Writer) error {
	err := buf.CopyOnceTimeout(reader, writer, time.Millisecond*100)
	if err == buf.ErrNotTimeoutReader || err == buf.ErrReadTimeout {
		return writer.WriteMultiBuffer(buf.MultiBuffer{})
	}

	if err != nil {
		return err
	}

	return nil
}
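
// fetchInput pumps upstream data of sub-connection s into the mux link until
// the input is drained or an error occurs.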
func fetchInput(ctx context.Context, s *Session, output buf.Writer) {
	dest := session.OutboundFromContext(ctx).Target
	transferType := protocol.TransferTypeStream
	if dest.Network == net.Network_UDP {
		transferType = protocol.TransferTypePacket
	}
	s.transferType = transferType
	writer := NewWriter(s.ID, dest, output, transferType)
	defer s.Close()
	defer writer.Close()

	newError("dispatching request to ", dest).WriteToLog(session.ExportIDToError(ctx))
	if err := writeFirstPayload(s.input, writer); err != nil {
		newError("failed to write first payload").Base(err).WriteToLog(session.ExportIDToError(ctx))
		writer.hasError = true
		common.Interrupt(s.input)
		return
	}

	if err := buf.Copy(s.input, writer); err != nil {
		newError("failed to fetch all input").Base(err).WriteToLog(session.ExportIDToError(ctx))
		writer.hasError = true
		common.Interrupt(s.input)
		return
	}
}
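
// IsClosing reports whether the worker has reached its MaxConnection budget
// and should not accept new sub-connections.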
func (m *ClientWorker) IsClosing() bool {
	sm := m.sessionManager
	if m.strategy.MaxConnection > 0 && sm.Count() >= int(m.strategy.MaxConnection) {
		return true
	}
	return false
}
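
// IsFull reports whether the worker cannot take another sub-connection right
// now, either because it is closing or because MaxConcurrency is reached.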
func (m *ClientWorker) IsFull() bool {
	if m.IsClosing() || m.Closed() {
		return true
	}

	sm := m.sessionManager
	if m.strategy.MaxConcurrency > 0 && sm.Size() >= int(m.strategy.MaxConcurrency) {
		return true
	}
	return false
}
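
// Dispatch allocates a session on this worker for the given link and starts
// pumping its input in a background goroutine. It returns false if the worker
// cannot accept the link.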
func (m *ClientWorker) Dispatch(ctx context.Context, link *transport.Link) bool {
	if m.IsFull() || m.Closed() {
		return false
	}

	sm := m.sessionManager
	s := sm.Allocate()
	if s == nil {
		return false
	}
	s.input = link.Reader
	s.output = link.Writer
	go fetchInput(ctx, s, m.link.Writer)
	return true
}

func (m *ClientWorker) handleStatusKeepAlive(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}

func (m *ClientWorker) handleStatusNew(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}
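
// handleStatusKeep forwards a Keep frame's payload to its session. If the
// session is gone or the downstream write fails, it tells the peer to close
// the session and drains the remaining payload.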
func (m *ClientWorker) handleStatusKeep(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if !meta.Option.Has(OptionData) {
		return nil
	}

	s, found := m.sessionManager.Get(meta.SessionID)
	if !found {
		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}

	rr := s.NewReader(reader)
	err := buf.Copy(rr, s.output)
	if err != nil && buf.IsWriteError(err) {
		newError("failed to write to downstream. closing session ", s.ID).Base(err).WriteToLog()

		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		drainErr := buf.Copy(rr, buf.Discard)
		common.Interrupt(s.input)
		s.Close()
		return drainErr
	}

	return err
}

func (m *ClientWorker) handleStatusEnd(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if s, found := m.sessionManager.Get(meta.SessionID); found {
		if meta.Option.Has(OptionError) {
			common.Interrupt(s.input)
			common.Interrupt(s.output)
		}
		s.Close()
	}
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}
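
// fetchOutput reads frames from the mux link and demultiplexes them to their
// sessions until the link fails or an unknown status is received.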
func (m *ClientWorker) fetchOutput() {
	defer func() {
		common.Must(m.done.Close())
	}()

	reader := &buf.BufferedReader{Reader: m.link.Reader}

	var meta FrameMetadata
	for {
		err := meta.Unmarshal(reader)
		if err != nil {
			if errors.Cause(err) != io.EOF {
				newError("failed to read metadata").Base(err).WriteToLog()
			}
			break
		}

		switch meta.SessionStatus {
		case SessionStatusKeepAlive:
			err = m.handleStatusKeepAlive(&meta, reader)
		case SessionStatusEnd:
			err = m.handleStatusEnd(&meta, reader)
		case SessionStatusNew:
			err = m.handleStatusNew(&meta, reader)
		case SessionStatusKeep:
			err = m.handleStatusKeep(&meta, reader)
		default:
			status := meta.SessionStatus
			newError("unknown status: ", status).AtError().WriteToLog()
			return
		}

		if err != nil {
			newError("failed to process data").Base(err).WriteToLog()
			return
		}
	}
}