Cloak/internal/multiplex/switchboard.go

package multiplex

import (
	"errors"
	"log"
	"net"
	"sync"
	"sync/atomic"
)

// switchboard is responsible for keeping references to the TLS
// connections between the client and the server
type switchboard struct {
	session *Session
	*Valve

	// optimum is the connEnclave with the smallest sendQueue
	optimum atomic.Value // *connEnclave
	cesM    sync.RWMutex
	ces     []*connEnclave

	/*
		//debug
		hM   sync.Mutex
		used map[uint32]bool
	*/
}

func (sb *switchboard) getOptimum() *connEnclave {
	i := sb.optimum.Load()
	if i == nil {
		return nil
	}
	return i.(*connEnclave)
}

func (sb *switchboard) setOptimum(ce *connEnclave) {
	sb.optimum.Store(ce)
}
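
// atomic.Value lets the hot send path read the current optimum without
// taking cesM. A minimal sketch of the pattern (values here are
// hypothetical, for illustration only):
//
//	var v atomic.Value
//	v.Store(&connEnclave{})       // must always store the same concrete type
//	ce := v.Load().(*connEnclave) // type-assert on load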

// Some data comes from a Stream to be sent through one of the many
// remoteConns, but which remoteConn should we use to send the data?
//
// Here we pick the remoteConn with (roughly) the smallest sendQueue.
type connEnclave struct {
	remoteConn net.Conn
	sendQueue  uint32
}
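
// For example, with three enclaves whose sendQueues sit at 4096, 512 and
// 2048 bytes, send() writes through the one at 512 bytes, since it is the
// least backlogged. A minimal sketch of the selection, with made-up
// values (updateOptimum below does the real, atomic version):
//
//	ces := []*connEnclave{{sendQueue: 4096}, {sendQueue: 512}, {sendQueue: 2048}}
//	best := ces[0]
//	for _, ce := range ces {
//		if ce.sendQueue < best.sendQueue {
//			best = ce // best ends up as the enclave with queue 512
//		}
//	}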
2018-11-07 21:16:13 +00:00

func makeSwitchboard(sesh *Session, valve *Valve) *switchboard {
	// rates are uint64 because in the usermanager we want the bandwidth to be
	// operated on atomically (so that the bandwidth can change on the fly).
	sb := &switchboard{
		session: sesh,
		Valve:   valve,
		ces:     []*connEnclave{},
		//debug
		// used: make(map[uint32]bool),
	}
	return sb
}
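
// A minimal usage sketch (hypothetical: constructing the Session and
// Valve is elided, and "server:443" is a placeholder address):
//
//	sb := makeSwitchboard(sesh, valve)
//	conn, err := net.Dial("tcp", "server:443")
//	if err == nil {
//		sb.addConn(conn) // conn becomes the new optimum and starts being deplexed
//	}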

var errNilOptimum = errors.New("the optimal connection is nil")
var ErrNoRxCredit = errors.New("no Rx credit is left")
var ErrNoTxCredit = errors.New("no Tx credit is left")

func (sb *switchboard) send(data []byte) (int, error) {
	ce := sb.getOptimum()
	if ce == nil {
		return 0, errNilOptimum
	}
	atomic.AddUint32(&ce.sendQueue, uint32(len(data)))
	go sb.updateOptimum()
	n, err := ce.remoteConn.Write(data)
	if err != nil {
		return n, err
	}
	sb.txWait(n)
	if sb.AddTxCredit(-int64(n)) < 0 {
		log.Println(ErrNoTxCredit)
		defer sb.session.Close()
		return n, ErrNoTxCredit
	}
	// subtract n from ce.sendQueue; see the note after this function
	atomic.AddUint32(&ce.sendQueue, ^uint32(n-1))
	go sb.updateOptimum()
	return n, nil
}
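
// Note on the decrement above: for an unsigned counter, ^uint32(n-1)
// equals -n modulo 2^32, so atomic.AddUint32 with that delta atomically
// subtracts n. A tiny sketch with made-up numbers:
//
//	var q uint32 = 100
//	atomic.AddUint32(&q, ^uint32(30-1)) // q is now 70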

func (sb *switchboard) updateOptimum() {
	currentOpti := sb.getOptimum()
	currentOptiQ := atomic.LoadUint32(&currentOpti.sendQueue)
	sb.cesM.RLock()
	for _, ce := range sb.ces {
		ceQ := atomic.LoadUint32(&ce.sendQueue)
		if ceQ < currentOptiQ {
			currentOpti = ce
			currentOptiQ = ceQ
		}
	}
	sb.cesM.RUnlock()
	sb.setOptimum(currentOpti)
}

func (sb *switchboard) addConn(conn net.Conn) {
	newCe := &connEnclave{
		remoteConn: conn,
		sendQueue:  0,
	}
	sb.cesM.Lock()
	sb.ces = append(sb.ces, newCe)
	sb.cesM.Unlock()
	sb.setOptimum(newCe)
	go sb.deplex(newCe)
}

func (sb *switchboard) removeConn(closing *connEnclave) {
	sb.cesM.Lock()
	for i, ce := range sb.ces {
		if closing == ce {
			sb.ces = append(sb.ces[:i], sb.ces[i+1:]...)
			break
		}
	}
	if len(sb.ces) == 0 {
		sb.session.Close()
	}
	sb.cesM.Unlock()
}

func (sb *switchboard) shutdown() {
	// hold the read lock so the slice isn't mutated under us by addConn/removeConn
	sb.cesM.RLock()
	defer sb.cesM.RUnlock()
	for _, ce := range sb.ces {
		ce.remoteConn.Close()
	}
}

// deplex constantly reads from a TCP connection, calls deobfs and
// distributes the resulting frames to their corresponding streams
func (sb *switchboard) deplex(ce *connEnclave) {
	buf := make([]byte, 20480)
	for {
		n, err := sb.session.obfsedRead(ce.remoteConn, buf)
		sb.rxWait(n)
		if err != nil {
			log.Println(err)
			go ce.remoteConn.Close()
			sb.removeConn(ce)
			return
		}
		if sb.AddRxCredit(-int64(n)) < 0 {
			log.Println(ErrNoRxCredit)
			sb.session.Close()
			return
		}
		frame, err := sb.session.deobfs(buf[:n])
		if err != nil {
			log.Println(err)
			continue
		}

		// FIXME: there has been a bug in which a packet has a seemingly
		// corrupted StreamID (e.g. when the largest StreamID is something
		// like 3000 and suddenly a StreamID of 3358661675 is added).
		// It happens about once every 6 hours and the occurrence is really
		// unstable; I couldn't find a way to reproduce it. But I do have
		// some clues: I commented out the util.genXorKeys function so that
		// the stream headers are sent in plaintext, and the bug didn't
		// happen again. So I suspect it has to do with xxHash. Either it's
		// my usage of the library or the implementation of the library
		// itself. Maybe there's a race somewhere? I may eventually use
		// another method to encrypt the headers; xxHash isn't
		// cryptographic after all.
		stream := sb.session.getOrAddStream(frame.StreamID, frame.Closing == 1)
		// if the frame is telling us to close an already-closed stream
		// (this happens when ss-server and ss-local close the stream
		// simultaneously), we don't do anything
		if stream != nil {
			stream.writeNewFrame(frame)
		}

		//debug
		/*
			sb.hM.Lock()
			if sb.used[frame.StreamID] {
				log.Printf("%v lost!\n", frame.StreamID)
			}
			sb.used[frame.StreamID] = true
			sb.hM.Unlock()
		*/
	}
}