// hkextun.go - Tunnel setup using an established xsnet.Conn

// Copyright (c) 2017-2019 Russell Magee
// Licensed under the terms of the MIT license (see LICENSE.mit in this
// distribution)
//
// golang implementation by Russ Magee (rmagee_at_gmail.com)

package xsnet

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"strings"
	"sync"
	"time"

	"blitter.com/go/xs/logger"
)

type (
	// Tunnels
	// --
	// 1. client is given (lport, remhost, rport) by local user
	// 2. client sends [CSOTunSetup:lport,rport] to server
	//    client=> [CSOTunSetup:lport,rport] =>remhost
	//
	//    remhost starts worker to receive/send data using rport
	//    remhost replies to client with rport to acknowledge tun is ready
	//    client<= [CSOTunSetupAck:lport,rport] <=remhost
	//    ... or, if the remhost rport refuses the connection, it sends
	//    [CSOTunRefused:rport]
	//
	//    client starts worker to receive/send data using lport
	//    ... client disconnects: sends remhost [CSOTunHangup:lport,rport]
	//    ... or server disconnects: sends client [CSOTunDisconn:lport,rport]
	//    server at any time sends [CSOTunRefused:rport] if daemon died
	// --
	// (The 4-byte lport,rport payload these packets share is sketched just
	// after this type block.)

	// TunEndpoint [securePort:peer:dataPort]
	TunEndpoint struct {
		Rport     uint16      // Names are from the client's perspective
		Lport     uint16      // ... i.e., Rport is on the server, Lport is on the client
		Peer      string      //net.Addr
		Died      bool        // set by client upon receipt of a CSOTunDisconn
		KeepAlive uint32      // must be reset by client to keep server dial() alive
		Ctl       chan rune   //See TunCtl_* consts
		Data      chan []byte
	}
)
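
// All CSOTun* control packets built in this file carry the same minimal
// 4-byte payload: the client-side port (or zero, for CSOTunRefused after a
// failed dial) followed by the server-side port, each as a big-endian
// uint16; CSOTunData packets prepend this header to the tunneled bytes.
// The helper below is only an illustrative sketch of that layout (its name
// is arbitrary, nothing in this file calls it, and the workers below build
// the header inline with binary.Write).
func tunHdrSketch(lport, rport uint16) []byte {
	var hdr bytes.Buffer
	binary.Write(&hdr, binary.BigEndian, lport) // bytes 0-1: client (local) port
	binary.Write(&hdr, binary.BigEndian, rport) // bytes 2-3: server (remote) port
	return hdr.Bytes()                          // 4-byte CSOTun* payload header
}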

// CollapseAllTunnels tears down every tunnel on this Conn, notifying the
// peer of each (CSOTunHangup when called on the client side, CSOTunDisconn
// on the server side) before removing it from the tunnel map.
func (hc *Conn) CollapseAllTunnels(client bool) {
	for k, t := range *hc.tuns {
		var tunDst bytes.Buffer
		binary.Write(&tunDst, binary.BigEndian, t.Lport)
		binary.Write(&tunDst, binary.BigEndian, t.Rport)
		if client {
			hc.WritePacket(tunDst.Bytes(), CSOTunHangup)
		} else {
			hc.WritePacket(tunDst.Bytes(), CSOTunDisconn)
		}
		delete(*hc.tuns, k)
	}
}

// InitTunEndpoint registers the tunnel endpoint for rp (creating the tunnel
// map on first use), or refreshes an existing endpoint so it can be re-used.
func (hc *Conn) InitTunEndpoint(lp uint16, p string /* net.Addr */, rp uint16) {
	hc.Lock()
	defer hc.Unlock()
	if (*hc.tuns) == nil {
		(*hc.tuns) = make(map[uint16]*TunEndpoint)
	}
	if (*hc.tuns)[rp] == nil {
		var addrs []net.Addr
		if p == "" {
			addrs, _ = net.InterfaceAddrs()
			p = addrs[0].String()
		}
		(*hc.tuns)[rp] = &TunEndpoint{ /*Status: CSOTunSetup,*/ Peer: p,
			Lport: lp, Rport: rp, Data: make(chan []byte, 1),
			Ctl: make(chan rune, 1)}
		logger.LogDebug(fmt.Sprintf("InitTunEndpoint [%d:%s:%d]", lp, p, rp))
	} else {
		logger.LogDebug(fmt.Sprintf("InitTunEndpoint [reusing] %v", (*hc.tuns)[rp]))
		if (*hc.tuns)[rp].Data == nil {
			// When re-using a tunnel it will have had its
			// data channel removed on closure. Re-create it.
			(*hc.tuns)[rp].Data = make(chan []byte, 1)
		}
		(*hc.tuns)[rp].KeepAlive = 0
		(*hc.tuns)[rp].Died = false
	}
}

// StartClientTunnel sets up the client end of a tunnel and waits on its Ctl
// channel for the command ('a') to begin accepting local connections on lport.
func (hc *Conn) StartClientTunnel(lport, rport uint16) {
	hc.InitTunEndpoint(lport, "", rport)

	go func() {
		var wg sync.WaitGroup

		for cmd := range (*hc.tuns)[rport].Ctl {
			if cmd == 'a' {
				l, e := net.Listen("tcp4", fmt.Sprintf(":%d", lport))
				if e != nil {
					logger.LogDebug(fmt.Sprintf("[ClientTun] Could not get lport %d! (%s)", lport, e))
				} else {
					logger.LogDebug(fmt.Sprintf("[ClientTun] Listening for client tunnel port %d", lport))

					for {
						c, e := l.Accept() // blocks until new conn

						// If tunnel is being re-used, re-init it
						if (*hc.tuns)[rport] == nil {
							hc.InitTunEndpoint(lport, "", rport)
						}

						// ask server to dial() its side, rport
						var tunDst bytes.Buffer
						binary.Write(&tunDst, binary.BigEndian, lport)
						binary.Write(&tunDst, binary.BigEndian, rport)
						hc.WritePacket(tunDst.Bytes(), CSOTunSetup)

						if e != nil {
							logger.LogDebug(fmt.Sprintf("[ClientTun] Accept() got error(%v), hanging up.", e))
						} else {
							logger.LogDebug(fmt.Sprintf("[ClientTun] Accepted tunnel client %v", (*hc.tuns)[rport]))

							// outside client -> tunnel lport
							wg.Add(1)
							go func() {
								defer func() {
									if c.Close() != nil {
										logger.LogDebug("[ClientTun] worker A: conn c already closed")
									} else {
										logger.LogDebug("[ClientTun] worker A: closed conn c")
									}
									wg.Done()
								}()

								logger.LogDebug("[ClientTun] worker A: starting")

								var tunDst bytes.Buffer
								binary.Write(&tunDst, binary.BigEndian, lport)
								binary.Write(&tunDst, binary.BigEndian, rport)
								for {
									rBuf := make([]byte, 1024)
									// Read data from c, encrypt/write via hc to client(lport)
									c.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
									n, e := c.Read(rBuf)
									if e != nil {
										if e == io.EOF {
											logger.LogDebug(fmt.Sprintf("[ClientTun] worker A: lport Disconnected: shutting down tunnel %v", (*hc.tuns)[rport]))
											// if Died was already set, server-side already is gone.
											if hc.TunIsAlive(rport) {
												hc.WritePacket(tunDst.Bytes(), CSOTunHangup)
											}
											hc.ShutdownTun(rport) // FIXME: race-C
											break
										} else if strings.Contains(e.Error(), "i/o timeout") {
											if !hc.TunIsAlive(rport) {
												logger.LogDebug(fmt.Sprintf("[ClientTun] worker A: timeout: Server side died, hanging up %v", (*hc.tuns)[rport]))
												hc.ShutdownTun(rport)
												break
											}
										} else {
											logger.LogDebug(fmt.Sprintf("[ClientTun] worker A: Read error from lport of tun %v\n%s", (*hc.tuns)[rport], e))
											if hc.TunIsAlive(rport) {
												hc.WritePacket(tunDst.Bytes(), CSOTunHangup)
											}
											hc.ShutdownTun(rport)
											break
										}
									}
									if n > 0 {
										rBuf = append(tunDst.Bytes(), rBuf[:n]...)
										_, de := hc.WritePacket(rBuf[:n+4], CSOTunData)
										if de != nil {
											logger.LogDebug(fmt.Sprintf("[ClientTun] worker A: Error writing to tunnel %v, %s]\n", (*hc.tuns)[rport], de))
											break
										}
									}
								}
								logger.LogDebug("[ClientTun] worker A: exiting")
							}()

							// tunnel lport -> outside client (c)
							wg.Add(1)
							go func() {
								defer func() {
									if c.Close() != nil {
										logger.LogDebug("[ClientTun] worker B: conn c already closed")
									} else {
										logger.LogDebug("[ClientTun] worker B: closed conn c")
									}
									wg.Done()
								}()

								logger.LogDebug("[ClientTun] worker B: starting")

								for {
									rData, ok := <-(*hc.tuns)[rport].Data // FIXME: race-C w/ShutdownTun calls
									if ok {
										c.SetWriteDeadline(time.Now().Add(200 * time.Millisecond))
										_, e := c.Write(rData)
										if e != nil {
											logger.LogDebug("[ClientTun] worker B: lport conn closed")
											break
										}
									} else {
										logger.LogDebug("[ClientTun] worker B: Channel was closed?")
										break
									}
								}
								logger.LogDebug("[ClientTun] worker B: exiting")
							}()

						} // end Accept() worker block
						wg.Wait()

						// When both workers have exited due to a disconnect or other
						// condition, it's safe to remove the tunnel descriptor.
						logger.LogDebug("[ClientTun] workers exited")
						hc.ShutdownTun(rport)
					} // end for-accept
				} // end Listen() block
			}
		} // end t.Ctl for
	}()
}
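
// The tunnel loops in StartClientTunnel and StartServerTunnel are driven by
// the endpoint's Ctl channel: the client worker begins Accept()ing on lport
// when it reads 'a', and the server worker Dial()s rport when it reads 'd';
// those command runes are sent from elsewhere in this package (not shown in
// this file). The function below is only an illustrative sketch of that
// hand-off for the client side; its name is hypothetical and nothing in this
// file calls it.
func kickClientTunnelSketch(hc *Conn, lport, rport uint16) {
	hc.StartClientTunnel(lport, rport) // register the endpoint, start the Ctl loop
	(*hc.tuns)[rport].Ctl <- 'a'       // tell the worker to begin Accept()ing on lport
}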

// AgeTunnel increments the keepalive age of the tunnel endpoint endp and
// returns the new value.
func (hc *Conn) AgeTunnel(endp uint16) uint32 {
	hc.Lock()
	defer hc.Unlock()
	(*hc.tuns)[endp].KeepAlive++
	return (*hc.tuns)[endp].KeepAlive
}

// ResetTunnelAge zeroes the keepalive age of the tunnel endpoint endp.
func (hc *Conn) ResetTunnelAge(endp uint16) {
	hc.Lock()
	defer hc.Unlock()
	(*hc.tuns)[endp].KeepAlive = 0
}

// TunIsNil reports whether the tunnel endpoint endp has been removed.
func (hc *Conn) TunIsNil(endp uint16) bool {
	hc.Lock()
	defer hc.Unlock()
	return (*hc.tuns)[endp] == nil
}

// TunIsAlive reports whether the tunnel endpoint endp is still live.
func (hc *Conn) TunIsAlive(endp uint16) bool {
	hc.Lock()
	defer hc.Unlock()
	return !(*hc.tuns)[endp].Died
}

// MarkTunDead flags the tunnel endpoint endp as dead.
func (hc *Conn) MarkTunDead(endp uint16) {
	hc.Lock()
	defer hc.Unlock()
	(*hc.tuns)[endp].Died = true
}

// ShutdownTun marks the tunnel endpoint endp dead, closes its data channel
// and removes it from the connection's tunnel map.
func (hc *Conn) ShutdownTun(endp uint16) {
	hc.Lock()
	defer hc.Unlock()
	if (*hc.tuns)[endp] != nil {
		(*hc.tuns)[endp].Died = true
		if (*hc.tuns)[endp].Data != nil {
			close((*hc.tuns)[endp].Data)
			(*hc.tuns)[endp].Data = nil
		}
	}
	delete((*hc.tuns), endp)
}

// StartServerTunnel sets up the server end of a tunnel and waits on its Ctl
// channel for the command ('d') to dial the local rport on the client's behalf.
func (hc *Conn) StartServerTunnel(lport, rport uint16) {
	hc.InitTunEndpoint(lport, "", rport)
	var err error

	go func() {
		var wg sync.WaitGroup

		//
		// worker to age server tunnel and kill it if keepalives
		// stop from client (see the keepalive sketch at the end of this file)
		//
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				time.Sleep(100 * time.Millisecond)
				if hc.TunIsNil(rport) {
					logger.LogDebug("[ServerTun] worker A: Client endpoint removed.")
					break
				}
				age := hc.AgeTunnel(rport)
				if age > 25 {
					hc.MarkTunDead(rport)
					logger.LogDebug("[ServerTun] worker A: Client died, hanging up.")
					break
				}
			}
		}()

		for cmd := range (*hc.tuns)[rport].Ctl {
			var c net.Conn
			logger.LogDebug(fmt.Sprintf("[ServerTun] got Ctl '%c'.", cmd))
			if cmd == 'd' {
				// if re-using tunnel, re-init it
				if hc.TunIsNil(rport) {
					hc.InitTunEndpoint(lport, "", rport)
				}
				logger.LogDebug("[ServerTun] dialling...")
				c, err = net.Dial("tcp4", fmt.Sprintf(":%d", rport))
				if err != nil {
					logger.LogDebug(fmt.Sprintf("[ServerTun] Dial() error for tun %v: %s", (*hc.tuns)[rport], err))
					var resp bytes.Buffer
					binary.Write(&resp, binary.BigEndian /*lport*/, uint16(0))
					binary.Write(&resp, binary.BigEndian, rport)
					hc.WritePacket(resp.Bytes(), CSOTunRefused)
				} else {
					logger.LogDebug(fmt.Sprintf("[ServerTun] Tunnel Opened - %v", (*hc.tuns)[rport]))
					var resp bytes.Buffer
					binary.Write(&resp, binary.BigEndian, lport)
					binary.Write(&resp, binary.BigEndian, rport)
					logger.LogDebug(fmt.Sprintf("[ServerTun] Writing CSOTunSetupAck %v", (*hc.tuns)[rport]))
					hc.WritePacket(resp.Bytes(), CSOTunSetupAck)

					//
					// worker to read data from the rport (to encrypt & send to client)
					//
					wg.Add(1)
					go func() {
						defer func() {
							logger.LogDebug("[ServerTun] worker A: deferred hangup")
							if c.Close() != nil {
								logger.LogDebug("[ServerTun] worker A: conn c already closed")
							}
							wg.Done()
						}()

						logger.LogDebug("[ServerTun] worker A: starting")

						var tunDst bytes.Buffer
						binary.Write(&tunDst, binary.BigEndian, (*hc.tuns)[rport].Lport)
						binary.Write(&tunDst, binary.BigEndian, (*hc.tuns)[rport].Rport)
						for {
							rBuf := make([]byte, 1024)
							// Read data from c, encrypt/write via hc to client(lport)
							c.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
							n, e := c.Read(rBuf)
							if e != nil {
								if e == io.EOF {
									logger.LogDebug(fmt.Sprintf("[ServerTun] worker A: rport Disconnected: shutting down tunnel %v", (*hc.tuns)[rport]))
									if hc.TunIsAlive(rport) {
										hc.WritePacket(tunDst.Bytes(), CSOTunDisconn)
									}
									hc.ShutdownTun(rport) // FIXME: race-A
									break
								} else if strings.Contains(e.Error(), "i/o timeout") {
									if !hc.TunIsAlive(rport) {
										logger.LogDebug(fmt.Sprintf("[ServerTun] worker A: timeout: Server side died, hanging up %v", (*hc.tuns)[rport]))
										hc.ShutdownTun(rport) // FIXME: race-B
										break
									}
								} else {
									logger.LogDebug(fmt.Sprintf("[ServerTun] worker A: Read error from rport of tun %v: %s", (*hc.tuns)[rport], e))
									if hc.TunIsAlive(rport) {
										hc.WritePacket(tunDst.Bytes(), CSOTunDisconn)
									}
									hc.ShutdownTun(rport) // FIXME: race-C
									break
								}
							}
							if n > 0 {
								rBuf = append(tunDst.Bytes(), rBuf[:n]...)
								hc.WritePacket(rBuf[:n+4], CSOTunData)
							}
						}
						logger.LogDebug("[ServerTun] worker A: exiting")
					}()

					// worker to read data from client (already decrypted) & fwd to rport
					wg.Add(1)
					go func() {
						defer func() {
							logger.LogDebug("[ServerTun] worker B: deferred hangup")
							if c.Close() != nil {
								logger.LogDebug("[ServerTun] worker B: conn c already closed")
							}
							wg.Done()
						}()

						logger.LogDebug("[ServerTun] worker B: starting")
						for {
							rData, ok := <-(*hc.tuns)[rport].Data // FIXME: race-A, race-B, race-C (w/ShutdownTun() calls)
							if ok {
								c.SetWriteDeadline(time.Now().Add(200 * time.Millisecond))
								_, e := c.Write(rData)
								if e != nil {
									logger.LogDebug("[ServerTun] worker B: ERROR writing to rport conn")
									break
								}
							} else {
								logger.LogDebug("[ServerTun] worker B: Channel was closed?")
								break
							}
						}
						logger.LogDebug("[ServerTun] worker B: exiting")
					}()
					wg.Wait()
				} // end if Dialled successfully
				delete((*hc.tuns), rport)
			}
		} // t.Ctl read loop
		logger.LogDebug("[ServerTun] Tunnel exiting t.Ctl read loop - channel closed??")
	}()
}
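
// The aging worker in StartServerTunnel above calls AgeTunnel once per 100ms
// and marks the tunnel dead once the count exceeds 25 (roughly 2.5s) without
// an intervening ResetTunnelAge, so the client is expected to send periodic
// keepalives that the packet dispatcher (elsewhere in this package, not shown
// here) answers by resetting the age. The handler below is only a minimal
// sketch of that reset using the helpers defined above; its name is
// illustrative and nothing in this file calls it.
func resetTunAgeOnKeepAliveSketch(hc *Conn, rport uint16) {
	// Zeroing the age keeps the aging worker from hanging up the tunnel.
	if !hc.TunIsNil(rport) {
		hc.ResetTunnelAge(rport)
	}
}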