mirror of https://gogs.blitter.com/RLabs/xs
Tunnels w/reconnect refinements:
- 200ms deadlink timeouts
- Fixed TunConnRefused client hangup handling

Signed-off-by: Russ Magee <rmagee@gmail.com>
parent ba3cda95e8
commit 2a9e6af2ae
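The substance of the change is twofold: the tunnel worker loops drop their 20-second read/write deadlines to 200ms so a dead link is noticed promptly, and the client marks a refused tunnel as Died only if its endpoint still exists in the tunnel map. Below is a minimal sketch of the short-deadline read loop, assuming illustrative names (copyLoop, the died callback) in place of the repo's (*hc.tuns)[rport] bookkeeping; it is not code from this commit.

// Sketch of the 200ms "deadlink" polling pattern adopted by this commit.
package main

import (
	"fmt"
	"net"
	"strings"
	"sync/atomic"
	"time"
)

// copyLoop reads from c under a short deadline. A timeout alone is benign;
// a timeout after the peer has been marked dead ends the loop.
func copyLoop(c net.Conn, died func() bool) {
	buf := make([]byte, 1024)
	for {
		// Re-arm the deadline before every read (the commit's 200ms value).
		c.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
		n, e := c.Read(buf)
		if e != nil {
			if strings.Contains(e.Error(), "i/o timeout") {
				if died() {
					fmt.Println("peer marked dead; hanging up")
					return
				}
				continue // benign timeout; poll again
			}
			return // EOF or hard error
		}
		_ = buf[:n] // forward n bytes over the encrypted channel (elided)
	}
}

func main() {
	a, b := net.Pipe() // stand-in for the tunnel's TCP conn
	defer a.Close()
	defer b.Close()

	var dead atomic.Bool
	go func() { // simulate the control channel marking the link dead
		time.Sleep(500 * time.Millisecond)
		dead.Store(true)
	}()
	copyLoop(a, dead.Load)
}

The trade-off is that reads become a poll, so the loop must distinguish a benign timeout from one that follows a Died mark set by the control channel, which is what the CSOTunRefused hunk below arranges.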
@@ -842,6 +842,11 @@ func (hc Conn) Read(b []byte) (n int, err error) {
 				lport := binary.BigEndian.Uint16(payloadBytes[0:2])
 				rport := binary.BigEndian.Uint16(payloadBytes[2:4])
 				logger.LogDebug(fmt.Sprintf("[Client] Got CSOTunRefused [%d:%d]", lport, rport))
+				if _, ok := (*hc.tuns)[rport]; ok {
+					(*hc.tuns)[rport].Died = true
+				} else {
+					logger.LogDebug(fmt.Sprintf("[Client] CSOTunRefused on already-closed tun [%d:%d]", lport, rport))
+				}
 			} else if ctrlStatOp == CSOTunDisconn {
 				// server side's rport has disconnected (server lost)
 				lport := binary.BigEndian.Uint16(payloadBytes[0:2])
@@ -80,7 +80,7 @@ func (hc *Conn) InitTunEndpoint(lp uint16, p string /* net.Addr */, rp uint16) {
 			Ctl: make(chan rune, 1)}
 		logger.LogDebug(fmt.Sprintf("InitTunEndpoint [%d:%s:%d]", lp, p, rp))
 	} else {
-		logger.LogDebug(fmt.Sprintf("InitTunEndpoint [reusing] [%d:%s:%d]", (*hc.tuns)[rp].Lport, (*hc.tuns)[rp].Peer, (*hc.tuns)[rp].Rport))
+		logger.LogDebug(fmt.Sprintf("InitTunEndpoint [reusing] %v", (*hc.tuns)[rp]))
 		if (*hc.tuns)[rp].Data == nil {
 			// When re-using a tunnel it will have its
 			// data channel removed on closure. Re-create it
@@ -93,26 +93,26 @@ func (hc *Conn) InitTunEndpoint(lp uint16, p string /* net.Addr */, rp uint16) {

 func (hc *Conn) StartClientTunnel(lport, rport uint16) {
 	hc.InitTunEndpoint(lport, "", rport)
-	var l HKExListener
 	go func() {
 		var wg sync.WaitGroup
 		weAreListening := false
-		for cmd := range (*hc.tuns)[rport].Ctl {
-			logger.LogDebug(fmt.Sprintf("[ClientTun] Listening for client tunnel port %d", lport))
+
+		for cmd := range (*hc.tuns)[rport].Ctl {
 			if cmd == 'a' && !weAreListening {
 				l, e := net.Listen("tcp4", fmt.Sprintf(":%d", lport))
 				if e != nil {
 					logger.LogDebug(fmt.Sprintf("[ClientTun] Could not get lport %d! (%s)", lport, e))
 				} else {
 					weAreListening = true
+					logger.LogDebug(fmt.Sprintf("[ClientTun] Listening for client tunnel port %d", lport))

 					for {
+						c, e := l.Accept()
 						// If tunnel is being re-used, re-init it
 						if (*hc.tuns)[rport] == nil {
 							hc.InitTunEndpoint(lport, "", rport)
 						}
-						c, e := l.Accept()

 						// ask server to dial() its side, rport
 						var tunDst bytes.Buffer
 						binary.Write(&tunDst, binary.BigEndian, lport)
@@ -121,7 +121,6 @@ func (hc *Conn) StartClientTunnel(lport, rport uint16) {

 						if e != nil {
 							logger.LogDebug(fmt.Sprintf("[ClientTun] Accept() got error(%v), hanging up.", e))
-							//break
 						} else {
 							logger.LogDebug(fmt.Sprintf("[ClientTun] Accepted tunnel client %v", (*hc.tuns)[rport]))

@@ -145,7 +144,7 @@ func (hc *Conn) StartClientTunnel(lport, rport uint16) {
 			for {
 				rBuf := make([]byte, 1024)
 				//Read data from c, encrypt/write via hc to client(lport)
-				c.SetReadDeadline(time.Now().Add(20 * time.Second))
+				c.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
 				n, e := c.Read(rBuf)
 				if e != nil {
 					if e == io.EOF {
@@ -211,7 +210,7 @@ func (hc *Conn) StartClientTunnel(lport, rport uint16) {
 			for {
 				bytes, ok := <-(*hc.tuns)[rport].Data
 				if ok {
-					c.SetWriteDeadline(time.Now().Add(20 * time.Second))
+					c.SetWriteDeadline(time.Now().Add(200 * time.Millisecond))
 					_, e := c.Write(bytes)
 					if e != nil {
 						logger.LogDebug(fmt.Sprintf("[ClientTun] worker B: lport conn closed"))
@@ -234,11 +233,6 @@ func (hc *Conn) StartClientTunnel(lport, rport uint16) {
 			} else if cmd == 'r' {
 				logger.LogDebug(fmt.Sprintf("[ClientTun] Server replied TunRefused %v\n", (*hc.tuns)[rport]))
 			}
-			_ = l //else if cmd == 'x' {
-			//logger.LogDebug(fmt.Sprintf("[ClientTun] Server replied TunDisconn, closing lport %v\n", t))
-			//l.Close()
-			//weAreListening = false
-			//}
 		} // end t.Ctl for
 	}()
 }
@@ -298,7 +292,7 @@ func (hc *Conn) StartServerTunnel(lport, rport uint16) {
 			for {
 				rBuf := make([]byte, 1024)
 				// Read data from c, encrypt/write via hc to client(lport)
-				c.SetReadDeadline(time.Now().Add(20 * time.Second))
+				c.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
 				n, e := c.Read(rBuf)
 				if e != nil {
 					if e == io.EOF {
@@ -315,8 +309,6 @@ func (hc *Conn) StartServerTunnel(lport, rport uint16) {
 					} else if strings.Contains(e.Error(), "i/o timeout") {
 						if (*hc.tuns)[rport].Died {
 							logger.LogDebug(fmt.Sprintf("[ServerTun] worker A: timeout: Server side died, hanging up %v", (*hc.tuns)[rport]))
-							//hc.WritePacket(tunDst.Bytes(), CSOTunDisconn)
-							//(*hc.tuns)[rport].Died = true
 							if (*hc.tuns)[rport].Data != nil {
 								close((*hc.tuns)[rport].Data)
 								(*hc.tuns)[rport].Data = nil
@@ -360,7 +352,7 @@ func (hc *Conn) StartServerTunnel(lport, rport uint16) {
 			for {
 				rData, ok := <-(*hc.tuns)[rport].Data
 				if ok {
-					c.SetWriteDeadline(time.Now().Add(20 * time.Second))
+					c.SetWriteDeadline(time.Now().Add(200 * time.Millisecond))
 					_, e := c.Write(rData)
 					if e != nil {
 						logger.LogDebug(fmt.Sprintf("[ServerTun] worker B: ERROR writing to rport conn"))
@@ -379,6 +371,5 @@ func (hc *Conn) StartServerTunnel(lport, rport uint16) {
 			}
 		} // t.Ctl read loop
 		logger.LogDebug("[ServerTun] Tunnel exiting t.Ctl read loop - channel closed??")
-		//wg.Wait()
 	}()
 }
@@ -273,7 +273,6 @@ func doShellMode(isInteractive bool, conn *hkexnet.Conn, oldState *hkexsh.State,
 				// gracefully here
 				if !strings.HasSuffix(inerr.Error(), "use of closed network connection") {
 					log.Println(inerr)
-					conn.CollapseAllTunnels(true)
 					os.Exit(1)
 				}
 			}
@@ -311,7 +310,6 @@ func doShellMode(isInteractive bool, conn *hkexnet.Conn, oldState *hkexsh.State,
 			fmt.Println(outerr)
 			_ = hkexsh.Restore(int(os.Stdin.Fd()), oldState) // Best effort.
 			log.Println("[Hanging up]")
-			conn.CollapseAllTunnels(true)
 			os.Exit(0)
 		}
 	}()
@@ -648,7 +646,6 @@ func main() {
 		doShellMode(isInteractive, &conn, oldState, rec)
 	} else { // copyMode
 		_, s := doCopyMode(&conn, pathIsDest, fileArgs, rec)
-		conn.CollapseAllTunnels(true)
 		rec.SetStatus(s)
 	}

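A closing aside on the timeout checks in the worker loops above: they classify errors by matching the string "i/o timeout". A type-based check via Go's net.Error interface is sturdier against error-message changes; the following sketch shows that alternative and is not part of this commit.

// Sketch: detecting a deadline expiry without string matching.
package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()

	// Arm a short deadline, then read with no data pending so it expires.
	a.SetReadDeadline(time.Now().Add(200 * time.Millisecond))
	_, e := a.Read(make([]byte, 1))

	// Type-based equivalent of strings.Contains(e.Error(), "i/o timeout").
	var ne net.Error
	if errors.As(e, &ne) && ne.Timeout() {
		fmt.Println("read deadline expired; candidate deadlink")
	}
}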