@@ -117,16 +117,9 @@ type AURPPeer struct {
 	// Event tuples yet to be sent to this peer in an RI-Upd.
 	pendingEvents chan aurp.EventTuple
 
-	// The internal states below are only set within the Handle loop, but can
-	// be read concurrently from outside.
-	mu            sync.RWMutex
-	rstate        ReceiverState
-	sstate        SenderState
-	lastReconnect time.Time
-	lastHeardFrom time.Time
-	lastSend      time.Time // TODO: clarify use of lastSend / sendRetries
-	lastUpdate    time.Time
-	sendRetries   int
+	mu     sync.RWMutex
+	rstate ReceiverState
+	sstate SenderState
 }
 
 func NewAURPPeer(routes *RouteTable, udpConn *net.UDPConn, peerAddr string, raddr *net.UDPAddr, localDI, remoteDI aurp.DomainIdentifier, connID uint16) *AURPPeer {
@@ -188,7 +181,7 @@ func (p *AURPPeer) Forward(ddpkt *ddp.ExtPacket) error {
 	if err != nil {
 		return err
 	}
-	_, err = p.send(p.Transport.NewAppleTalkPacket(outPkt))
+	_, err = p.Send(p.Transport.NewAppleTalkPacket(outPkt))
 	return err
 }
 
@@ -204,36 +197,6 @@ func (p *AURPPeer) SenderState() SenderState {
 	return p.sstate
 }
 
-func (p *AURPPeer) LastReconnectAgo() string {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return ago(p.lastReconnect)
-}
-
-func (p *AURPPeer) LastHeardFromAgo() string {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return ago(p.lastHeardFrom)
-}
-
-func (p *AURPPeer) LastSendAgo() string {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return ago(p.lastSend)
-}
-
-func (p *AURPPeer) LastUpdateAgo() string {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return ago(p.lastUpdate)
-}
-
-func (p *AURPPeer) SendRetries() int {
-	p.mu.RLock()
-	defer p.mu.RUnlock()
-	return p.sendRetries
-}
-
 func (p *AURPPeer) setRState(rstate ReceiverState) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
@@ -246,42 +209,6 @@ func (p *AURPPeer) setSState(sstate SenderState) {
 	p.sstate = sstate
 }
 
-func (p *AURPPeer) incSendRetries() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	p.sendRetries++
-}
-
-func (p *AURPPeer) resetSendRetries() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	p.sendRetries = 0
-}
-
-func (p *AURPPeer) bumpLastHeardFrom() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	p.lastHeardFrom = time.Now()
-}
-
-func (p *AURPPeer) bumpLastReconnect() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	p.lastReconnect = time.Now()
-}
-
-func (p *AURPPeer) bumpLastSend() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	p.lastSend = time.Now()
-}
-
-func (p *AURPPeer) bumpLastUpdate() {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	p.lastUpdate = time.Now()
-}
-
 func (p *AURPPeer) disconnect() {
 	p.mu.Lock()
 	defer p.mu.Unlock()
@@ -289,8 +216,8 @@ func (p *AURPPeer) disconnect() {
 	p.sstate = SenderUnconnected
 }
 
-// send encodes and sends pkt to the remote host.
-func (p *AURPPeer) send(pkt aurp.Packet) (int, error) {
+// Send encodes and sends pkt to the remote host.
+func (p *AURPPeer) Send(pkt aurp.Packet) (int, error) {
 	var b bytes.Buffer
 	if _, err := pkt.WriteTo(&b); err != nil {
 		return 0, err
@@ -305,20 +232,18 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 	sticker := time.NewTicker(1 * time.Second)
 	defer sticker.Stop()
 
-	p.mu.Lock()
-	p.lastReconnect = time.Now()
-	p.lastHeardFrom = time.Now()
-	p.lastSend = time.Now() // TODO: clarify use of lastSend / sendRetries
-	p.lastUpdate = time.Now()
-	p.sendRetries = 0
-	p.mu.Unlock()
+	lastReconnect := time.Now()
+	lastHeardFrom := time.Now()
+	lastSend := time.Now() // TODO: clarify use of lastSend / sendRetries
+	lastUpdate := time.Now()
+	sendRetries := 0
 
 	var lastRISent aurp.Packet
 
 	p.disconnect()
 
 	// Write an Open-Req packet
-	if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+	if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 		log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 		return err
 	}
@@ -334,7 +259,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 			}
 			// Send a best-effort Router Down before returning
 			lastRISent = p.Transport.NewRDPacket(aurp.ErrCodeNormalClose)
-			if _, err := p.send(lastRISent); err != nil {
+			if _, err := p.Send(lastRISent); err != nil {
 				log.Printf("Couldn't send RD packet: %v", err)
 			}
 			return ctx.Err()
@@ -342,60 +267,60 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 		case <-rticker.C:
 			switch p.rstate {
 			case ReceiverWaitForOpenRsp:
-				if time.Since(p.lastSend) <= sendRetryTimer {
+				if time.Since(lastSend) <= sendRetryTimer {
 					break
 				}
-				if p.sendRetries >= sendRetryLimit {
+				if sendRetries >= sendRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached while waiting for Open-Rsp, closing connection")
 					p.setRState(ReceiverUnconnected)
 					break
 				}
 
 				// Send another Open-Req
-				p.incSendRetries()
-				p.bumpLastSend()
-				if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+				sendRetries++
+				lastSend = time.Now()
+				if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 					log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 					return err
 				}
 
 			case ReceiverConnected:
 				// Check LHFT, send tickle?
-				if time.Since(p.lastHeardFrom) <= lastHeardFromTimer {
+				if time.Since(lastHeardFrom) <= lastHeardFromTimer {
 					break
 				}
-				if _, err := p.send(p.Transport.NewTicklePacket()); err != nil {
+				if _, err := p.Send(p.Transport.NewTicklePacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send Tickle: %v", err)
 					return err
 				}
 				p.setRState(ReceiverWaitForTickleAck)
-				p.resetSendRetries()
-				p.bumpLastSend()
+				sendRetries = 0
+				lastSend = time.Now()
 
 			case ReceiverWaitForTickleAck:
-				if time.Since(p.lastSend) <= sendRetryTimer {
+				if time.Since(lastSend) <= sendRetryTimer {
 					break
 				}
-				if p.sendRetries >= tickleRetryLimit {
+				if sendRetries >= tickleRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached while waiting for Tickle-Ack, closing connection")
 					p.setRState(ReceiverUnconnected)
 					p.RouteTable.DeleteAURPPeer(p)
 					break
 				}
 
-				p.incSendRetries()
-				p.bumpLastSend()
-				if _, err := p.send(p.Transport.NewTicklePacket()); err != nil {
+				sendRetries++
+				lastSend = time.Now()
+				if _, err := p.Send(p.Transport.NewTicklePacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send Tickle: %v", err)
 					return err
 				}
 				// still in Wait For Tickle-Ack
 
 			case ReceiverWaitForRIRsp:
-				if time.Since(p.lastSend) <= sendRetryTimer {
+				if time.Since(lastSend) <= sendRetryTimer {
 					break
 				}
-				if p.sendRetries >= sendRetryLimit {
+				if sendRetries >= sendRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached while waiting for RI-Rsp, closing connection")
 					p.setRState(ReceiverUnconnected)
 					p.RouteTable.DeleteAURPPeer(p)
@@ -404,9 +329,8 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				// RI-Req is stateless, so we don't need to cache the one we
 				// sent earlier just to send it again
-				p.incSendRetries()
-				p.bumpLastSend()
-				if _, err := p.send(p.Transport.NewRIReqPacket()); err != nil {
+				sendRetries++
+				if _, err := p.Send(p.Transport.NewRIReqPacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Req packet: %v", err)
 					return err
 				}
@@ -415,18 +339,18 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 			case ReceiverUnconnected:
 				// Data receiver is unconnected. If data sender is connected,
 				// send a null RI-Upd to check if the sender is also unconnected
-				if p.sstate == SenderConnected && time.Since(p.lastSend) > sendRetryTimer {
-					if p.sendRetries >= sendRetryLimit {
-						log.Printf("AURP Peer: Send retry limit reached while probing sender connect, closing connection")
+				if p.sstate == SenderConnected && time.Since(lastSend) > sendRetryTimer {
+					if sendRetries >= sendRetryLimit {
+						log.Print("AURP Peer: Send retry limit reached while probing sender connect, closing connection")
 					}
-					p.incSendRetries()
-					p.bumpLastSend()
+					sendRetries++
+					lastSend = time.Now()
 					aurp.Inc(&p.Transport.LocalSeq)
 					events := aurp.EventTuples{{
 						EventCode: aurp.EventCodeNull,
 					}}
 					lastRISent = p.Transport.NewRIUpdPacket(events)
-					if _, err := p.send(lastRISent); err != nil {
+					if _, err := p.Send(lastRISent); err != nil {
 						log.Printf("AURP Peer: Couldn't send RI-Upd packet: %v", err)
 						return err
 					}
@@ -435,7 +359,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				if p.ConfiguredAddr != "" {
 					// Periodically try to reconnect, if this peer is in the config file
-					if time.Since(p.lastReconnect) <= reconnectTimer {
+					if time.Since(lastReconnect) <= reconnectTimer {
 						break
 					}
 
@@ -448,10 +372,10 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					// log.Printf("AURP Peer: resolved %q to %v", p.ConfiguredAddr, raddr)
 					p.RemoteAddr = raddr
 
-					p.bumpLastReconnect()
-					p.resetSendRetries()
-					p.bumpLastSend()
-					if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+					lastReconnect = time.Now()
+					sendRetries = 0
+					lastSend = time.Now()
+					if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 						log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 						return err
 					}
@@ -465,41 +389,40 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				// Do nothing
 
 			case SenderConnected:
-				if time.Since(p.lastUpdate) <= updateTimer {
+				if time.Since(lastUpdate) <= updateTimer {
 					break
 				}
 				// TODO: is there a routing update to send?
-				p.bumpLastUpdate()
 
 			case SenderWaitForRIRspAck, SenderWaitForRIUpdAck:
-				if time.Since(p.lastSend) <= sendRetryTimer {
+				if time.Since(lastSend) <= sendRetryTimer {
 					break
 				}
 				if lastRISent == nil {
 					log.Print("AURP Peer: sender retry: lastRISent = nil?")
 					continue
 				}
-				if p.sendRetries >= sendRetryLimit {
+				if sendRetries >= sendRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached, closing connection")
 					p.setSState(SenderUnconnected)
 					continue
 				}
-				p.incSendRetries()
-				p.bumpLastSend()
-				if _, err := p.send(lastRISent); err != nil {
+				sendRetries++
+				lastSend = time.Now()
+				if _, err := p.Send(lastRISent); err != nil {
 					log.Printf("AURP Peer: Couldn't re-send %T: %v", lastRISent, err)
 					return err
 				}
 
 			case SenderWaitForRDAck:
-				if time.Since(p.lastSend) <= sendRetryTimer {
+				if time.Since(lastSend) <= sendRetryTimer {
 					break
 				}
 				p.setSState(SenderUnconnected)
 			}
 
 		case pkt := <-p.ReceiveCh:
-			p.bumpLastHeardFrom()
+			lastHeardFrom = time.Now()
 
 			switch pkt := pkt.(type) {
 			case *aurp.OpenReqPacket:
@@ -526,7 +449,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					orsp = p.Transport.NewOpenRspPacket(0, 1, nil)
 				}
 
-				if _, err := p.send(orsp); err != nil {
+				if _, err := p.Send(orsp); err != nil {
 					log.Printf("AURP Peer: Couldn't send Open-Rsp: %v", err)
 					return err
 				}
@@ -536,9 +459,9 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				// If receiver is unconnected, commence connecting
 				if p.rstate == ReceiverUnconnected {
-					p.resetSendRetries()
-					p.bumpLastSend()
-					if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+					lastSend = time.Now()
+					sendRetries = 0
+					if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 						log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 						return err
 					}
@@ -559,8 +482,8 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				p.setRState(ReceiverConnected)
 
 				// Send an RI-Req
-				p.resetSendRetries()
-				if _, err := p.send(p.Transport.NewRIReqPacket()); err != nil {
+				sendRetries = 0
+				if _, err := p.Send(p.Transport.NewRIReqPacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Req packet: %v", err)
 					return err
 				}
@@ -583,7 +506,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				p.Transport.LocalSeq = 1
 				// TODO: Split tuples across multiple packets as required
 				lastRISent = p.Transport.NewRIRspPacket(aurp.RoutingFlagLast, nets)
-				if _, err := p.send(lastRISent); err != nil {
+				if _, err := p.Send(lastRISent); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Rsp packet: %v", err)
 					return err
 				}
@@ -608,7 +531,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				// TODO: track which networks we don't have zone info for, and
 				// only set SZI for those ?
-				if _, err := p.send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, aurp.RoutingFlagSendZoneInfo)); err != nil {
+				if _, err := p.Send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, aurp.RoutingFlagSendZoneInfo)); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Ack packet: %v", err)
 					return err
 				}
@@ -634,7 +557,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				}
 
 				p.setSState(SenderConnected)
-				p.resetSendRetries()
+				sendRetries = 0
 
 				// If SZI flag is set, send ZI-Rsp (transaction)
 				if pkt.Flags&aurp.RoutingFlagSendZoneInfo != 0 {
@@ -659,7 +582,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					}
 					zones := p.RouteTable.ZonesForNetworks(nets)
 					// TODO: split ZI-Rsp packets similarly to ZIP Replies
-					if _, err := p.send(p.Transport.NewZIRspPacket(zones)); err != nil {
+					if _, err := p.Send(p.Transport.NewZIRspPacket(zones)); err != nil {
 						log.Printf("AURP Peer: Couldn't send ZI-Rsp packet: %v", err)
 					}
 				}
@@ -670,9 +593,9 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					// Receiver is unconnected, but their receiver sent us an
 					// RI-Ack for something
 					// Try to reconnect?
-					p.resetSendRetries()
-					p.bumpLastSend()
-					if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+					lastSend = time.Now()
+					sendRetries = 0
+					if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 						log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 						return err
 					}
@@ -680,6 +603,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				}
 
 			case *aurp.RIUpdPacket:
+
 				var ackFlag aurp.RoutingFlag
 
 				for _, et := range pkt.Events {
@@ -719,7 +643,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					}
 				}
 
-				if _, err := p.send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, ackFlag)); err != nil {
+				if _, err := p.Send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, ackFlag)); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Ack: %v", err)
 					return err
 				}
@@ -733,7 +657,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				p.RouteTable.DeleteAURPPeer(p)
 
 				// Respond with RI-Ack
-				if _, err := p.send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, 0)); err != nil {
+				if _, err := p.Send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, 0)); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Ack: %v", err)
 					return err
 				}
@@ -743,7 +667,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 			case *aurp.ZIReqPacket:
 				// TODO: split ZI-Rsp packets similarly to ZIP Replies
 				zones := p.RouteTable.ZonesForNetworks(pkt.Networks)
-				if _, err := p.send(p.Transport.NewZIRspPacket(zones)); err != nil {
+				if _, err := p.Send(p.Transport.NewZIRspPacket(zones)); err != nil {
 					log.Printf("AURP Peer: Couldn't send ZI-Rsp packet: %v", err)
 					return err
 				}
@@ -755,7 +679,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				}
 
 			case *aurp.GDZLReqPacket:
-				if _, err := p.send(p.Transport.NewGDZLRspPacket(-1, nil)); err != nil {
+				if _, err := p.Send(p.Transport.NewGDZLRspPacket(-1, nil)); err != nil {
 					log.Printf("AURP Peer: Couldn't send GDZL-Rsp packet: %v", err)
 					return err
 				}
@@ -764,7 +688,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				log.Printf("AURP Peer: Received a GDZL-Rsp, but I wouldn't have sent a GDZL-Req - that's weird")
 
 			case *aurp.GZNReqPacket:
-				if _, err := p.send(p.Transport.NewGZNRspPacket(pkt.ZoneName, false, nil)); err != nil {
+				if _, err := p.Send(p.Transport.NewGZNRspPacket(pkt.ZoneName, false, nil)); err != nil {
 					log.Printf("AURP Peer: Couldn't send GZN-Rsp packet: %v", err)
 					return err
 				}
@@ -774,7 +698,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 			case *aurp.TicklePacket:
 				// Immediately respond with Tickle-Ack
-				if _, err := p.send(p.Transport.NewTickleAckPacket()); err != nil {
+				if _, err := p.Send(p.Transport.NewTickleAckPacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send Tickle-Ack: %v", err)
 					return err
 				}