Compare commits

...

12 commits

9 changed files with 361 additions and 139 deletions

View file

@@ -62,3 +62,7 @@ sudo setcap 'CAP_NET_BIND_SERVICE=ep CAP_NET_RAW=ep' ~/go/bin/jrouter
 * `NET_RAW` is needed for EtherTalk
 
 TODO: instructions for non-Linux machines
+
+## Bug reports? Feature requests? Complaints? Praise?
+
+You can contact me on the Fediverse at @DrJosh9000@cloudisland.nz, or email me at josh.deprez@gmail.com.

View file

@@ -173,9 +173,12 @@ func (e EventTuples) WriteTo(w io.Writer) (int64, error) {
 }
 
 func parseEventTuples(p []byte) (EventTuples, error) {
-	// Each event tuple is at least 4 bytes, so we need to store at most
-	// len(p)/4 of them.
-	e := make(EventTuples, 0, len(p)/4)
+	// Event tuples can be 1, 4, or 6 bytes long. But the only type of length 1
+	// is the Null event type sent to probe whether or not the data receiver is
+	// still listening. If that's present there probably aren't any other
+	// tuples. Hence len(p)/4 (rounded up) is a reasonable estimate of max tuple
+	// count.
+	e := make(EventTuples, 0, (len(p)+3)/4)
 	for len(p) > 0 {
 		et, nextp, err := parseEventTuple(p)
 		if err != nil {
@@ -198,6 +201,10 @@ type EventTuple struct {
 func (et *EventTuple) WriteTo(w io.Writer) (int64, error) {
 	a := acc(w)
 	a.write8(uint8(et.EventCode))
+	if et.EventCode == EventCodeNull {
+		// null tuple
+		return a.ret()
+	}
 	a.write16(uint16(et.RangeStart))
 	if !et.Extended {
 		// non-extended tuple
@@ -211,12 +218,18 @@ func (et *EventTuple) WriteTo(w io.Writer) (int64, error) {
 }
 
 func parseEventTuple(p []byte) (EventTuple, []byte, error) {
-	if len(p) < 4 {
-		return EventTuple{}, p, fmt.Errorf("insufficient input length %d for network event tuple", len(p))
+	if len(p) < 1 {
+		return EventTuple{}, p, fmt.Errorf("insufficient input length %d for any network event tuple", len(p))
 	}
 	var et EventTuple
 	et.EventCode = EventCode(p[0])
+	if et.EventCode == EventCodeNull {
+		return et, p[1:], nil
+	}
+	if len(p) < 4 {
+		return EventTuple{}, p, fmt.Errorf("insufficient input length %d for non-Null network event tuple", len(p))
+	}
 	et.RangeStart = ddp.Network(binary.BigEndian.Uint16(p[1:3]))
 	et.RangeEnd = et.RangeStart
 	et.Distance = p[3]
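As a sanity check on the new capacity estimate: (len(p)+3)/4 is integer ceiling division by the minimum non-Null tuple size of 4 bytes. A standalone sketch of the arithmetic (the input lengths are hypothetical, just to exercise it):

package main

import "fmt"

func main() {
	// (n+3)/4 rounds up, so a 1-byte Null-only payload still reserves
	// capacity for one tuple, while 4- and 6-byte tuples estimate at one each.
	for _, n := range []int{0, 1, 4, 6, 8, 13} {
		fmt.Printf("len(p) = %2d -> estimated max tuples = %d\n", n, (n+3)/4)
	}
}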

main.go — 35 changed lines

View file

@@ -87,6 +87,11 @@ const peerTableTemplate = `
 <th>Remote addr</th>
 <th>Receiver state</th>
 <th>Sender state</th>
+<th>Last heard from</th>
+<th>Last reconnect</th>
+<th>Last update</th>
+<th>Last send</th>
+<th>Send retries</th>
 </tr></thead>
 <tbody>
 {{range $peer := . }}
@@ -95,6 +100,11 @@ const peerTableTemplate = `
 <td>{{$peer.RemoteAddr}}</td>
 <td>{{$peer.ReceiverState}}</td>
 <td>{{$peer.SenderState}}</td>
+<td>{{$peer.LastHeardFromAgo}}</td>
+<td>{{$peer.LastReconnectAgo}}</td>
+<td>{{$peer.LastUpdateAgo}}</td>
+<td>{{$peer.LastSendAgo}}</td>
+<td>{{$peer.SendRetries}}</td>
 </tr>
 {{end}}
 </tbody>
@@ -287,18 +297,7 @@ func main() {
 			continue
 		}
 
-		peer := &router.AURPPeer{
-			Transport: &aurp.Transport{
-				LocalDI:     localDI,
-				RemoteDI:    aurp.IPDomainIdentifier(raddr.IP),
-				LocalConnID: nextConnID,
-			},
-			UDPConn:        ln,
-			ConfiguredAddr: peerStr,
-			RemoteAddr:     raddr,
-			ReceiveCh:      make(chan aurp.Packet, 1024),
-			RouteTable:     routes,
-		}
+		peer := router.NewAURPPeer(routes, ln, peerStr, raddr, localDI, nil, nextConnID)
 		aurp.Inc(&nextConnID)
 		peersMu.Lock()
 		peers[udpAddrFromNet(raddr)] = peer
@@ -394,17 +393,7 @@ func main() {
 				continue
 			}
 			// New peer!
-			pr = &router.AURPPeer{
-				Transport: &aurp.Transport{
-					LocalDI:     localDI,
-					RemoteDI:    dh.SourceDI, // platinum rule
-					LocalConnID: nextConnID,
-				},
-				UDPConn:    ln,
-				RemoteAddr: raddr,
-				ReceiveCh:  make(chan aurp.Packet, 1024),
-				RouteTable: routes,
-			}
+			pr = router.NewAURPPeer(routes, ln, "", raddr, localDI, dh.SourceDI, nextConnID)
 			aurp.Inc(&nextConnID)
 			peers[ra] = pr
 			goPeerHandler(pr)

View file

@@ -37,6 +37,8 @@ const (
 	maxAMTEntryAge        = 30 * time.Second
 	aarpRequestRetransmit = 1 * time.Second
 	aarpRequestTimeout    = 10 * time.Second
+
+	aarpBodyLength = 28 // bytes
 )
 
 const aarpStatusTemplate = `
@@ -181,8 +183,17 @@ func (a *AARPMachine) Run(ctx context.Context) error {
 				a.incomingCh = nil
 			}
 
+			// sfiera/multitalk will return an "excess data" error if the
+			// payload is too big. Most traffic I've seen locally does not have
+			// this problem, but I've seen one report with some junk trailing
+			// data on AARP packets.
+			payload := ethFrame.Payload
+			if len(payload) > aarpBodyLength {
+				payload = payload[:aarpBodyLength]
+			}
+
 			var aapkt aarp.Packet
-			if err := aarp.Unmarshal(ethFrame.Payload, &aapkt); err != nil {
+			if err := aarp.Unmarshal(payload, &aapkt); err != nil {
 				log.Printf("Couldn't unmarshal AARP packet: %v", err)
 				continue
 			}
@@ -378,10 +389,7 @@ func (e AMTEntry) Valid() bool {
 // LastUpdatedAgo is a friendly string reporting how long ago the entry was
 // updated/resolved.
 func (e AMTEntry) LastUpdatedAgo() string {
-	if e.LastUpdated.IsZero() {
-		return "never"
-	}
-	return fmt.Sprintf("%v ago", time.Since(e.LastUpdated).Truncate(time.Millisecond))
+	return ago(e.LastUpdated)
 }
 
 // addressMappingTable implements a concurrent-safe Address Mapping Table for
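Earlier in this file, the AARP payload is clamped to aarpBodyLength bytes before unmarshalling. A minimal sketch of the same guard as a standalone helper (the helper name is mine, not from the change):

// clampAARPBody trims trailing junk so a strict unmarshaller doesn't
// reject the packet outright.
const aarpBodyLength = 28 // bytes; the fixed size of an AARP body

func clampAARPBody(payload []byte) []byte {
	if len(payload) > aarpBodyLength {
		return payload[:aarpBodyLength]
	}
	return payload
}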

View file

@@ -16,6 +16,11 @@
 package router
 
+import (
+	"fmt"
+	"time"
+)
+
 // StringSet is a set of strings.
 // Yep, yet another string set implementation. Took me 2 minutes to write *shrug*
 type StringSet map[string]struct{}
@@ -50,3 +55,11 @@ func SetFromSlice(ss []string) StringSet {
 	set.Insert(ss...)
 	return set
 }
+
+// ago is a helper for formatting times.
+func ago(t time.Time) string {
+	if t.IsZero() {
+		return "never"
+	}
+	return fmt.Sprintf("%v ago", time.Since(t).Truncate(time.Millisecond))
+}
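For illustration, the ago helper added above produces output like this (a sketch; exact durations depend on when it runs):

package main

import (
	"fmt"
	"time"
)

func ago(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return fmt.Sprintf("%v ago", time.Since(t).Truncate(time.Millisecond))
}

func main() {
	fmt.Println(ago(time.Time{}))                       // "never"
	fmt.Println(ago(time.Now().Add(-90 * time.Second))) // roughly "1m30s ago"
}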

View file

@@ -114,9 +114,75 @@ type AURPPeer struct {
 	// Route table (the peer will add/remove/update routes and zones)
 	RouteTable *RouteTable
 
-	mu     sync.RWMutex
-	rstate ReceiverState
-	sstate SenderState
+	// Event tuples yet to be sent to this peer in an RI-Upd.
+	pendingEventsMu sync.Mutex
+	pendingEvents   aurp.EventTuples
+
+	// The internal states below are only set within the Handle loop, but can
+	// be read concurrently from outside.
+	mu            sync.RWMutex
+	rstate        ReceiverState
+	sstate        SenderState
+	lastReconnect time.Time
+	lastHeardFrom time.Time
+	lastSend      time.Time // TODO: clarify use of lastSend / sendRetries
+	lastUpdate    time.Time
+	sendRetries   int
+}
+
+func NewAURPPeer(routes *RouteTable, udpConn *net.UDPConn, peerAddr string, raddr *net.UDPAddr, localDI, remoteDI aurp.DomainIdentifier, connID uint16) *AURPPeer {
+	if remoteDI == nil {
+		remoteDI = aurp.IPDomainIdentifier(raddr.IP)
+	}
+	return &AURPPeer{
+		Transport: &aurp.Transport{
+			LocalDI:     localDI,
+			RemoteDI:    remoteDI,
+			LocalConnID: connID,
+		},
+		UDPConn:        udpConn,
+		ConfiguredAddr: peerAddr,
+		RemoteAddr:     raddr,
+		ReceiveCh:      make(chan aurp.Packet, 1024),
+		RouteTable:     routes,
+	}
+}
+
+func (p *AURPPeer) addPendingEvent(ec aurp.EventCode, route *Route) {
+	// Don't advertise routes to AURP peers to other AURP peers
+	if route.AURPPeer != nil {
+		return
+	}
+	et := aurp.EventTuple{
+		EventCode:  ec,
+		Extended:   route.Extended,
+		RangeStart: route.NetStart,
+		Distance:   route.Distance,
+		RangeEnd:   route.NetEnd,
+	}
+	switch ec {
+	case aurp.EventCodeND, aurp.EventCodeNRC:
+		et.Distance = 0 // "The distance field does not apply to ND or NRC event tuples and should be set to 0."
+	}
+	p.pendingEventsMu.Lock()
+	defer p.pendingEventsMu.Unlock()
+	p.pendingEvents = append(p.pendingEvents, et)
+}
+
+func (p *AURPPeer) RouteAdded(route *Route) {
+	p.addPendingEvent(aurp.EventCodeNA, route)
+}
+
+func (p *AURPPeer) RouteDeleted(route *Route) {
+	p.addPendingEvent(aurp.EventCodeND, route)
+}
+
+func (p *AURPPeer) RouteDistanceChanged(route *Route) {
+	p.addPendingEvent(aurp.EventCodeNDC, route)
+}
+
+func (p *AURPPeer) RouteForwarderChanged(route *Route) {
+	p.addPendingEvent(aurp.EventCodeNRC, route)
 }
@@ -124,7 +190,7 @@ func (p *AURPPeer) Forward(ddpkt *ddp.ExtPacket) error {
 	if err != nil {
 		return err
 	}
-	_, err = p.Send(p.Transport.NewAppleTalkPacket(outPkt))
+	_, err = p.send(p.Transport.NewAppleTalkPacket(outPkt))
 	return err
 }
@@ -140,6 +206,36 @@ func (p *AURPPeer) SenderState() SenderState {
 	return p.sstate
 }
 
+func (p *AURPPeer) LastReconnectAgo() string {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return ago(p.lastReconnect)
+}
+
+func (p *AURPPeer) LastHeardFromAgo() string {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return ago(p.lastHeardFrom)
+}
+
+func (p *AURPPeer) LastSendAgo() string {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return ago(p.lastSend)
+}
+
+func (p *AURPPeer) LastUpdateAgo() string {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return ago(p.lastUpdate)
+}
+
+func (p *AURPPeer) SendRetries() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.sendRetries
+}
+
 func (p *AURPPeer) setRState(rstate ReceiverState) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
@@ -152,6 +248,42 @@ func (p *AURPPeer) setSState(sstate SenderState) {
 	p.sstate = sstate
 }
 
+func (p *AURPPeer) incSendRetries() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.sendRetries++
+}
+
+func (p *AURPPeer) resetSendRetries() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.sendRetries = 0
+}
+
+func (p *AURPPeer) bumpLastHeardFrom() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.lastHeardFrom = time.Now()
+}
+
+func (p *AURPPeer) bumpLastReconnect() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.lastReconnect = time.Now()
+}
+
+func (p *AURPPeer) bumpLastSend() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.lastSend = time.Now()
+}
+
+func (p *AURPPeer) bumpLastUpdate() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.lastUpdate = time.Now()
+}
+
 func (p *AURPPeer) disconnect() {
 	p.mu.Lock()
 	defer p.mu.Unlock()
@@ -159,8 +291,8 @@ func (p *AURPPeer) disconnect() {
 	p.sstate = SenderUnconnected
 }
 
-// Send encodes and sends pkt to the remote host.
-func (p *AURPPeer) Send(pkt aurp.Packet) (int, error) {
+// send encodes and sends pkt to the remote host.
+func (p *AURPPeer) send(pkt aurp.Packet) (int, error) {
 	var b bytes.Buffer
 	if _, err := pkt.WriteTo(&b); err != nil {
 		return 0, err
@@ -170,23 +302,28 @@
 }
 
 func (p *AURPPeer) Handle(ctx context.Context) error {
+	// Stop listening to events if the goroutine exits
+	defer p.RouteTable.RemoveObserver(p)
+
 	rticker := time.NewTicker(1 * time.Second)
 	defer rticker.Stop()
 	sticker := time.NewTicker(1 * time.Second)
 	defer sticker.Stop()
 
-	lastReconnect := time.Now()
-	lastHeardFrom := time.Now()
-	lastSend := time.Now() // TODO: clarify use of lastSend / sendRetries
-	lastUpdate := time.Now()
-	sendRetries := 0
+	p.mu.Lock()
+	p.lastReconnect = time.Now()
+	p.lastHeardFrom = time.Now()
+	p.lastSend = time.Now() // TODO: clarify use of lastSend / sendRetries
+	p.lastUpdate = time.Now()
+	p.sendRetries = 0
+	p.mu.Unlock()
 
 	var lastRISent aurp.Packet
 
 	p.disconnect()
 
 	// Write an Open-Req packet
-	if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+	if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 		log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 		return err
 	}
@@ -202,7 +339,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 			}
 			// Send a best-effort Router Down before returning
 			lastRISent = p.Transport.NewRDPacket(aurp.ErrCodeNormalClose)
-			if _, err := p.Send(lastRISent); err != nil {
+			if _, err := p.send(lastRISent); err != nil {
 				log.Printf("Couldn't send RD packet: %v", err)
 			}
 			return ctx.Err()
@@ -210,60 +347,60 @@
 		case <-rticker.C:
 			switch p.rstate {
 			case ReceiverWaitForOpenRsp:
-				if time.Since(lastSend) <= sendRetryTimer {
+				if time.Since(p.lastSend) <= sendRetryTimer {
 					break
 				}
-				if sendRetries >= sendRetryLimit {
+				if p.sendRetries >= sendRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached while waiting for Open-Rsp, closing connection")
 					p.setRState(ReceiverUnconnected)
 					break
 				}
 				// Send another Open-Req
-				sendRetries++
-				lastSend = time.Now()
-				if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+				p.incSendRetries()
+				p.bumpLastSend()
+				if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 					log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 					return err
 				}
 
 			case ReceiverConnected:
 				// Check LHFT, send tickle?
-				if time.Since(lastHeardFrom) <= lastHeardFromTimer {
+				if time.Since(p.lastHeardFrom) <= lastHeardFromTimer {
 					break
 				}
-				if _, err := p.Send(p.Transport.NewTicklePacket()); err != nil {
+				if _, err := p.send(p.Transport.NewTicklePacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send Tickle: %v", err)
 					return err
 				}
 				p.setRState(ReceiverWaitForTickleAck)
-				sendRetries = 0
-				lastSend = time.Now()
+				p.resetSendRetries()
+				p.bumpLastSend()
 
 			case ReceiverWaitForTickleAck:
-				if time.Since(lastSend) <= sendRetryTimer {
+				if time.Since(p.lastSend) <= sendRetryTimer {
 					break
 				}
-				if sendRetries >= tickleRetryLimit {
+				if p.sendRetries >= tickleRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached while waiting for Tickle-Ack, closing connection")
 					p.setRState(ReceiverUnconnected)
 					p.RouteTable.DeleteAURPPeer(p)
 					break
 				}
-				sendRetries++
-				lastSend = time.Now()
-				if _, err := p.Send(p.Transport.NewTicklePacket()); err != nil {
+				p.incSendRetries()
+				p.bumpLastSend()
+				if _, err := p.send(p.Transport.NewTicklePacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send Tickle: %v", err)
 					return err
 				}
 				// still in Wait For Tickle-Ack
 
 			case ReceiverWaitForRIRsp:
-				if time.Since(lastSend) <= sendRetryTimer {
+				if time.Since(p.lastSend) <= sendRetryTimer {
 					break
 				}
-				if sendRetries >= sendRetryLimit {
+				if p.sendRetries >= sendRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached while waiting for RI-Rsp, closing connection")
 					p.setRState(ReceiverUnconnected)
 					p.RouteTable.DeleteAURPPeer(p)
@@ -272,8 +409,9 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				// RI-Req is stateless, so we don't need to cache the one we
 				// sent earlier just to send it again
-				sendRetries++
-				if _, err := p.Send(p.Transport.NewRIReqPacket()); err != nil {
+				p.incSendRetries()
+				p.bumpLastSend()
+				if _, err := p.send(p.Transport.NewRIReqPacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Req packet: %v", err)
 					return err
 				}
@@ -282,18 +420,18 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 			case ReceiverUnconnected:
 				// Data receiver is unconnected. If data sender is connected,
 				// send a null RI-Upd to check if the sender is also unconnected
-				if p.sstate == SenderConnected && time.Since(lastSend) > sendRetryTimer {
-					if sendRetries >= sendRetryLimit {
+				if p.sstate == SenderConnected && time.Since(p.lastSend) > sendRetryTimer {
+					if p.sendRetries >= sendRetryLimit {
 						log.Printf("AURP Peer: Send retry limit reached while probing sender connect, closing connection")
 					}
-					sendRetries++
-					lastSend = time.Now()
+					p.incSendRetries()
+					p.bumpLastSend()
 					aurp.Inc(&p.Transport.LocalSeq)
 					events := aurp.EventTuples{{
 						EventCode: aurp.EventCodeNull,
 					}}
 					lastRISent = p.Transport.NewRIUpdPacket(events)
-					if _, err := p.Send(lastRISent); err != nil {
+					if _, err := p.send(lastRISent); err != nil {
 						log.Printf("AURP Peer: Couldn't send RI-Upd packet: %v", err)
 						return err
 					}
@@ -302,7 +440,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				if p.ConfiguredAddr != "" {
 					// Periodically try to reconnect, if this peer is in the config file
-					if time.Since(lastReconnect) <= reconnectTimer {
+					if time.Since(p.lastReconnect) <= reconnectTimer {
 						break
 					}
 
@@ -315,10 +453,10 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					log.Printf("AURP Peer: resolved %q to %v", p.ConfiguredAddr, raddr)
 					p.RemoteAddr = raddr
 
-					lastReconnect = time.Now()
-					sendRetries = 0
-					lastSend = time.Now()
-					if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+					p.bumpLastReconnect()
+					p.resetSendRetries()
+					p.bumpLastSend()
+					if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 						log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 						return err
 					}
@@ -332,40 +470,64 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				// Do nothing
 
 			case SenderConnected:
-				if time.Since(lastUpdate) <= updateTimer {
+				if time.Since(p.lastUpdate) <= updateTimer {
 					break
 				}
-				// TODO: is there a routing update to send?
+
+				// Are there routing updates to send?
+				p.pendingEventsMu.Lock()
+				if len(p.pendingEvents) == 0 {
+					p.pendingEventsMu.Unlock()
+					break
+				}
+				// Yes - swap the slices, release the mutex, then send them
+				pending := p.pendingEvents
+				p.pendingEvents = make(aurp.EventTuples, 0, cap(pending))
+				p.pendingEventsMu.Unlock()
+
+				// TODO: eliminate events that cancel out (e.g. NA then ND)
+				// TODO: split pending events to fit within a packet
+
+				p.bumpLastUpdate()
+				aurp.Inc(&p.Transport.LocalSeq)
+				lastRISent = p.Transport.NewRIUpdPacket(pending)
+				if _, err := p.send(lastRISent); err != nil {
+					log.Printf("AURP Peer: Couldn't send RI-Upd packet: %v", err)
+					return err
+				}
+				p.setSState(SenderWaitForRIUpdAck)
 
 			case SenderWaitForRIRspAck, SenderWaitForRIUpdAck:
-				if time.Since(lastSend) <= sendRetryTimer {
+				if time.Since(p.lastSend) <= sendRetryTimer {
 					break
 				}
 				if lastRISent == nil {
 					log.Print("AURP Peer: sender retry: lastRISent = nil?")
 					continue
 				}
-				if sendRetries >= sendRetryLimit {
+				if p.sendRetries >= sendRetryLimit {
 					log.Printf("AURP Peer: Send retry limit reached, closing connection")
 					p.setSState(SenderUnconnected)
+					p.RouteTable.RemoveObserver(p)
 					continue
 				}
-				sendRetries++
-				lastSend = time.Now()
-				if _, err := p.Send(lastRISent); err != nil {
+				p.incSendRetries()
+				p.bumpLastSend()
+				if _, err := p.send(lastRISent); err != nil {
 					log.Printf("AURP Peer: Couldn't re-send %T: %v", lastRISent, err)
 					return err
 				}
 
 			case SenderWaitForRDAck:
-				if time.Since(lastSend) <= sendRetryTimer {
+				if time.Since(p.lastSend) <= sendRetryTimer {
 					break
 				}
 				p.setSState(SenderUnconnected)
+				p.RouteTable.RemoveObserver(p)
 			}
 
 		case pkt := <-p.ReceiveCh:
-			lastHeardFrom = time.Now()
+			p.bumpLastHeardFrom()
 			switch pkt := pkt.(type) {
 			case *aurp.OpenReqPacket:
@@ -392,19 +554,21 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					orsp = p.Transport.NewOpenRspPacket(0, 1, nil)
 				}
 
-				if _, err := p.Send(orsp); err != nil {
+				if _, err := p.send(orsp); err != nil {
 					log.Printf("AURP Peer: Couldn't send Open-Rsp: %v", err)
 					return err
 				}
 				if orsp.RateOrErrCode >= 0 {
+					// Data sender is successfully in connected state
 					p.setSState(SenderConnected)
+					p.RouteTable.AddObserver(p)
 				}
 
 				// If receiver is unconnected, commence connecting
 				if p.rstate == ReceiverUnconnected {
-					lastSend = time.Now()
-					sendRetries = 0
-					if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+					p.resetSendRetries()
+					p.bumpLastSend()
+					if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 						log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 						return err
 					}
@@ -425,8 +589,8 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				p.setRState(ReceiverConnected)
 
 				// Send an RI-Req
-				sendRetries = 0
-				if _, err := p.Send(p.Transport.NewRIReqPacket()); err != nil {
+				p.resetSendRetries()
+				if _, err := p.send(p.Transport.NewRIReqPacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Req packet: %v", err)
 					return err
 				}
@@ -449,7 +613,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				p.Transport.LocalSeq = 1
 				// TODO: Split tuples across multiple packets as required
 				lastRISent = p.Transport.NewRIRspPacket(aurp.RoutingFlagLast, nets)
-				if _, err := p.Send(lastRISent); err != nil {
+				if _, err := p.send(lastRISent); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Rsp packet: %v", err)
 					return err
 				}
@@ -474,7 +638,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 				// TODO: track which networks we don't have zone info for, and
 				// only set SZI for those ?
-				if _, err := p.Send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, aurp.RoutingFlagSendZoneInfo)); err != nil {
+				if _, err := p.send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, aurp.RoutingFlagSendZoneInfo)); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Ack packet: %v", err)
 					return err
 				}
@@ -500,7 +664,8 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				}
 
 				p.setSState(SenderConnected)
-				sendRetries = 0
+				p.resetSendRetries()
+				p.RouteTable.AddObserver(p)
 
 				// If SZI flag is set, send ZI-Rsp (transaction)
 				if pkt.Flags&aurp.RoutingFlagSendZoneInfo != 0 {
@@ -525,7 +690,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					}
 					zones := p.RouteTable.ZonesForNetworks(nets)
 					// TODO: split ZI-Rsp packets similarly to ZIP Replies
-					if _, err := p.Send(p.Transport.NewZIRspPacket(zones)); err != nil {
+					if _, err := p.send(p.Transport.NewZIRspPacket(zones)); err != nil {
 						log.Printf("AURP Peer: Couldn't send ZI-Rsp packet: %v", err)
 					}
 				}
@@ -536,9 +701,9 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					// Receiver is unconnected, but their receiver sent us an
 					// RI-Ack for something
 					// Try to reconnect?
-					lastSend = time.Now()
-					sendRetries = 0
-					if _, err := p.Send(p.Transport.NewOpenReqPacket(nil)); err != nil {
+					p.resetSendRetries()
+					p.bumpLastSend()
+					if _, err := p.send(p.Transport.NewOpenReqPacket(nil)); err != nil {
 						log.Printf("AURP Peer: Couldn't send Open-Req packet: %v", err)
 						return err
 					}
@@ -546,7 +711,6 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				}
 
 			case *aurp.RIUpdPacket:
-
 				var ackFlag aurp.RoutingFlag
 				for _, et := range pkt.Events {
@@ -586,7 +750,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 					}
 				}
 
-				if _, err := p.Send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, ackFlag)); err != nil {
+				if _, err := p.send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, ackFlag)); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Ack: %v", err)
 					return err
 				}
@@ -600,7 +764,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				p.RouteTable.DeleteAURPPeer(p)
 
 				// Respond with RI-Ack
-				if _, err := p.Send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, 0)); err != nil {
+				if _, err := p.send(p.Transport.NewRIAckPacket(pkt.ConnectionID, pkt.Sequence, 0)); err != nil {
 					log.Printf("AURP Peer: Couldn't send RI-Ack: %v", err)
 					return err
 				}
@@ -610,7 +774,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 			case *aurp.ZIReqPacket:
 				// TODO: split ZI-Rsp packets similarly to ZIP Replies
 				zones := p.RouteTable.ZonesForNetworks(pkt.Networks)
-				if _, err := p.Send(p.Transport.NewZIRspPacket(zones)); err != nil {
+				if _, err := p.send(p.Transport.NewZIRspPacket(zones)); err != nil {
 					log.Printf("AURP Peer: Couldn't send ZI-Rsp packet: %v", err)
 					return err
 				}
@@ -622,7 +786,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				}
 
 			case *aurp.GDZLReqPacket:
-				if _, err := p.Send(p.Transport.NewGDZLRspPacket(-1, nil)); err != nil {
+				if _, err := p.send(p.Transport.NewGDZLRspPacket(-1, nil)); err != nil {
 					log.Printf("AURP Peer: Couldn't send GDZL-Rsp packet: %v", err)
 					return err
 				}
@@ -631,7 +795,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 				log.Printf("AURP Peer: Received a GDZL-Rsp, but I wouldn't have sent a GDZL-Req - that's weird")
 
 			case *aurp.GZNReqPacket:
-				if _, err := p.Send(p.Transport.NewGZNRspPacket(pkt.ZoneName, false, nil)); err != nil {
+				if _, err := p.send(p.Transport.NewGZNRspPacket(pkt.ZoneName, false, nil)); err != nil {
 					log.Printf("AURP Peer: Couldn't send GZN-Rsp packet: %v", err)
 					return err
 				}
@@ -641,7 +805,7 @@ func (p *AURPPeer) Handle(ctx context.Context) error {
 
 			case *aurp.TicklePacket:
 				// Immediately respond with Tickle-Ack
-				if _, err := p.Send(p.Transport.NewTickleAckPacket()); err != nil {
+				if _, err := p.send(p.Transport.NewTickleAckPacket()); err != nil {
 					log.Printf("AURP Peer: Couldn't send Tickle-Ack: %v", err)
 					return err
 				}
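The SenderConnected case above drains pending events with a swap: hold pendingEventsMu just long enough to steal the slice, then build and send the RI-Upd without the lock. A generic sketch of that pattern (the names here are hypothetical, not from the repo):

package main

import (
	"fmt"
	"sync"
)

type batcher struct {
	mu      sync.Mutex
	pending []string
}

func (b *batcher) add(ev string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.pending = append(b.pending, ev)
}

// drain swaps out the pending slice under the lock, so the (possibly slow)
// send can happen without holding the mutex.
func (b *batcher) drain() []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	out := b.pending
	b.pending = make([]string, 0, cap(out))
	return out
}

func main() {
	var b batcher
	b.add("NA 100-105")
	b.add("ND 42")
	fmt.Println(b.drain()) // [NA 100-105 ND 42]
	fmt.Println(b.drain()) // []
}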

View file

@@ -18,6 +18,7 @@ package router
 import (
 	"context"
+	"encoding/binary"
 	"errors"
 	"io"
 	"log"
@@ -79,8 +80,18 @@ func (port *EtherTalkPort) Serve(ctx context.Context) {
 		case ethertalk.AppleTalkProto:
 			// log.Print("Got an AppleTalk frame")
 
+			// Workaround for strict length checking in sfiera/multitalk
+			payload := ethFrame.Payload
+			if len(payload) < 2 {
+				log.Printf("Couldn't unmarshal DDP packet: too small (length = %d)", len(payload))
+			}
+			if size := binary.BigEndian.Uint16(payload[:2]) & 0x3ff; len(payload) > int(size) {
+				payload = payload[:size]
+			}
+
 			ddpkt := new(ddp.ExtPacket)
-			if err := ddp.ExtUnmarshal(ethFrame.Payload, ddpkt); err != nil {
+			if err := ddp.ExtUnmarshal(payload, ddpkt); err != nil {
 				log.Printf("Couldn't unmarshal DDP packet: %v", err)
 				continue
 			}
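One thing worth noting in the hunk above: when len(payload) < 2 the new code logs but still reaches the payload[:2] slice on the next line. A sketch of the same workaround with an explicit bail-out (the error return is my addition, not part of the change):

package router

import (
	"encoding/binary"
	"fmt"
)

// clampDDP trims trailing junk using the DDP length field, which occupies
// the low 10 bits of the first two bytes of an extended DDP header.
func clampDDP(payload []byte) ([]byte, error) {
	if len(payload) < 2 {
		// Return early rather than indexing payload[:2] below.
		return nil, fmt.Errorf("too small for a DDP header (length = %d)", len(payload))
	}
	if size := binary.BigEndian.Uint16(payload[:2]) & 0x3ff; len(payload) > int(size) {
		payload = payload[:size]
	}
	return payload, nil
}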

View file

@@ -45,10 +45,7 @@ type Route struct {
 }
 
 func (r Route) LastSeenAgo() string {
-	if r.LastSeen.IsZero() {
-		return "never"
-	}
-	return fmt.Sprintf("%v ago", time.Since(r.LastSeen).Truncate(time.Millisecond))
+	return ago(r.LastSeen)
 }
 
 // Valid reports whether the route is valid.
@@ -58,17 +55,40 @@ func (r *Route) Valid() bool {
 	return len(r.ZoneNames) > 0 && (r.EtherTalkPeer == nil || time.Since(r.LastSeen) <= maxRouteAge)
 }
 
+type RouteTableObserver interface {
+	RouteAdded(*Route)
+	RouteDeleted(*Route)
+	RouteDistanceChanged(*Route)
+	RouteForwarderChanged(*Route)
+}
+
 type RouteTable struct {
-	mu     sync.Mutex
-	routes map[*Route]struct{}
+	routesMu sync.RWMutex
+	routes   map[*Route]struct{}
+
+	observersMu sync.RWMutex
+	observers   map[RouteTableObserver]struct{}
 }
 
 func NewRouteTable() *RouteTable {
 	return &RouteTable{
-		routes: make(map[*Route]struct{}),
+		routes:    make(map[*Route]struct{}),
+		observers: make(map[RouteTableObserver]struct{}),
 	}
 }
 
+func (rt *RouteTable) AddObserver(obs RouteTableObserver) {
+	rt.observersMu.Lock()
+	defer rt.observersMu.Unlock()
+	rt.observers[obs] = struct{}{}
+}
+
+func (rt *RouteTable) RemoveObserver(obs RouteTableObserver) {
+	rt.observersMu.Lock()
+	defer rt.observersMu.Unlock()
+	delete(rt.observers, obs)
+}
+
 func (rt *RouteTable) InsertEtherTalkDirect(port *EtherTalkPort) {
 	r := &Route{
 		Extended: true,
@@ -80,14 +100,14 @@ func (rt *RouteTable) InsertEtherTalkDirect(port *EtherTalkPort) {
 		EtherTalkDirect: port,
 	}
 
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 	rt.routes[r] = struct{}{}
 }
 
 func (rt *RouteTable) Dump() []Route {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	table := make([]Route, 0, len(rt.routes))
 	for r := range rt.routes {
@@ -97,8 +117,8 @@ func (rt *RouteTable) Dump() []Route {
 }
 
 func (rt *RouteTable) LookupRoute(network ddp.Network) *Route {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	var bestRoute *Route
 	for r := range rt.routes {
@@ -120,8 +140,8 @@ func (rt *RouteTable) LookupRoute(network ddp.Network) *Route {
 }
 
 func (rt *RouteTable) DeleteAURPPeer(peer *AURPPeer) {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	for route := range rt.routes {
 		if route.AURPPeer == peer {
@@ -131,8 +151,8 @@ func (rt *RouteTable) DeleteAURPPeer(peer *AURPPeer) {
 }
 
 func (rt *RouteTable) DeleteAURPPeerNetwork(peer *AURPPeer, network ddp.Network) {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	for route := range rt.routes {
 		if route.AURPPeer == peer && route.NetStart == network {
@@ -142,8 +162,8 @@ func (rt *RouteTable) DeleteAURPPeerNetwork(peer *AURPPeer, network ddp.Network)
 }
 
 func (rt *RouteTable) UpdateAURPRouteDistance(peer *AURPPeer, network ddp.Network, distance uint8) {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	for route := range rt.routes {
 		if route.AURPPeer == peer && route.NetStart == network {
@@ -161,8 +181,8 @@ func (rt *RouteTable) UpsertEtherTalkRoute(peer *EtherTalkPeer, extended bool, n
 		return nil, fmt.Errorf("invalid network range [%d, %d] for nonextended network", netStart, netEnd)
 	}
 
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	// Update?
 	for r := range rt.routes {
@@ -213,16 +233,16 @@ func (rt *RouteTable) InsertAURPRoute(peer *AURPPeer, extended bool, netStart, n
 		AURPPeer: peer,
 	}
 
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 	rt.routes[r] = struct{}{}
 	return nil
 }
 
 // ValidRoutes returns all valid routes.
 func (rt *RouteTable) ValidRoutes() []*Route {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.RLock()
+	defer rt.routesMu.RUnlock()
 	valid := make([]*Route, 0, len(rt.routes))
 	for r := range rt.routes {
 		if r.Valid() {
@@ -234,8 +254,8 @@ func (rt *RouteTable) ValidRoutes() []*Route {
 
 // ValidNonAURPRoutes returns all valid routes that were not learned via AURP.
 func (rt *RouteTable) ValidNonAURPRoutes() []*Route {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.RLock()
+	defer rt.routesMu.RUnlock()
 	valid := make([]*Route, 0, len(rt.routes))
 	for r := range rt.routes {
 		if r.AURPPeer != nil {
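With the observer hooks added above, anything implementing RouteTableObserver can subscribe to route changes. A toy observer (hypothetical, for illustration only; not part of the change) would register like this:

package router

import "log"

// logObserver just logs route changes; it exists only to illustrate the
// RouteTableObserver interface.
type logObserver struct{}

func (logObserver) RouteAdded(r *Route)            { log.Printf("route added: %d-%d", r.NetStart, r.NetEnd) }
func (logObserver) RouteDeleted(r *Route)          { log.Printf("route deleted: %d-%d", r.NetStart, r.NetEnd) }
func (logObserver) RouteDistanceChanged(r *Route)  { log.Printf("route distance now %d", r.Distance) }
func (logObserver) RouteForwarderChanged(r *Route) { log.Printf("route forwarder changed") }

func exampleRegister(rt *RouteTable) {
	obs := logObserver{}
	rt.AddObserver(obs) // start receiving callbacks
	defer rt.RemoveObserver(obs)
}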

View file

@@ -23,8 +23,8 @@ import (
 )
 
 func (rt *RouteTable) AddZonesToNetwork(n ddp.Network, zs ...string) {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 	for r := range rt.routes {
 		if n < r.NetStart || n > r.NetEnd {
 			continue
@@ -39,8 +39,8 @@ func (rt *RouteTable) AddZonesToNetwork(n ddp.Network, zs ...string) {
 
 func (rt *RouteTable) ZonesForNetworks(ns []ddp.Network) map[ddp.Network][]string {
 	zs := make(map[ddp.Network][]string)
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 	for r := range rt.routes {
 		if !r.Valid() {
 			continue
@@ -55,8 +55,8 @@ func (rt *RouteTable) ZonesForNetworks(ns []ddp.Network) map[ddp.Network][]strin
 }
 
 func (rt *RouteTable) RoutesForZone(zone string) []*Route {
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 
 	var routes []*Route
 	for r := range rt.routes {
@@ -73,8 +73,8 @@ func (rt *RouteTable) RoutesForZone(zone string) []*Route {
 
 func (rt *RouteTable) AllZoneNames() (zones []string) {
 	defer slices.Sort(zones)
-	rt.mu.Lock()
-	defer rt.mu.Unlock()
+	rt.routesMu.Lock()
+	defer rt.routesMu.Unlock()
 	zs := make(StringSet)
 	for r := range rt.routes {