notify/server_peer_detach_test.go

package notify

import (
	"context"
	"net"
	"testing"
	"time"
)
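
// TestClientConnReadTUMessageReadErrorDetachesBoundStreamPeer verifies that a
// read error on an identity-bound stream peer detaches the transport while
// keeping the logical connection registered, and cancels every waiter scoped
// to the dropped transport.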
func TestClientConnReadTUMessageReadErrorDetachesBoundStreamPeer(t *testing.T) {
	server := NewServer().(*ServerCommon)
	left, right := net.Pipe()
	stopCtx, stopFn := context.WithCancel(context.Background())
	defer stopFn()
	logical, _, _ := newRegisteredServerLogicalForTest(t, server, "client-read-error-bound", left, stopCtx, stopFn)
	client := clientConnFromLogical(logical)
	client.markClientConnIdentityBound()
	scope := serverTransportScope(client)
	pending := server.getPendingWaitPool().createAndStoreWithScope(TransferMsg{ID: 17001, Type: MSG_SYNC_ASK}, scope)
	fileWait := server.getFileAckPool().prepare(scope, "file-read-error-bound", "end", 0)
	signalWait := server.getSignalAckPool().prepare(scope, 7001)
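
	// Run the reader in the background, then close the remote end of the pipe
	// so readTUMessage exits with a read error.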
	done := make(chan struct{})
	go func() {
		client.readTUMessage()
		close(done)
	}()
	_ = right.Close()
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("readTUMessage should exit after read error")
	}
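
	// The read error should detach the transport but leave the logical peer
	// alive, registered, and with its stop context still open.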
	status := client.Status()
	if !status.Alive || status.Reason != "" || status.Err != nil {
		t.Fatalf("bound stream peer should stay logically alive after read error: %+v", status)
	}
	if got := server.GetLogicalConn(client.ClientID); got != logical {
		t.Fatalf("bound stream peer should remain registered, got %+v want %+v", got, logical)
	}
	if client.clientConnTransportSnapshot() != nil {
		t.Fatalf("bound stream peer transport should be detached, got %v", client.clientConnTransportSnapshot())
	}
	if client.clientConnTransportAttachedSnapshot() {
		t.Fatal("bound stream peer transport should be marked detached")
	}
	select {
	case <-client.clientConnStopContextSnapshot().Done():
		t.Fatal("logical stop context should remain active after transport detach")
	default:
	}
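
	// Detaching the transport should immediately cancel every waiter scoped to it.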
	if err := server.getFileAckPool().waitPrepared(fileWait, defaultFileAckTimeout); err == nil || err.Error() != "file ack canceled" {
		t.Fatalf("file waiter cancel mismatch after transport detach: %v", err)
	}
	if err := server.getSignalAckPool().waitPrepared(signalWait, defaultSignalAckTimeout); err == nil || err.Error() != "signal ack canceled" {
		t.Fatalf("signal waiter cancel mismatch after transport detach: %v", err)
	}
	select {
	case _, ok := <-pending.Reply:
		if ok {
			t.Fatal("pending waiter should be canceled after transport detach")
		}
	default:
		t.Fatal("pending waiter should be closed immediately after transport detach")
	}
}
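
// TestServerCleanupLostHeartbeatClientsDetachesBoundStreamPeer verifies that
// the heartbeat sweep detaches the transport of an identity-bound stream peer
// whose heartbeat has timed out, keeps its logical registration, and cancels
// all waiters scoped to the dropped transport.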
func TestServerCleanupLostHeartbeatClientsDetachesBoundStreamPeer(t *testing.T) {
	server := NewServer().(*ServerCommon)
	server.SetHeartbeatTimeoutSec(10)
	now := time.Now().Unix()
	left, right := net.Pipe()
	defer right.Close()
	stopCtx, stopFn := context.WithCancel(context.Background())
	defer stopFn()
	logical, _, _ := newRegisteredServerLogicalForTest(t, server, "client-heartbeat-bound", left, stopCtx, stopFn)
	client := clientConnFromLogical(logical)
	client.markClientConnIdentityBound()
	client.setClientConnLastHeartbeatUnix(now - 20)
	scope := serverTransportScope(client)
	pending := server.getPendingWaitPool().createAndStoreWithScope(TransferMsg{ID: 17002, Type: MSG_SYNC_ASK}, scope)
	fileWait := server.getFileAckPool().prepare(scope, "file-heartbeat-bound", "end", 0)
	signalWait := server.getSignalAckPool().prepare(scope, 7002)
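
	// The last heartbeat is 20s old against a 10s timeout, so this sweep
	// should detach the peer's transport.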
	server.cleanupLostHeartbeatClients(time.Unix(now, 0))
	status := client.Status()
	if !status.Alive || status.Reason != "" || status.Err != nil {
		t.Fatalf("bound stream peer should stay logically alive after heartbeat timeout detach: %+v", status)
	}
	if got := server.GetLogicalConn(client.ClientID); got != logical {
		t.Fatalf("bound stream peer should remain registered after heartbeat timeout, got %+v want %+v", got, logical)
	}
	if client.clientConnTransportSnapshot() != nil {
		t.Fatalf("bound stream peer transport should be detached after heartbeat timeout, got %v", client.clientConnTransportSnapshot())
	}
	if clients := server.snapshotLostHeartbeatClients(now + 30); len(clients) != 0 {
		t.Fatalf("detached bound stream peer should no longer appear in heartbeat timeout snapshot: %+v", clients)
	}
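
	// As with a read error, a heartbeat-timeout detach should cancel all
	// waiters scoped to the dropped transport.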
	if err := server.getFileAckPool().waitPrepared(fileWait, defaultFileAckTimeout); err == nil || err.Error() != "file ack canceled" {
		t.Fatalf("file waiter cancel mismatch after heartbeat timeout detach: %v", err)
	}
	if err := server.getSignalAckPool().waitPrepared(signalWait, defaultSignalAckTimeout); err == nil || err.Error() != "signal ack canceled" {
		t.Fatalf("signal waiter cancel mismatch after heartbeat timeout detach: %v", err)
	}
	select {
	case _, ok := <-pending.Reply:
		if ok {
			t.Fatal("pending waiter should be canceled after heartbeat timeout detach")
		}
	default:
		t.Fatal("pending waiter should be closed immediately after heartbeat timeout detach")
	}
}
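
// TestServerCleanupExpiredDetachedClientsStopsBoundStreamPeer verifies that a
// bound stream peer whose transport has been detached for longer than the
// configured keep window is stopped, removed from the registry, and has its
// stop context closed by the expiry sweep.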
func TestServerCleanupExpiredDetachedClientsStopsBoundStreamPeer(t *testing.T) {
	server := NewServer().(*ServerCommon)
	server.SetDetachedClientKeepSec(10)
	stopCtx, stopFn := context.WithCancel(context.Background())
	defer stopFn()
	left, right := net.Pipe()
	defer right.Close()
	logical, _, _ := newRegisteredServerLogicalForTest(t, server, "client-detached-expired", left, stopCtx, stopFn)
	client := clientConnFromLogical(logical)
	client.markClientConnIdentityBound()
	client.markClientConnStreamTransport()
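
	// Simulate a transport that detached 20s ago, well past the 10s keep window.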
	client.setClientConnTransportDetachState(&clientConnTransportDetachState{
		Reason: "read error",
		Err:    "boom",
		At:     time.Now().Add(-20 * time.Second),
	})
	client.clearClientConnSessionRuntimeTransport()
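
	// The expiry sweep should stop the peer outright rather than keep it detached.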
	server.cleanupExpiredDetachedClients(time.Now())
	status := client.Status()
	if status.Alive {
		t.Fatalf("expired detached peer should stop logically, got %+v", status)
	}
	if got, want := status.Reason, "detached transport expired"; got != want {
		t.Fatalf("expired detached peer reason mismatch: got %q want %q", got, want)
	}
	if got := server.GetLogicalConn(client.ClientID); got != nil {
		t.Fatalf("expired detached peer should be removed from registry, got %+v", got)
	}
	select {
	case <-client.clientConnStopContextSnapshot().Done():
	case <-time.After(time.Second):
		t.Fatal("expired detached peer stop context should close")
	}
}