// notify/client_conn_session_test.go
//
// Session lifecycle tests for ClientConn / LogicalConn transport ownership.

package notify
import (
"b612.me/stario"
"bytes"
"context"
"errors"
"fmt"
"io"
"net"
"testing"
"time"
)
// TestClientConnReadTUMessagePreservesServerStopReason verifies that when the
// server stops a client session with an explicit reason, readTUMessage exits
// and the recorded stop reason/error are not overwritten by the read failure
// that follows the peer close.
func TestClientConnReadTUMessagePreservesServerStopReason(t *testing.T) {
	server := NewServer().(*ServerCommon)
	left, right := net.Pipe()
	defer left.Close() // fix: the client-side pipe end was never closed
	stopCtx, stopFn := context.WithCancel(context.Background())
	defer stopFn()
	client, _, _ := newRegisteredServerClientForTest(t, server, "client-stop", left, stopCtx, stopFn)
	done := make(chan struct{})
	go func() {
		client.readTUMessage()
		close(done)
	}()
	// Stop the session first, then close the peer so the blocked read returns.
	server.stopClientSession(client, "recv stop signal from server", nil)
	_ = right.Close()
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("readTUMessage should exit after server stop")
	}
	// The server-initiated stop reason must win over the later read error.
	if status := client.Status(); status.Alive || status.Reason != "recv stop signal from server" || status.Err != nil {
		t.Fatalf("unexpected status after server stop: %+v", status)
	}
	if got := server.GetLogicalConn(client.ClientID); got != nil {
		t.Fatalf("logical should be removed after server stop, got %+v", got)
	}
}
// TestClientConnReadTUMessageReadErrorStopsAndRemovesClient verifies that a
// transport read error stops the session with reason "read error", records the
// underlying error, and removes the logical conn from the server registry.
func TestClientConnReadTUMessageReadErrorStopsAndRemovesClient(t *testing.T) {
	server := NewServer().(*ServerCommon)
	left, right := net.Pipe()
	defer left.Close() // fix: the client-side pipe end was never closed
	stopCtx, stopFn := context.WithCancel(context.Background())
	defer stopFn()
	client, _, _ := newRegisteredServerClientForTest(t, server, "client-read-error", left, stopCtx, stopFn)
	done := make(chan struct{})
	go func() {
		client.readTUMessage()
		close(done)
	}()
	// Closing the peer makes the blocked read fail, which should stop the session.
	_ = right.Close()
	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("readTUMessage should exit after read error")
	}
	status := client.Status()
	if status.Alive || status.Reason != "read error" || status.Err == nil {
		t.Fatalf("unexpected status after read error: %+v", status)
	}
	if got := server.GetLogicalConn(client.ClientID); got != nil {
		t.Fatalf("logical should be removed after read error, got %+v", got)
	}
}
// TestClientConnMarkSessionStoppedUsesRuntimeStopFn checks that stopping a
// started session invokes the stop function held by the session runtime,
// observable as cancellation of the runtime stop context.
func TestClientConnMarkSessionStoppedUsesRuntimeStopFn(t *testing.T) {
	conn := &ClientConn{}
	conn.markSessionStarted()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn.setClientConnSessionRuntime(&clientConnSessionRuntime{
		stopCtx: ctx,
		stopFn:  cancel,
	})
	conn.markSessionStopped("runtime stop", nil)
	select {
	case <-ctx.Done():
		// stop func was invoked through the runtime
	case <-time.After(time.Second):
		t.Fatal("runtime stop context should be canceled by markSessionStopped")
	}
}
// TestClientConnDetachServerOwnedSessionClearsRuntimeTransport verifies that
// detaching a server-owned session clears the runtime transport while keeping
// the session's stop context intact.
func TestClientConnDetachServerOwnedSessionClearsRuntimeTransport(t *testing.T) {
	client := &ClientConn{}
	left, right := net.Pipe()
	defer left.Close() // fix: only the peer end was being closed
	defer right.Close()
	stopCtx, stopFn := context.WithCancel(context.Background())
	defer stopFn()
	client.startClientConnSession(left, stopCtx, stopFn)
	client.detachServerOwnedSession()
	// Detach removes the transport from the runtime...
	if got := client.clientConnTransportSnapshot(); got != nil {
		t.Fatalf("runtime transport should be cleared after detach, got %v", got)
	}
	// ...but must not replace the session stop context.
	if got := client.clientConnStopContextSnapshot(); got != stopCtx {
		t.Fatalf("runtime stop context should be preserved after detach, got %v want %v", got, stopCtx)
	}
}
// TestClientConnReadFromTUTransportUsesRuntimeConn verifies that
// readFromTUTransport reads from the connection stored in the session runtime:
// bytes written on the peer end of that conn must come back from the call.
func TestClientConnReadFromTUTransportUsesRuntimeConn(t *testing.T) {
	conn := &ClientConn{}
	local, remote := net.Pipe()
	defer local.Close()
	defer remote.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	conn.setClientConnSessionRuntime(&clientConnSessionRuntime{
		tuConn:  local,
		stopCtx: ctx,
		stopFn:  cancel,
	})
	payload := []byte("runtime-tu-conn")
	wrote := make(chan error, 1)
	go func() {
		_, err := remote.Write(payload)
		wrote <- err
	}()
	n, buf, err := conn.readFromTUTransport()
	if err != nil {
		t.Fatalf("readFromTUTransport failed: %v", err)
	}
	if got, want := string(buf[:n]), string(payload); got != want {
		t.Fatalf("payload mismatch: got %q want %q", got, want)
	}
	// The peer-side writer must have completed without error.
	select {
	case err := <-wrote:
		if err != nil {
			t.Fatalf("runtime writer failed: %v", err)
		}
	case <-time.After(time.Second):
		t.Fatal("runtime writer did not finish")
	}
}
// TestStartClientConnSessionInitializesDefaultRuntime checks that starting a
// session with a nil context and nil cancel initializes a default stop
// context, records the transport and remote address, and marks the client
// alive.
func TestStartClientConnSessionInitializesDefaultRuntime(t *testing.T) {
	client := &ClientConn{}
	local, remote := net.Pipe()
	defer local.Close()
	defer remote.Close()
	ctx, cancel := client.startClientConnSession(local, nil, nil)
	defer cancel()
	if !client.Status().Alive {
		t.Fatalf("client should start alive: %+v", client.Status())
	}
	if ctx == nil || cancel == nil {
		t.Fatal("startClientConnSession should initialize default stop context")
	}
	if got := client.clientConnTransportSnapshot(); got != local {
		t.Fatal("runtime transport snapshot should match passed conn")
	}
	if got := client.clientConnStopContextSnapshot(); got != ctx {
		t.Fatal("runtime stop context snapshot should match returned context")
	}
	if got := client.clientConnStopFuncSnapshot(); got == nil {
		t.Fatal("runtime stop func snapshot should be initialized")
	}
	if got := client.GetRemoteAddr(); got == nil || got.String() != local.RemoteAddr().String() {
		t.Fatalf("client remote addr mismatch: got %v want %v", got, local.RemoteAddr())
	}
}
// TestLogicalConnSessionTransportLifecycleUsesLogicalRuntimeOwner exercises the
// logical conn's transport lifecycle — startSession, attachSessionTransport,
// and detachTransportForTransfer — checking the transport snapshot, attached
// flag, and monotonically advancing generation at each step.
func TestLogicalConnSessionTransportLifecycleUsesLogicalRuntimeOwner(t *testing.T) {
	client := &ClientConn{ClientID: "logical-runtime"}
	logical := client.LogicalConn()
	if logical == nil {
		t.Fatal("LogicalConn should exist")
	}
	firstLeft, firstRight := net.Pipe()
	defer firstLeft.Close() // fix: this pipe end was never closed
	defer firstRight.Close()
	stopCtx, stopFn := logical.startSession(firstLeft, nil, nil)
	defer stopFn()
	if stopCtx == nil {
		t.Fatal("logical startSession should initialize stop context")
	}
	if got := logical.transportSnapshot(); got != firstLeft {
		t.Fatalf("logical transport snapshot mismatch: got %v want %v", got, firstLeft)
	}
	if !logical.transportAttachedSnapshot() {
		t.Fatal("logical transport should be attached after startSession")
	}
	firstGeneration := logical.transportGenerationSnapshot()
	if firstGeneration == 0 {
		t.Fatal("logical transport generation should advance for stream runtime")
	}
	secondLeft, secondRight := net.Pipe()
	defer secondLeft.Close() // fix: also covers the conn returned by detach below
	defer secondRight.Close()
	// Re-attach a fresh transport; the generation must advance again.
	if err := logical.attachSessionTransport(secondLeft); err != nil {
		t.Fatalf("logical attachSessionTransport failed: %v", err)
	}
	if got := logical.transportSnapshot(); got != secondLeft {
		t.Fatalf("logical transport snapshot after attach mismatch: got %v want %v", got, secondLeft)
	}
	if !logical.transportAttachedSnapshot() {
		t.Fatal("logical transport should stay attached after attachSessionTransport")
	}
	if got := logical.transportGenerationSnapshot(); got <= firstGeneration {
		t.Fatalf("logical transport generation should advance after attach: got %d want > %d", got, firstGeneration)
	}
	// Detach hands back the current transport and clears it from the runtime.
	detachedConn, err := logical.detachTransportForTransfer()
	if err != nil {
		t.Fatalf("logical detachTransportForTransfer failed: %v", err)
	}
	if detachedConn != secondLeft {
		t.Fatalf("detached conn mismatch: got %v want %v", detachedConn, secondLeft)
	}
	if got := logical.transportSnapshot(); got != nil {
		t.Fatalf("logical transport should be cleared after detach, got %v", got)
	}
	if logical.transportAttachedSnapshot() {
		t.Fatal("logical transport should be detached after detachTransportForTransfer")
	}
	// The stop context belongs to the session, not the transport, and survives.
	if got := logical.stopContextSnapshot(); got != stopCtx {
		t.Fatalf("logical stop context should be preserved after detach, got %v want %v", got, stopCtx)
	}
}
// TestLogicalConnOwnerStateMutationsSyncLegacyClientView verifies that state
// mutations made through the logical conn owner are observable through the
// legacy ClientConn snapshot accessors, and that clearing the detach state
// clears it in the legacy view as well.
func TestLogicalConnOwnerStateMutationsSyncLegacyClientView(t *testing.T) {
	cc := &ClientConn{ClientID: "logical-owner-state"}
	logical := cc.LogicalConn()
	if logical == nil {
		t.Fatal("LogicalConn should exist")
	}
	// Mutate owner-side state exclusively through the logical conn.
	logical.markIdentityBound()
	logical.markStreamTransport()
	gen := logical.markTransportAttached()
	logical.setClientConnLastHeartbeatUnix(12345)
	logical.markTransportDetached("read error", errors.New("boom"))
	// Every mutation must be visible through the legacy client view.
	if !cc.clientConnIdentityBoundSnapshot() {
		t.Fatal("legacy client identity-bound snapshot should follow logical state")
	}
	if !cc.clientConnUsesStreamTransportSnapshot() {
		t.Fatal("legacy client stream-transport snapshot should follow logical state")
	}
	if got := cc.clientConnTransportGenerationSnapshot(); got != gen {
		t.Fatalf("legacy client transport generation = %d, want %d", got, gen)
	}
	if got := cc.clientConnLastHeartbeatUnixSnapshot(); got != 12345 {
		t.Fatalf("legacy client last heartbeat = %d, want %d", got, 12345)
	}
	detach := cc.clientConnTransportDetachSnapshot()
	if detach == nil {
		t.Fatal("legacy client detach snapshot should follow logical state")
	}
	if detach.Reason != "read error" || detach.Err != "boom" || detach.Generation != gen {
		t.Fatalf("legacy client detach snapshot mismatch: %+v", detach)
	}
	logical.clearTransportDetachState()
	if got := cc.clientConnTransportDetachSnapshot(); got != nil {
		t.Fatalf("legacy client detach snapshot should clear with logical state, got %+v", got)
	}
}
// TestLogicalDetachTransportForTransferKeepsHandoffConnAlive verifies that a
// conn returned by detachTransportForTransfer remains usable after the detach:
// bytes written on the detached end must still arrive at the original peer.
func TestLogicalDetachTransportForTransferKeepsHandoffConnAlive(t *testing.T) {
server := NewServer().(*ServerCommon)
stopCtx, stopFn := context.WithCancel(context.Background())
defer stopFn()
left, right := net.Pipe()
defer right.Close()
client := &ClientConn{
ClientID: "client-handoff",
server: server,
}
client.startClientConnSessionTransport(left, stopCtx, stopFn)
logical := client.LogicalConn()
// Detach the transport as if handing it off to another owner; the detach
// must not close the underlying conn.
detachedConn, err := logical.detachTransportForTransfer()
if err != nil {
t.Fatalf("logical detachTransportForTransfer failed: %v", err)
}
defer detachedConn.Close()
payload := []byte("handoff-payload")
readDone := make(chan error, 1)
// Peer-side reader: expects the full payload within one second, reporting
// any failure back over readDone.
go func() {
buf := make([]byte, len(payload))
_ = right.SetReadDeadline(time.Now().Add(time.Second))
if _, err := io.ReadFull(right, buf); err != nil {
readDone <- err
return
}
if !bytes.Equal(buf, payload) {
readDone <- fmt.Errorf("payload mismatch: got %q want %q", string(buf), string(payload))
return
}
readDone <- nil
}()
// Write through the detached conn; it must still be wired to the peer end.
_ = detachedConn.SetWriteDeadline(time.Now().Add(time.Second))
if _, err := detachedConn.Write(payload); err != nil {
t.Fatalf("detached handoff conn write failed: %v", err)
}
select {
case err := <-readDone:
if err != nil {
t.Fatalf("handoff conn read failed: %v", err)
}
case <-time.After(2 * time.Second):
t.Fatal("timed out waiting for handoff conn read")
}
}
// TestLogicalHandleTUTransportReadResultWithSessionDropsDataAfterTransportStop
// verifies that a read result delivered after the transport stop context has
// been canceled is dropped: the handler must return false so the read loop
// stops instead of forwarding late data.
func TestLogicalHandleTUTransportReadResultWithSessionDropsDataAfterTransportStop(t *testing.T) {
	server := NewServer().(*ServerCommon)
	left, right := net.Pipe()
	defer right.Close()
	stopCtx, stopFn := context.WithCancel(context.Background())
	// fix: cancel on every exit path; an early t.Fatal previously leaked the
	// context (go vet lostcancel). Calling stopFn twice is safe.
	defer stopFn()
	logical, _, _ := newRegisteredServerLogicalForTest(t, server, "logical-stop-read-drop", left, stopCtx, stopFn)
	if logical == nil {
		t.Fatal("logical should not be nil")
	}
	generation := logical.transportGenerationSnapshot()
	// Stop the transport, then deliver a late read result for the old generation.
	stopFn()
	if logical.handleTUTransportReadResultWithSession(stopCtx, left, generation, len([]byte("late-data")), []byte("late-data"), nil) {
		t.Fatal("handleTUTransportReadResultWithSession should stop after transport stop")
	}
}
// TestClientConnTransportBindingSnapshotUsesRuntimeBinding checks that the
// transport-binding snapshot exposes the binding stored in the session
// runtime, and that a server-side peer binding carries no send queue.
func TestClientConnTransportBindingSnapshotUsesRuntimeBinding(t *testing.T) {
	client := &ClientConn{}
	local, remote := net.Pipe()
	defer local.Close()
	defer remote.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	client.setClientConnSessionRuntime(&clientConnSessionRuntime{
		transport: newTransportBinding(local, nil),
		tuConn:    local,
		stopCtx:   ctx,
		stopFn:    cancel,
	})
	binding := client.clientConnTransportBindingSnapshot()
	if binding == nil {
		t.Fatal("runtime transport binding should exist")
	}
	if got := binding.connSnapshot(); got != local {
		t.Fatal("runtime transport binding conn should match runtime conn")
	}
	if got := binding.queueSnapshot(); got != nil {
		t.Fatalf("server-side peer binding queue should remain nil, got %v", got)
	}
}
// TestClientConnDetachServerOwnedSessionCancelsTransportOnly verifies that a
// pure detach cancels only the transport-level stop context: the logical stop
// context stays active and the transport is merely marked detached.
func TestClientConnDetachServerOwnedSessionCancelsTransportOnly(t *testing.T) {
	client := &ClientConn{}
	local, remote := net.Pipe()
	defer local.Close()
	defer remote.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	client.startClientConnSession(local, ctx, cancel)
	transportCtx := client.clientConnTransportStopContextSnapshot()
	if transportCtx == nil {
		t.Fatal("transport stop context should exist")
	}
	client.detachServerOwnedSession()
	// The transport stop context must be canceled by the detach...
	select {
	case <-transportCtx.Done():
	case <-time.After(time.Second):
		t.Fatal("transport stop context should be canceled after detach")
	}
	// ...while the logical stop context remains untouched.
	select {
	case <-client.clientConnStopContextSnapshot().Done():
		t.Fatal("logical stop context should remain active after pure detach")
	default:
	}
	if client.clientConnTransportAttachedSnapshot() {
		t.Fatal("client conn transport should be marked detached after pure detach")
	}
}
// TestAttachClientConnSessionTransportRebindsRuntimeAndStartsReadLoop verifies
// that attaching a new transport to an existing server-owned session rebinds
// the runtime to the new conn and starts a read loop on it: a framed message
// written on the new peer end must show up on the server inbound queue, tagged
// with the current transport generation.
func TestAttachClientConnSessionTransportRebindsRuntimeAndStartsReadLoop(t *testing.T) {
server := NewServer().(*ServerCommon)
stopCtx, stopFn := context.WithCancel(context.Background())
defer stopFn()
queue := stario.NewQueueCtx(stopCtx, 4, 1024)
server.setServerSessionRuntime(&serverSessionRuntime{
stopCtx: stopCtx,
stopFn: stopFn,
queue: queue,
})
oldLeft, oldRight := net.Pipe()
defer oldRight.Close()
client := &ClientConn{
ClientID: "client-reattach",
server: server,
}
// Session starts on the old transport, then is rebound to a fresh pipe.
client.startClientConnSession(oldLeft, stopCtx, stopFn)
newLeft, newRight := net.Pipe()
defer newRight.Close()
if err := client.attachClientConnSessionTransport(newLeft); err != nil {
t.Fatalf("attachClientConnSessionTransport failed: %v", err)
}
rt := client.clientConnSessionRuntimeSnapshot()
if rt == nil {
t.Fatal("client conn runtime should exist after attach")
}
// The runtime must now hold the new conn and be marked attached.
if rt.tuConn != newLeft || !rt.transportAttached {
t.Fatalf("attached client conn runtime mismatch: %+v", rt)
}
// Send a queue-framed payload through the new peer end; the read loop on
// the reattached transport should push it onto the inbound queue.
wire := queue.BuildMessage([]byte("reattached"))
if _, err := newRight.Write(wire); err != nil {
t.Fatalf("new transport write failed: %v", err)
}
select {
case msg := <-queue.RestoreChan():
source := assertServerInboundQueueSource(t, msg.Conn, client)
// The queued message must carry the post-attach transport generation.
if got, want := source.TransportGeneration, client.clientConnTransportGenerationSnapshot(); got != want {
t.Fatalf("queue transport generation mismatch: got %d want %d", got, want)
}
if got, want := string(msg.Msg), "reattached"; got != want {
t.Fatalf("queue payload mismatch: got %q want %q", got, want)
}
case <-time.After(time.Second):
t.Fatal("reattached server-owned transport did not push framed message")
}
}