notify/client_conn_transport.go
starainrt 4f760f2807
fix: 修复 dedicated bulk attach 竞态并优化 short write 补写路径
- 客户端 dedicated attach 回复改为精确读取单帧,避免 attach reply 与后续 NBR1 数据粘连后被误解析
  - 服务端 accepted attach 改为先 detach transport,再直接回 attach reply,随后立即切入 dedicated bulk read loop
  - transport 读循环在 stop 或 transport ownership 失效后不再继续上推已读数据,避免 handoff 后首包被旧 reader 吃掉
  - dedicated bulk record 写路径改为 full-write,消除 short write 导致的 invalid bulk fast payload
  - 优化 vectored write 补写策略:先尝试一次 writev,未写完时直接顺序补完剩余 buffers,减少重复 WriteTo 开销
  - 放宽 vectored write 能力识别,支持通过 UnwrapConn/WriteBuffers 命中 fast path
  - 修复 dedicated batch 排队路径 payload 复用问题,改为深拷贝 queued items
  - 补齐 dedicated attach、short write、payload clone、transport stop/handoff 等回归测试
2026-04-16 17:27:48 +08:00

275 lines
7.4 KiB
Go

package notify
import (
	"context"
	"errors"
	"net"
	"os"
	"time"
)
// serverLogicalTransportDetacher is implemented by servers that can detach a
// logical session's transport on a read failure while keeping the logical
// peer alive (used by the read-error paths in this file as an alternative to
// stopping the whole session).
type serverLogicalTransportDetacher interface {
	// detachLogicalSessionTransport releases the transport bound to logical,
	// recording the human-readable reason and the triggering error.
	detachLogicalSessionTransport(logical *LogicalConn, reason string, err error)
}
// serverInboundSourcePusher is implemented by servers that accept inbound
// payloads tagged with their originating source. When available it is
// preferred over the plain pushMessage path.
type serverInboundSourcePusher interface {
	// pushMessageSource receives the raw payload and an opaque source value
	// (constructed via newServerInboundSource by callers in this file).
	pushMessageSource([]byte, interface{})
}
// readTUMessage starts the transport read loop for this logical connection
// using the current session runtime snapshot. It is a no-op when no runtime
// is attached.
func (c *LogicalConn) readTUMessage() {
	if rt := c.clientConnSessionRuntimeSnapshot(); rt != nil {
		c.readTUMessageLoop(rt)
	}
}
// readTUMessageLoop repeatedly reads from the session's transport connection
// and hands each result to handleTUTransportReadResultWithSession, until the
// session is stopped, a read fails fatally, or transport ownership moves to
// another reader (e.g. after a dedicated-bulk handoff — see the generation
// check in ownsTransportRead).
func (c *LogicalConn) readTUMessageLoop(rt *clientConnSessionRuntime) {
	if rt == nil {
		return
	}
	// Prefer the transport-scoped stop context; fall back to the session-wide
	// one when the runtime has no per-transport context.
	stopCtx := rt.transportStopCtx
	if stopCtx == nil {
		stopCtx = rt.stopCtx
	}
	if stopCtx == nil {
		return
	}
	// Snapshot the connection and generation once: later ownership checks
	// compare against these values to detect a transport handoff mid-loop.
	conn := rt.tuConn
	generation := rt.transportGeneration
	defer closeClientConnSessionRuntimeTransportDone(rt)
	buf := streamReadBuffer()
	for {
		// Non-blocking stop check before each (blocking) read.
		select {
		case <-sessionStopChan(stopCtx):
			// Only close when conn is still the session's active transport,
			// so we never close a connection handed to a new owner.
			if c.shouldCloseTransportOnStop(conn) {
				_ = conn.Close()
			}
			return
		default:
		}
		num, data, err := c.readFromTUTransportConnWithBuffer(conn, buf)
		if !c.handleTUTransportReadResultWithSession(stopCtx, conn, generation, num, data, err) {
			return
		}
	}
}
// readFromTUTransportConnWithBuffer performs a single read from conn into the
// supplied buffer (acquiring a fresh stream buffer when none was given) and
// returns the byte count, the buffer used, and the read error. A configured
// max read timeout is applied as a read deadline before each read.
func (c *LogicalConn) readFromTUTransportConnWithBuffer(conn net.Conn, data []byte) (int, []byte, error) {
	if len(data) == 0 {
		data = streamReadBuffer()
	}
	if conn == nil {
		return 0, nil, net.ErrClosed
	}
	timeout := c.clientConnMaxReadTimeoutSnapshot()
	if timeout > 0 {
		_ = conn.SetReadDeadline(time.Now().Add(timeout))
	}
	n, err := conn.Read(data)
	return n, data, err
}
// handleTUTransportReadResultWithSession classifies one transport read
// result. It returns true when the read loop should continue and false when
// it must exit (session stopped, transport ownership lost, or fatal error).
//
// Ordering matters: the stop/ownership checks run before any data is pushed,
// so a reader that lost the transport (e.g. after a dedicated handoff) never
// forwards bytes that now belong to the new owner.
func (c *LogicalConn) handleTUTransportReadResultWithSession(stopCtx context.Context, conn net.Conn, generation uint64, num int, data []byte, err error) bool {
	if transportReadShouldStop(stopCtx) || !c.ownsTransportRead(conn, generation) {
		if c.shouldCloseTransportOnStop(conn) {
			_ = conn.Close()
		}
		return false
	}
	// A read deadline firing is a routine wakeup, not a failure: push any
	// bytes that did arrive and keep looping. errors.Is is required here —
	// net.Conn deadline errors arrive wrapped in *net.OpError, so a direct
	// == against os.ErrDeadlineExceeded would miss them and misclassify a
	// deadline tick as a fatal read error (detaching/stopping the session).
	if errors.Is(err, os.ErrDeadlineExceeded) {
		if num != 0 {
			c.pushServerOwnedTransportMessage(data[:num], conn, generation)
		}
		return true
	}
	if err != nil {
		// If the session is already stopping, treat the error as shutdown.
		select {
		case <-sessionStopChan(stopCtx):
			if c.shouldCloseTransportOnStop(conn) {
				_ = conn.Close()
			}
			return false
		default:
		}
		// Prefer detaching just the transport (keeping the logical peer
		// alive) when the server supports it; otherwise stop the session.
		if detacher, ok := c.Server().(serverLogicalTransportDetacher); ok && c.shouldPreserveLogicalPeerOnTransportLoss() {
			detacher.detachLogicalSessionTransport(c, "read error", err)
			return false
		}
		c.stopServerOwnedSession("read error", err)
		return false
	}
	c.pushServerOwnedTransportMessage(data[:num], conn, generation)
	return true
}
// transportReadShouldStop reports, without blocking, whether the session
// stop channel derived from stopCtx has already been closed.
func transportReadShouldStop(stopCtx context.Context) bool {
	select {
	case <-sessionStopChan(stopCtx):
	default:
		return false
	}
	return true
}
// ownsTransportRead reports whether this reader still owns the transport:
// the runtime must be attached, on the same generation the reader started
// with, and its currently active connection must be conn.
func (c *LogicalConn) ownsTransportRead(conn net.Conn, generation uint64) bool {
	if c == nil {
		return false
	}
	rt := c.clientConnSessionRuntimeSnapshot()
	switch {
	case rt == nil, !rt.transportAttached, rt.transportGeneration != generation:
		return false
	}
	active := rt.tuConn
	if rt.transport != nil {
		if snap := rt.transport.connSnapshot(); snap != nil {
			active = snap
		}
	}
	return active == conn
}
// pushServerOwnedTransportMessage forwards inbound transport bytes to the
// owning server, preferring the source-aware push API when implemented.
func (c *LogicalConn) pushServerOwnedTransportMessage(data []byte, conn net.Conn, generation uint64) {
	if c == nil || len(data) == 0 {
		return
	}
	srv := c.Server()
	if srv == nil {
		return
	}
	pusher, ok := srv.(serverInboundSourcePusher)
	if !ok {
		srv.pushMessage(data, c.clientConnIDSnapshot())
		return
	}
	pusher.pushMessageSource(data, newServerInboundSource(c, conn, nil, generation))
}
// shouldCloseTransportOnStop reports whether conn is still the session's
// active transport connection and therefore should be closed by the reader
// when it stops (a handed-off connection must be left alone).
func (c *LogicalConn) shouldCloseTransportOnStop(conn net.Conn) bool {
	if c == nil || conn == nil {
		return false
	}
	rt := c.clientConnSessionRuntimeSnapshot()
	if rt == nil || !rt.transportAttached {
		return false
	}
	active := rt.tuConn
	if rt.transport != nil {
		if snap := rt.transport.connSnapshot(); snap != nil {
			active = snap
		}
	}
	return active == conn
}
// readFromTUTransport performs a single read from the currently bound
// transport, failing with net.ErrClosed when no binding exists.
func (c *ClientConn) readFromTUTransport() (int, []byte, error) {
	binding := c.clientConnTransportBindingSnapshot()
	if binding == nil {
		return 0, nil, net.ErrClosed
	}
	return c.readFromTUTransportConn(binding.connSnapshot())
}
// readFromTUTransportConn performs a single read from conn into a freshly
// acquired stream read buffer.
func (c *ClientConn) readFromTUTransportConn(conn net.Conn) (int, []byte, error) {
	buf := streamReadBuffer()
	return c.readFromTUTransportConnWithBuffer(conn, buf)
}
// readFromTUTransportConnWithBuffer performs a single read from conn into the
// supplied buffer, delegating to the attached logical connection when present
// so both paths share one deadline policy. A configured max read timeout is
// applied as a read deadline before the read.
func (c *ClientConn) readFromTUTransportConnWithBuffer(conn net.Conn, data []byte) (int, []byte, error) {
	if logical := c.LogicalConn(); logical != nil {
		return logical.readFromTUTransportConnWithBuffer(conn, data)
	}
	if len(data) == 0 {
		data = streamReadBuffer()
	}
	if conn == nil {
		return 0, nil, net.ErrClosed
	}
	timeout := c.clientConnMaxReadTimeoutSnapshot()
	if timeout > 0 {
		_ = conn.SetReadDeadline(time.Now().Add(timeout))
	}
	n, err := conn.Read(data)
	return n, data, err
}
// handleTUTransportReadResult classifies a read result against the current
// transport session snapshots (stop context, connection, and generation).
func (c *ClientConn) handleTUTransportReadResult(num int, data []byte, err error) bool {
	stopCtx := c.clientConnTransportStopContextSnapshot()
	conn := c.clientConnTransportSnapshot()
	generation := c.clientConnTransportGenerationSnapshot()
	return c.handleTUTransportReadResultWithSession(stopCtx, conn, generation, num, data, err)
}
// handleTUTransportReadResultWithSession classifies one transport read
// result for this client connection, delegating to the logical connection
// when one is attached. It returns true when the read loop should continue
// and false when it must exit (session stopped, ownership lost, or fatal
// read error).
func (c *ClientConn) handleTUTransportReadResultWithSession(stopCtx context.Context, conn net.Conn, generation uint64, num int, data []byte, err error) bool {
	if logical := c.LogicalConn(); logical != nil {
		return logical.handleTUTransportReadResultWithSession(stopCtx, conn, generation, num, data, err)
	}
	// Stop/ownership checks come first so a reader that lost the transport
	// never forwards bytes that now belong to the new owner.
	if transportReadShouldStop(stopCtx) || !c.ownsTransportRead(conn, generation) {
		if c.shouldCloseClientConnTransportOnStop(conn) {
			_ = conn.Close()
		}
		return false
	}
	// A read deadline firing is a routine wakeup, not a failure. errors.Is is
	// required here — net.Conn deadline errors arrive wrapped in
	// *net.OpError, so a direct == against os.ErrDeadlineExceeded would miss
	// them and misclassify a deadline tick as a fatal read error.
	if errors.Is(err, os.ErrDeadlineExceeded) {
		if num != 0 {
			c.pushServerOwnedTransportMessage(data[:num], conn, generation)
		}
		return true
	}
	if err != nil {
		// If the session is already stopping, treat the error as shutdown.
		select {
		case <-sessionStopChan(stopCtx):
			if c.shouldCloseClientConnTransportOnStop(conn) {
				_ = conn.Close()
			}
			return false
		default:
		}
		// Prefer detaching just the transport (preserving the logical peer)
		// when the server supports it; otherwise stop the whole session.
		if detacher, ok := c.server.(serverLogicalTransportDetacher); ok && c.shouldPreserveLogicalPeerOnTransportLoss() {
			detacher.detachLogicalSessionTransport(logicalConnFromClient(c), "read error", err)
			return false
		}
		c.stopServerOwnedSession("read error", err)
		return false
	}
	c.pushServerOwnedTransportMessage(data[:num], conn, generation)
	return true
}
// ownsTransportRead reports whether this reader still owns the transport:
// the runtime must be attached, on the same generation the reader started
// with, and its currently active connection must be conn.
func (c *ClientConn) ownsTransportRead(conn net.Conn, generation uint64) bool {
	if c == nil {
		return false
	}
	rt := c.clientConnSessionRuntimeSnapshot()
	switch {
	case rt == nil, !rt.transportAttached, rt.transportGeneration != generation:
		return false
	}
	active := rt.tuConn
	if rt.transport != nil {
		if snap := rt.transport.connSnapshot(); snap != nil {
			active = snap
		}
	}
	return active == conn
}
// pushServerOwnedTransportMessage forwards inbound transport bytes to the
// owning server, delegating to the attached logical connection when present
// and preferring the source-aware push API when implemented.
func (c *ClientConn) pushServerOwnedTransportMessage(data []byte, conn net.Conn, generation uint64) {
	// Guard the receiver before any method call on it: the original checked
	// c == nil only after invoking c.LogicalConn(), silently relying on that
	// method tolerating a nil receiver.
	if c == nil {
		return
	}
	if logical := c.LogicalConn(); logical != nil {
		logical.pushServerOwnedTransportMessage(data, conn, generation)
		return
	}
	if c.server == nil || len(data) == 0 {
		return
	}
	if pusher, ok := c.server.(serverInboundSourcePusher); ok {
		pusher.pushMessageSource(data, newServerInboundSource(logicalConnFromClient(c), conn, nil, generation))
		return
	}
	c.server.pushMessage(data, c.clientConnIDSnapshot())
}
// shouldCloseClientConnTransportOnStop reports whether conn is still the
// session's active transport connection and therefore should be closed by
// the reader when it stops. Delegates to the attached logical connection
// when present.
func (c *ClientConn) shouldCloseClientConnTransportOnStop(conn net.Conn) bool {
	// Guard the receiver before any method call on it: the original checked
	// c == nil only after invoking c.LogicalConn() on the possibly-nil
	// receiver. Hoisting conn == nil is equivalent too, since the logical
	// path also returns false for a nil conn.
	if c == nil || conn == nil {
		return false
	}
	if logical := c.LogicalConn(); logical != nil {
		return logical.shouldCloseTransportOnStop(conn)
	}
	rt := c.clientConnSessionRuntimeSnapshot()
	if rt == nil || !rt.transportAttached {
		return false
	}
	current := rt.tuConn
	if rt.transport != nil && rt.transport.connSnapshot() != nil {
		current = rt.transport.connSnapshot()
	}
	return current == conn
}