package notify

import (
	"context"
	"net"
	"sync"
	"sync/atomic"
	"time"
)

// bulkDedicatedLaneMicroBatchWait bounds how long the sender loop lingers to
// coalesce additional queued requests into the current flush.
const bulkDedicatedLaneMicroBatchWait = 50 * time.Microsecond

// bulkDedicatedLaneBatchRequest is a pooled unit of work handed to the sender
// goroutine: one batch of items destined for a single DataID.
type bulkDedicatedLaneBatchRequest struct {
	Ctx      context.Context
	DataID   uint64
	Items    []bulkDedicatedSendRequest
	Deadline time.Time
	wait     bool
	resultCh chan error
	state    bulkDedicatedRequestState
	aborted  atomic.Bool
}

var bulkDedicatedLaneBatchRequestPool sync.Pool

// bulkDedicatedLaneSender serializes batch writes onto a dedicated connection.
// Requests are queued on reqCh and flushed by a single run goroutine; the
// first write error is latched and fails all subsequent submissions.
type bulkDedicatedLaneSender struct {
	conn   net.Conn
	encode func([]bulkDedicatedOutboundBatch) ([]byte, func(), error)
	fail   func(error)

	reqCh    chan *bulkDedicatedLaneBatchRequest
	stopCh   chan struct{}
	doneCh   chan struct{}
	stopOnce sync.Once
	flushMu  sync.Mutex
	queued   atomic.Int64

	errMu sync.Mutex
	err   error
}

func newBulkDedicatedLaneSender(conn net.Conn, encode func([]bulkDedicatedOutboundBatch) ([]byte, func(), error), fail func(error)) *bulkDedicatedLaneSender {
	sender := &bulkDedicatedLaneSender{
		conn:   conn,
		encode: encode,
		fail:   fail,
		reqCh:  make(chan *bulkDedicatedLaneBatchRequest, bulkDedicatedSendQueueSize),
		stopCh: make(chan struct{}),
		doneCh: make(chan struct{}),
	}
	go sender.run()
	return sender
}
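
// Usage sketch (illustrative only: dialBulkConn, encodeBatches, and the log
// call are hypothetical stand-ins for this package's real wiring):
//
//	conn, _ := dialBulkConn(addr)
//	sender := newBulkDedicatedLaneSender(conn, encodeBatches, func(err error) {
//		log.Printf("bulk dedicated lane failed: %v", err)
//	})
//	defer sender.stop()
//	if err := sender.submitData(ctx, dataID, seq, payload); err != nil {
//		// handle the failed send
//	}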

// getBulkDedicatedLaneBatchRequest returns a reset request, reusing a pooled
// one when available.
func getBulkDedicatedLaneBatchRequest() *bulkDedicatedLaneBatchRequest {
	if pooled, ok := bulkDedicatedLaneBatchRequestPool.Get().(*bulkDedicatedLaneBatchRequest); ok && pooled != nil {
		pooled.reset()
		return pooled
	}
	req := &bulkDedicatedLaneBatchRequest{
		resultCh: make(chan error, 1),
	}
	req.reset()
	return req
}

// reset returns the request to its zero state and drains any stale result so
// a pooled request cannot deliver a previous send's outcome.
func (r *bulkDedicatedLaneBatchRequest) reset() {
	if r == nil {
		return
	}
	r.Ctx = nil
	r.DataID = 0
	r.Deadline = time.Time{}
	r.wait = false
	r.Items = r.Items[:0]
	r.state.value.Store(bulkDedicatedRequestQueued)
	r.aborted.Store(false)
	if r.resultCh != nil {
		select {
		case <-r.resultCh:
		default:
		}
	}
}

func (r *bulkDedicatedLaneBatchRequest) prepare(ctx context.Context, dataID uint64, items []bulkDedicatedSendRequest, wait bool) {
	if r == nil {
		return
	}
	r.reset()
	r.Ctx = ctx
	r.DataID = dataID
	r.wait = wait
	if deadline, ok := ctx.Deadline(); ok {
		r.Deadline = deadline
	}
	if cap(r.Items) < len(items) {
		r.Items = make([]bulkDedicatedSendRequest, len(items))
	} else {
		r.Items = r.Items[:len(items)]
	}
	copy(r.Items, items)
}

func (r *bulkDedicatedLaneBatchRequest) recycle() {
	if r == nil {
		return
	}
	r.reset()
	bulkDedicatedLaneBatchRequestPool.Put(r)
}

// submitData enqueues a single data frame without waiting for the write to
// complete. The payload is copied because the send happens asynchronously.
func (s *bulkDedicatedLaneSender) submitData(ctx context.Context, dataID uint64, seq uint64, payload []byte) error {
	if s == nil {
		return errTransportDetached
	}
	items := []bulkDedicatedSendRequest{{
		Type:    bulkFastPayloadTypeData,
		Seq:     seq,
		Payload: append([]byte(nil), payload...),
	}}
	return s.submitBatch(ctx, dataID, items, false)
}

// submitWrite splits payload into chunkSize pieces, packs them into batches
// bounded by bulkDedicatedBatchMaxItems and bulkDedicatedBatchMaxPlainBytes,
// and submits each batch synchronously. It returns the number of bytes
// accepted before the first error.
func (s *bulkDedicatedLaneSender) submitWrite(ctx context.Context, dataID uint64, startSeq uint64, payload []byte, chunkSize int) (int, error) {
	if s == nil {
		return 0, errTransportDetached
	}
	if len(payload) == 0 {
		return 0, nil
	}
	if chunkSize <= 0 {
		chunkSize = defaultBulkChunkSize
	}
	written := 0
	seq := startSeq
	for written < len(payload) {
		var itemBuf [bulkDedicatedBatchMaxItems]bulkDedicatedSendRequest
		items := itemBuf[:0]
		batchBytes := bulkDedicatedBatchHeaderLen
		start := written
		for written < len(payload) && len(items) < bulkDedicatedBatchMaxItems {
			end := written + chunkSize
			if end > len(payload) {
				end = len(payload)
			}
			itemLen := bulkDedicatedSendRequestLenFromPayloadLen(end - written)
			// Stop growing the batch once it would exceed the plain-bytes
			// budget, but always admit at least one item.
			if len(items) > 0 && batchBytes+itemLen > bulkDedicatedBatchMaxPlainBytes {
				break
			}
			items = append(items, bulkDedicatedSendRequest{
				Type:    bulkFastPayloadTypeData,
				Seq:     seq,
				Payload: payload[written:end],
			})
			batchBytes += itemLen
			seq++
			written = end
		}
		// Defensive fallback: never submit an empty batch while payload remains.
		if len(items) == 0 {
			end := written + chunkSize
			if end > len(payload) {
				end = len(payload)
			}
			items = append(items, bulkDedicatedSendRequest{
				Type:    bulkFastPayloadTypeData,
				Seq:     seq,
				Payload: payload[written:end],
			})
			seq++
			written = end
		}
		if err := s.submitWriteBatch(ctx, dataID, items); err != nil {
			return start, err
		}
		start = written
	}
	return written, nil
}
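
// Worked example (sizes assumed for illustration, not the package defaults):
// with chunkSize = 64 KiB and a 150 KiB payload, submitWrite emits items of
// 64, 64, and 22 KiB carrying sequences startSeq, startSeq+1, and startSeq+2,
// packed into as few batches as the item-count and plain-byte budgets allow.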

// submitWriteBatch first attempts a direct, inline flush; if the lane is busy
// it copies the items (they alias the caller's buffer) and queues them.
func (s *bulkDedicatedLaneSender) submitWriteBatch(ctx context.Context, dataID uint64, items []bulkDedicatedSendRequest) error {
	if s == nil {
		return errTransportDetached
	}
	if len(items) == 0 {
		return nil
	}
	if submitted, err := s.tryDirectSubmitBatch(ctx, dataID, items); submitted {
		return err
	}
	queuedItems := copyBulkDedicatedSendRequests(items)
	return s.submitBatch(ctx, dataID, queuedItems, true)
}

func (s *bulkDedicatedLaneSender) submitControl(ctx context.Context, dataID uint64, frameType uint8, flags uint8, seq uint64, payload []byte) error {
	if s == nil {
		return errTransportDetached
	}
	items := []bulkDedicatedSendRequest{{
		Type:  frameType,
		Flags: flags,
		Seq:   seq,
	}}
	if len(payload) > 0 {
		items[0].Payload = append([]byte(nil), payload...)
	}
	return s.submitBatch(ctx, dataID, items, true)
}

// submitBatch queues a prepared request on the sender goroutine. When wait is
// true it blocks until the flush completes (or ctx is done); otherwise it
// returns as soon as the request is enqueued.
func (s *bulkDedicatedLaneSender) submitBatch(ctx context.Context, dataID uint64, items []bulkDedicatedSendRequest, wait bool) error {
	if s == nil {
		return errTransportDetached
	}
	if ctx == nil {
		ctx = context.Background()
	}
	if err := s.errSnapshot(); err != nil {
		return err
	}
	req := getBulkDedicatedLaneBatchRequest()
	req.prepare(ctx, dataID, items, wait)
	s.queued.Add(1)
	select {
	case <-ctx.Done():
		s.queued.Add(-1)
		req.recycle()
		return normalizeStreamDeadlineError(ctx.Err())
	case <-s.stopCh:
		s.queued.Add(-1)
		req.recycle()
		return s.stoppedErr()
	case s.reqCh <- req:
		if !wait {
			return nil
		}
		return s.waitAck(req)
	}
}

// tryDirectSubmitBatch flushes items inline when the lane is idle: nothing
// queued and the flush mutex immediately available. It reports submitted =
// false (and no error) when the caller should fall back to the queue.
func (s *bulkDedicatedLaneSender) tryDirectSubmitBatch(ctx context.Context, dataID uint64, items []bulkDedicatedSendRequest) (bool, error) {
	if s == nil {
		return true, errTransportDetached
	}
	if ctx == nil {
		ctx = context.Background()
	}
	if len(items) == 0 {
		return true, nil
	}
	if err := s.errSnapshot(); err != nil {
		return true, err
	}
	select {
	case <-ctx.Done():
		return true, normalizeStreamDeadlineError(ctx.Err())
	case <-s.stopCh:
		return true, s.stoppedErr()
	default:
	}
	if s.queued.Load() != 0 {
		return false, nil
	}
	if !s.flushMu.TryLock() {
		return false, nil
	}
	defer s.flushMu.Unlock()
	// Re-check queue depth, the latched error, and shutdown now that we hold
	// the flush mutex; another goroutine may have raced ahead of us.
	if s.queued.Load() != 0 {
		return false, nil
	}
	if err := s.errSnapshot(); err != nil {
		return true, err
	}
	select {
	case <-ctx.Done():
		return true, normalizeStreamDeadlineError(ctx.Err())
	case <-s.stopCh:
		return true, s.stoppedErr()
	default:
	}
	deadline, _ := ctx.Deadline()
	if err := s.flush([]bulkDedicatedOutboundBatch{{
		DataID: dataID,
		Items:  items,
	}}, deadline); err != nil {
		err = normalizeDedicatedBulkSendError(err)
		s.setErr(err)
		s.failPending(err)
		if s.fail != nil {
			go s.fail(err)
		}
		return true, err
	}
	return true, nil
}

// waitAck blocks until the sender reports the request's outcome. If ctx ends
// first and the request is still queued, it is cancelled in place; if it has
// already started, waitAck must still consume the result before recycling.
func (s *bulkDedicatedLaneSender) waitAck(req *bulkDedicatedLaneBatchRequest) error {
	if s == nil {
		return errTransportDetached
	}
	if req == nil {
		return errTransportDetached
	}
	ctx := req.Ctx
	if ctx == nil {
		ctx = context.Background()
	}
	select {
	case err := <-req.resultCh:
		req.recycle()
		return normalizeDedicatedBulkSendError(err)
	case <-ctx.Done():
		if req.tryCancel() {
			// The sender goroutine now owns recycling via the aborted flag.
			req.aborted.Store(true)
			return normalizeStreamDeadlineError(ctx.Err())
		}
		err := <-req.resultCh
		req.recycle()
		return normalizeDedicatedBulkSendError(err)
	}
}

// stop latches errTransportDetached, signals shutdown, and waits for the run
// goroutine to drain.
func (s *bulkDedicatedLaneSender) stop() {
	if s == nil {
		return
	}
	s.stopOnce.Do(func() {
		s.setErr(errTransportDetached)
		close(s.stopCh)
	})
	<-s.doneCh
}

// run is the sender loop: it takes one request, opportunistically coalesces
// more under the flush mutex, writes the combined batches, and acknowledges
// every participant. The first write error tears the lane down.
func (s *bulkDedicatedLaneSender) run() {
	defer close(s.doneCh)
	var carry *bulkDedicatedLaneBatchRequest
	for {
		req, ok := s.nextRequest(carry)
		carry = nil
		if !ok {
			return
		}
		if !req.tryStart() {
			s.finishRequest(req, req.canceledErr())
			continue
		}
		if err := req.contextErr(); err != nil {
			s.finishRequest(req, err)
			continue
		}
		batchReqs := []*bulkDedicatedLaneBatchRequest{req}
		batches := []bulkDedicatedOutboundBatch{{
			DataID: req.DataID,
			Items:  req.Items,
		}}
		batchBytes := bulkDedicatedBatchesPlainLen(batches)
		deadline := req.Deadline
		s.flushMu.Lock()
		err := s.errSnapshot()
		if err == nil {
			// collectBatchRequests may hand back one request that did not
			// fit; it becomes the carry for the next iteration.
			carry, err = s.collectBatchRequests(&batchReqs, &batches, &batchBytes, &deadline)
			if err == nil {
				err = s.flush(batches, deadline)
			}
		}
		s.flushMu.Unlock()
		if err != nil {
			err = normalizeDedicatedBulkSendError(err)
			s.setErr(err)
			s.finishBatchRequests(batchReqs, err)
			if carry != nil {
				s.finishRequest(carry, err)
				carry = nil
			}
			s.failPending(err)
			if s.fail != nil {
				go s.fail(err)
			}
			return
		}
		s.finishBatchRequests(batchReqs, nil)
	}
}

// appendBulkDedicatedLaneBatch merges req into the trailing batch when the
// DataID matches, otherwise starts a new batch.
func appendBulkDedicatedLaneBatch(batches *[]bulkDedicatedOutboundBatch, req *bulkDedicatedLaneBatchRequest) {
	if req == nil {
		return
	}
	if len(*batches) > 0 && (*batches)[len(*batches)-1].DataID == req.DataID {
		last := &(*batches)[len(*batches)-1]
		last.Items = append(last.Items, req.Items...)
		return
	}
	*batches = append(*batches, bulkDedicatedOutboundBatch{
		DataID: req.DataID,
		Items:  req.Items,
	})
}

func bulkDedicatedLaneBatchItemCount(batches []bulkDedicatedOutboundBatch) int {
	total := 0
	for _, batch := range batches {
		total += len(batch.Items)
	}
	return total
}

// bulkDedicatedLaneNextBatchBytes computes the plain byte size of the batches
// if req were appended.
func bulkDedicatedLaneNextBatchBytes(batches []bulkDedicatedOutboundBatch, req *bulkDedicatedLaneBatchRequest, currentBytes int) int {
	reqBytes := bulkDedicatedSendRequestsLen(req.Items)
	if len(batches) == 0 {
		return bulkDedicatedBatchHeaderLen + reqBytes
	}
	last := batches[len(batches)-1]
	if last.DataID == req.DataID {
		return currentBytes + reqBytes
	}
	if len(batches) == 1 {
		// Switching from single-batch framing: re-base on the super-batch
		// header plus one group header per DataID.
		return bulkDedicatedSuperBatchHeaderLen +
			bulkDedicatedSuperBatchGroupHeaderLen + bulkDedicatedSendRequestsLen(batches[0].Items) +
			bulkDedicatedSuperBatchGroupHeaderLen + reqBytes
	}
	return currentBytes + bulkDedicatedSuperBatchGroupHeaderLen + reqBytes
}
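
// Accounting sketch of the function above (header names abbreviate the
// bulkDedicated*Len constants it uses): one DataID costs batchHeader +
// sum(items); introducing a second DataID re-bases to superHeader +
// 2*groupHeader + sum(items); every further DataID adds groupHeader plus its
// own items.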

// contextErr reports the request context's error without blocking.
func (r *bulkDedicatedLaneBatchRequest) contextErr() error {
	if r.Ctx == nil {
		return nil
	}
	select {
	case <-r.Ctx.Done():
		return normalizeStreamDeadlineError(r.Ctx.Err())
	default:
		return nil
	}
}

// tryStart and tryCancel race over the same queued state: exactly one of the
// sender goroutine and a cancelling waiter wins the CAS.
func (r *bulkDedicatedLaneBatchRequest) tryStart() bool {
	if r == nil {
		return false
	}
	return r.state.value.CompareAndSwap(bulkDedicatedRequestQueued, bulkDedicatedRequestStarted)
}

func (r *bulkDedicatedLaneBatchRequest) tryCancel() bool {
	if r == nil {
		return false
	}
	return r.state.value.CompareAndSwap(bulkDedicatedRequestQueued, bulkDedicatedRequestCanceled)
}

func (r *bulkDedicatedLaneBatchRequest) canceledErr() error {
	if r == nil {
		return context.Canceled
	}
	if err := r.contextErr(); err != nil {
		return err
	}
	return context.Canceled
}

// nextRequest returns the carry from the previous iteration when present,
// otherwise blocks for the next queued request or shutdown.
func (s *bulkDedicatedLaneSender) nextRequest(carry *bulkDedicatedLaneBatchRequest) (*bulkDedicatedLaneBatchRequest, bool) {
	if carry != nil {
		select {
		case <-s.stopCh:
			err := s.stoppedErr()
			s.finishRequest(carry, err)
			s.failPending(err)
			return nil, false
		default:
			return carry, true
		}
	}
	select {
	case <-s.stopCh:
		s.failPending(s.stoppedErr())
		return nil, false
	case req := <-s.reqCh:
		return req, true
	}
}

// collectBatchRequests drains additional queued requests into the pending
// flush until the item or byte budget is reached, waiting at most
// bulkDedicatedLaneMicroBatchWait once for more work to arrive. A request
// that would overflow the budget is returned as the carry for the next flush.
func (s *bulkDedicatedLaneSender) collectBatchRequests(batchReqs *[]*bulkDedicatedLaneBatchRequest, batches *[]bulkDedicatedOutboundBatch, batchBytes *int, deadline *time.Time) (*bulkDedicatedLaneBatchRequest, error) {
	if s == nil || bulkDedicatedLaneBatchItemCount(*batches) >= bulkDedicatedBatchMaxItems || *batchBytes >= bulkDedicatedBatchMaxPlainBytes {
		return nil, nil
	}
	wait := bulkDedicatedLaneMicroBatchWait
	var (
		timer   *time.Timer
		timerCh <-chan time.Time
		waited  bool
	)
	if wait > 0 {
		timer = time.NewTimer(wait)
		timerCh = timer.C
		defer timer.Stop()
	}
	for {
		if bulkDedicatedLaneBatchItemCount(*batches) >= bulkDedicatedBatchMaxItems || *batchBytes >= bulkDedicatedBatchMaxPlainBytes {
			return nil, nil
		}
		var (
			req *bulkDedicatedLaneBatchRequest
			ok  bool
		)
		select {
		case <-s.stopCh:
			return nil, s.stoppedErr()
		case req = <-s.reqCh:
			ok = true
		default:
		}
		if !ok {
			if waited || timerCh == nil {
				return nil, nil
			}
			waited = true
			select {
			case <-s.stopCh:
				return nil, s.stoppedErr()
			case req = <-s.reqCh:
			case <-timerCh:
				return nil, nil
			}
		}
		if err := req.contextErr(); err != nil {
			if req.tryCancel() {
				s.finishRequest(req, err)
				continue
			}
			s.finishRequest(req, req.canceledErr())
			continue
		}
		nextItems := bulkDedicatedLaneBatchItemCount(*batches) + len(req.Items)
		if nextItems > bulkDedicatedBatchMaxItems {
			return req, nil
		}
		nextBytes := bulkDedicatedLaneNextBatchBytes(*batches, req, *batchBytes)
		if nextBytes > bulkDedicatedBatchMaxPlainBytes {
			return req, nil
		}
		if !req.tryStart() {
			s.finishRequest(req, req.canceledErr())
			continue
		}
		if err := req.contextErr(); err != nil {
			s.finishRequest(req, err)
			continue
		}
		*batchReqs = append(*batchReqs, req)
		appendBulkDedicatedLaneBatch(batches, req)
		*batchBytes = nextBytes
		*deadline = earlierDeadline(*deadline, req.Deadline)
	}
}

// earlierDeadline returns the sooner of two deadlines, treating the zero time
// as "no deadline".
func earlierDeadline(current time.Time, next time.Time) time.Time {
	if current.IsZero() {
		return next
	}
	if next.IsZero() || current.Before(next) {
		return current
	}
	return next
}

// flush encodes batches into a single record and writes it to the connection
// under the given deadline. release, when non-nil, returns the encode buffer.
func (s *bulkDedicatedLaneSender) flush(batches []bulkDedicatedOutboundBatch, deadline time.Time) error {
	if s == nil || s.conn == nil {
		return errTransportDetached
	}
	payload, release, err := s.encode(batches)
	if err != nil {
		return err
	}
	if release != nil {
		defer release()
	}
	return writeBulkDedicatedRecordWithDeadline(s.conn, payload, deadline)
}

// finishRequest decrements the queue depth and delivers err to a waiting
// caller. Aborted requests have no waiter left, so they are recycled here;
// otherwise the waiter recycles after reading resultCh.
func (s *bulkDedicatedLaneSender) finishRequest(req *bulkDedicatedLaneBatchRequest, err error) {
	if s != nil {
		s.queued.Add(-1)
	}
	if req == nil {
		return
	}
	if req.wait && req.resultCh != nil {
		if req.aborted.Load() {
			req.recycle()
			return
		}
		req.resultCh <- err
		return
	}
	req.recycle()
}

func (s *bulkDedicatedLaneSender) finishBatchRequests(reqs []*bulkDedicatedLaneBatchRequest, err error) {
	for _, req := range reqs {
		s.finishRequest(req, err)
	}
}

// failPending drains the request channel, failing every queued request.
func (s *bulkDedicatedLaneSender) failPending(err error) {
	for {
		select {
		case item := <-s.reqCh:
			s.finishRequest(item, err)
		default:
			return
		}
	}
}

// setErr latches the first error; later errors are ignored.
func (s *bulkDedicatedLaneSender) setErr(err error) {
	if s == nil || err == nil {
		return
	}
	s.errMu.Lock()
	if s.err == nil {
		s.err = err
	}
	s.errMu.Unlock()
}

func (s *bulkDedicatedLaneSender) errSnapshot() error {
	if s == nil {
		return errTransportDetached
	}
	s.errMu.Lock()
	defer s.errMu.Unlock()
	return s.err
}

// stoppedErr prefers the latched error over the generic detached error.
func (s *bulkDedicatedLaneSender) stoppedErr() error {
	if err := s.errSnapshot(); err != nil {
		return err
	}
	return errTransportDetached
}