Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
140 changes: 140 additions & 0 deletions internal/client/conn_monitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,19 @@
package client

import (
"context"
"strconv"
"sync"
"sync/atomic"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/tikv/client-go/v2/internal/logutil"
"github.com/tikv/client-go/v2/metrics"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/stats"
)

type monitoredConn struct {
Expand Down Expand Up @@ -99,3 +104,138 @@ func (c *connMonitor) start() {
}
}
}

// batchCommandsMethod is the full gRPC method name of the TiKV BatchCommands
// streaming RPC; only RPCs with this method name are monitored.
const batchCommandsMethod = "/tikvpb.Tikv/BatchCommands"

// batchClientStreamKey is the context key under which TagRPC stores the
// unique stream ID of a monitored BatchCommands stream.
type batchClientStreamKey struct{}

// globalBatchClientStreamID is a process-wide counter, incremented atomically
// in TagRPC, used to assign a unique ID to every BatchCommands stream.
var globalBatchClientStreamID uint64

// batchClientStreamMetrics holds the lazily-bound, per-stream gauges for one
// BatchCommands stream. Instances live in
// batchClientStatsMonitor.streamMetrics, keyed by stream ID, and must be
// initialized via init before the gauges are used.
type batchClientStreamMetrics struct {
	inited sync.Once // guards one-time binding of the gauges below

	sendTime prometheus.Gauge // timestamp (fractional seconds) of the last sent payload
	recvTime prometheus.Gauge // timestamp (fractional seconds) of the last received payload
}

// init binds the per-stream gauges for the given address and stream ID.
// Repeated calls are no-ops thanks to sync.Once; the receiver is returned so
// callers can chain it directly after a sync.Map LoadOrStore.
func (m *batchClientStreamMetrics) init(addr string, id uint64) *batchClientStreamMetrics {
	m.inited.Do(func() {
		label := strconv.FormatUint(id, 10)
		m.sendTime = metrics.TiKVBatchCommandsSendTime.WithLabelValues(addr, label)
		m.recvTime = metrics.TiKVBatchCommandsRecvTime.WithLabelValues(addr, label)
	})
	return m
}

// onOutPayload records the wall-clock send time of the most recent outbound
// payload on this stream, as fractional seconds since the Unix epoch.
func (m *batchClientStreamMetrics) onOutPayload(ev *stats.OutPayload) {
	seconds := float64(ev.SentTime.UnixNano()) / float64(time.Second)
	m.sendTime.Set(seconds)
}

// onInPayload records the wall-clock receive time of the most recent inbound
// payload on this stream, as fractional seconds since the Unix epoch.
func (m *batchClientStreamMetrics) onInPayload(ev *stats.InPayload) {
	seconds := float64(ev.RecvTime.UnixNano()) / float64(time.Second)
	m.recvTime.Set(seconds)
}
Comment thread
coderabbitai[bot] marked this conversation as resolved.

// cleanUp removes this stream's label values from the gauge vectors so that
// finished streams do not leave stale series behind in Prometheus.
func (m *batchClientStreamMetrics) cleanUp(addr string, id uint64) {
	label := strconv.FormatUint(id, 10)
	metrics.TiKVBatchCommandsSendTime.DeleteLabelValues(addr, label)
	metrics.TiKVBatchCommandsRecvTime.DeleteLabelValues(addr, label)
}

// batchClientTargetMetrics aggregates per-target (per-address) counters for
// BatchCommands traffic. All counters are bound once in
// newBatchClientStatsMonitor.
type batchClientTargetMetrics struct {
	sendWireBytes    prometheus.Counter // total outbound bytes on the wire
	recvWireBytes    prometheus.Counter // total inbound bytes on the wire
	delayedPick      prometheus.Counter // RPCs whose sub-channel pick completed after a delay
	transparentRetry prometheus.Counter // transparent retry attempts reported by gRPC
}

// onOutPayload accumulates the on-the-wire size of an outbound payload.
func (m *batchClientTargetMetrics) onOutPayload(ev *stats.OutPayload) {
	n := float64(ev.WireLength)
	m.sendWireBytes.Add(n)
}

// onInPayload accumulates the on-the-wire size of an inbound payload.
func (m *batchClientTargetMetrics) onInPayload(ev *stats.InPayload) {
	n := float64(ev.WireLength)
	m.recvWireBytes.Add(n)
}

// onDelayedPickComplete counts RPCs whose sub-channel pick was delayed; the
// event payload carries no data we use, so the parameter is ignored.
func (m *batchClientTargetMetrics) onDelayedPickComplete(_ *stats.DelayedPickComplete) {
	m.delayedPick.Inc()
}

// onBegin counts transparent retry attempts; ordinary (non-retry) RPC starts
// are not recorded.
func (m *batchClientTargetMetrics) onBegin(ev *stats.Begin) {
	if !ev.IsTransparentRetryAttempt {
		return
	}
	m.transparentRetry.Inc()
}

// batchClientStatsMonitor is a gRPC stats.Handler that records metrics for
// BatchCommands streams towards a single TiKV address.
type batchClientStatsMonitor struct {
	addr          string
	targetMetrics batchClientTargetMetrics
	// streamMetrics maps stream ID (uint64) to *batchClientStreamMetrics.
	// sync.Map fits this usage: each key is written once per stream and key
	// sets are disjoint across streams.
	streamMetrics sync.Map
}

// newBatchClientStatsMonitor creates a stats handler that tracks
// BatchCommands traffic for the given TiKV address. Target-level counters
// are bound eagerly here; per-stream gauges are created lazily on first use.
func newBatchClientStatsMonitor(addr string) *batchClientStatsMonitor {
	target := batchClientTargetMetrics{
		sendWireBytes:    metrics.TiKVBatchCommandsSendWireBytes.WithLabelValues(addr),
		recvWireBytes:    metrics.TiKVBatchCommandsRecvWireBytes.WithLabelValues(addr),
		delayedPick:      metrics.TiKVBatchCommandsDelayedPick.WithLabelValues(addr),
		transparentRetry: metrics.TiKVBatchCommandsTransparentRetry.WithLabelValues(addr),
	}
	return &batchClientStatsMonitor{
		addr:          addr,
		targetMetrics: target,
	}
}

// getStreamMetrics returns the per-stream metrics for the given stream ID,
// creating and registering them on first use.
//
// The Load fast path avoids allocating a fresh batchClientStreamMetrics on
// every call: this method sits on the hot path of HandleRPC (invoked once
// per payload), whereas the original LoadOrStore-only form allocated a new
// value each time even after the entry existed. Calling init on a cache hit
// is safe and cheap — it is guarded by sync.Once and becomes a no-op.
func (m *batchClientStatsMonitor) getStreamMetrics(id uint64) *batchClientStreamMetrics {
	if mm, ok := m.streamMetrics.Load(id); ok {
		return mm.(*batchClientStreamMetrics).init(m.addr, id)
	}
	mm, _ := m.streamMetrics.LoadOrStore(id, &batchClientStreamMetrics{})
	return mm.(*batchClientStreamMetrics).init(m.addr, id)
}

// TagConn implements [stats.Handler]. Connection-level tagging is not needed
// for batch-commands monitoring, so the context is returned unchanged.
func (m *batchClientStatsMonitor) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

// HandleConn implements [stats.Handler]. Connection-level stats events are
// intentionally ignored; only per-RPC events are recorded in HandleRPC.
func (m *batchClientStatsMonitor) HandleConn(context.Context, stats.ConnStats) {
}

// TagRPC implements [stats.Handler].
func (m *batchClientStatsMonitor) TagRPC(ctx context.Context, i *stats.RPCTagInfo) context.Context {
if i.FullMethodName != batchCommandsMethod {
return ctx
}
return context.WithValue(ctx, batchClientStreamKey{}, atomic.AddUint64(&globalBatchClientStreamID, 1))
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could we associate a conn index with streamID ?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, we can merge #1931 first since the concept of conn index is introduce by #1931.

}

// HandleRPC implements [stats.Handler]. It routes per-RPC stats events from
// BatchCommands streams into the target-level and per-stream metrics, and
// releases the per-stream series once the stream ends.
func (m *batchClientStatsMonitor) HandleRPC(ctx context.Context, s stats.RPCStats) {
	// Only contexts tagged by TagRPC carry a stream ID; anything else is not
	// a BatchCommands stream and is ignored.
	id, ok := ctx.Value(batchClientStreamKey{}).(uint64)
	if !ok {
		return
	}
	switch e := s.(type) {
	case *stats.OutPayload:
		m.targetMetrics.onOutPayload(e)
		m.getStreamMetrics(id).onOutPayload(e)
	case *stats.InPayload:
		m.targetMetrics.onInPayload(e)
		m.getStreamMetrics(id).onInPayload(e)
	case *stats.DelayedPickComplete:
		m.targetMetrics.onDelayedPickComplete(e)
	case *stats.Begin:
		m.targetMetrics.onBegin(e)
	case *stats.End:
		// The stream is finished: drop its entry and unregister the gauges
		// so the series does not linger after the stream goes away.
		if mm, found := m.streamMetrics.LoadAndDelete(id); found {
			mm.(*batchClientStreamMetrics).cleanUp(m.addr, id)
		}
	}
}
119 changes: 119 additions & 0 deletions internal/client/conn_monitor_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
// Copyright 2026 TiKV Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
"context"
"strconv"
"testing"
"time"

dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/metrics"
"google.golang.org/grpc/stats"
)

// resetBatchCommandsStatsMetrics clears every metric vector touched by the
// batch-commands stats monitor so each test starts from a clean slate.
func resetBatchCommandsStatsMetrics() {
	for _, vec := range []interface{ Reset() }{
		metrics.TiKVBatchCommandsSendTime,
		metrics.TiKVBatchCommandsRecvTime,
		metrics.TiKVBatchCommandsSendWireBytes,
		metrics.TiKVBatchCommandsRecvWireBytes,
		metrics.TiKVBatchCommandsDelayedPick,
		metrics.TiKVBatchCommandsTransparentRetry,
	} {
		vec.Reset()
	}
}

// readGauge extracts the current value of a gauge metric by serializing it
// into its protobuf representation.
func readGauge(t *testing.T, metric interface{ Write(*dto.Metric) error }) float64 {
	t.Helper()

	var pb dto.Metric
	require.NoError(t, metric.Write(&pb))
	return pb.GetGauge().GetValue()
}

// readCounter extracts the current value of a counter metric by serializing
// it into its protobuf representation.
func readCounter(t *testing.T, metric interface{ Write(*dto.Metric) error }) float64 {
	t.Helper()

	var pb dto.Metric
	require.NoError(t, metric.Write(&pb))
	return pb.GetCounter().GetValue()
}

// TestBatchClientStatsMonitorTagRPCUsesGlobalStreamID verifies that stream
// IDs come from a process-wide counter: two independent monitors for the
// same address must still hand out distinct, non-zero IDs.
func TestBatchClientStatsMonitorTagRPCUsesGlobalStreamID(t *testing.T) {
	resetBatchCommandsStatsMetrics()
	t.Cleanup(resetBatchCommandsStatsMetrics)

	tagged := func() context.Context {
		return newBatchClientStatsMonitor("tikv-1").TagRPC(context.Background(), &stats.RPCTagInfo{
			FullMethodName: batchCommandsMethod,
		})
	}

	id1, ok1 := tagged().Value(batchClientStreamKey{}).(uint64)
	id2, ok2 := tagged().Value(batchClientStreamKey{}).(uint64)
	require.True(t, ok1)
	require.True(t, ok2)
	require.NotZero(t, id1)
	require.NotZero(t, id2)
	require.NotEqual(t, id1, id2)
}

// TestBatchClientStatsMonitorHandleRPC drives a full RPC lifecycle through
// HandleRPC and checks that each event type lands in the right metric:
// payload events update both target counters and per-stream gauges, pick and
// retry events update target counters only, and End removes the per-stream
// series while leaving the target-level counters intact.
func TestBatchClientStatsMonitorHandleRPC(t *testing.T) {
	resetBatchCommandsStatsMetrics()
	t.Cleanup(resetBatchCommandsStatsMetrics)

	addr := "tikv-1"
	monitor := newBatchClientStatsMonitor(addr)
	// Tag the context as a BatchCommands stream so HandleRPC processes it.
	ctx := monitor.TagRPC(context.Background(), &stats.RPCTagInfo{
		FullMethodName: batchCommandsMethod,
	})
	id, ok := ctx.Value(batchClientStreamKey{}).(uint64)
	require.True(t, ok)

	// Target-level-only events: a delayed pick and a transparent retry.
	monitor.HandleRPC(ctx, &stats.DelayedPickComplete{})
	monitor.HandleRPC(ctx, &stats.Begin{IsTransparentRetryAttempt: true})

	// Payload events carry both a timestamp (per-stream gauge) and a wire
	// length (target-level counter).
	sentTime := time.Unix(1710000000, 500000000)
	recvTime := time.Unix(1710000001, 250000000)
	monitor.HandleRPC(ctx, &stats.OutPayload{
		SentTime:   sentTime,
		WireLength: 11,
	})
	monitor.HandleRPC(ctx, &stats.InPayload{
		RecvTime:   recvTime,
		WireLength: 13,
	})

	// The gauges hold the event timestamps as fractional seconds.
	streamID := strconv.FormatUint(id, 10)
	require.Equal(t,
		float64(sentTime.UnixNano())/float64(time.Second),
		readGauge(t, metrics.TiKVBatchCommandsSendTime.WithLabelValues(addr, streamID)),
	)
	require.Equal(t,
		float64(recvTime.UnixNano())/float64(time.Second),
		readGauge(t, metrics.TiKVBatchCommandsRecvTime.WithLabelValues(addr, streamID)),
	)
	require.Equal(t, 11.0, readCounter(t, metrics.TiKVBatchCommandsSendWireBytes.WithLabelValues(addr)))
	require.Equal(t, 13.0, readCounter(t, metrics.TiKVBatchCommandsRecvWireBytes.WithLabelValues(addr)))
	require.Equal(t, 1.0, readCounter(t, metrics.TiKVBatchCommandsDelayedPick.WithLabelValues(addr)))
	require.Equal(t, 1.0, readCounter(t, metrics.TiKVBatchCommandsTransparentRetry.WithLabelValues(addr)))

	// End must delete the per-stream series (DeleteLabelValues reports false
	// afterwards because nothing remains to delete) without touching the
	// target-level counters.
	monitor.HandleRPC(ctx, &stats.End{})
	require.False(t, metrics.TiKVBatchCommandsSendTime.DeleteLabelValues(addr, streamID))
	require.False(t, metrics.TiKVBatchCommandsRecvTime.DeleteLabelValues(addr, streamID))
	require.Equal(t, 11.0, readCounter(t, metrics.TiKVBatchCommandsSendWireBytes.WithLabelValues(addr)))
	require.Equal(t, 13.0, readCounter(t, metrics.TiKVBatchCommandsRecvWireBytes.WithLabelValues(addr)))
}
8 changes: 7 additions & 1 deletion internal/client/conn_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ import (
"google.golang.org/grpc/experimental"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/stats"
)

type connPool struct {
Expand Down Expand Up @@ -102,6 +103,7 @@ func (a *connPool) Init(addr string, security config.Security, idleNotify *uint3
var (
unaryInterceptor grpc.UnaryClientInterceptor
streamInterceptor grpc.StreamClientInterceptor
batchStatsMonitor stats.Handler
)
if cfg.OpenTracingEnable {
unaryInterceptor = grpc_opentracing.UnaryClientInterceptor()
Expand All @@ -112,6 +114,7 @@ func (a *connPool) Init(addr string, security config.Security, idleNotify *uint3
if allowBatch {
a.batchConn = newBatchConn(uint(len(a.conns)), cfg.TiKVClient.MaxBatchSize, idleNotify)
a.initMetrics(a.target)
batchStatsMonitor = newBatchClientStatsMonitor(addr)
}
keepAlive := cfg.TiKVClient.GrpcKeepAliveTime
for i := range a.conns {
Expand All @@ -125,7 +128,7 @@ func (a *connPool) Init(addr string, security config.Security, idleNotify *uint3
// Don't remove this until we no longer use sharedBytes in tipb and kvproto.
callOptions = append(callOptions, grpc.ForceCodec(&legacyCodec{}))

opts = append([]grpc.DialOption{
opts := append([]grpc.DialOption{
opt,
grpc.WithInitialWindowSize(cfg.TiKVClient.GrpcInitialWindowSize),
grpc.WithInitialConnWindowSize(cfg.TiKVClient.GrpcInitialConnWindowSize),
Expand All @@ -149,6 +152,9 @@ func (a *connPool) Init(addr string, security config.Security, idleNotify *uint3
if !cfg.TiKVClient.GrpcSharedBufferPool {
opts = append(opts, experimental.WithBufferPool(mem.NopBufferPool{}))
}
if batchStatsMonitor != nil {
opts = append(opts, grpc.WithStatsHandler(batchStatsMonitor))
}
conn, err := a.monitoredDial(
ctx,
fmt.Sprintf("%s-%d", a.target, i),
Expand Down
Loading
Loading