This repository was archived by the owner on Apr 2, 2024. It is now read-only.

Commit 0261f72
Change subsystem to type label.
Signed-off-by: Harkishen-Singh <[email protected]>

As per the design doc, the `type` label can now take the values ['metric', 'trace']. This commit makes the code consistent with that, leaving `subsystem` for ['metric_batcher', 'copier'], which will be implemented in another PR responsible for updating all metrics on the metric path.
1 parent ebd9f77 commit 0261f72
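
For context, the change moves the signal class onto a `type` label (`metric` or `trace`) instead of `subsystem` on Promscale's request metrics. A minimal sketch of a counter vector with the new label set, using the Prometheus Go client (this declaration is illustrative; the real `RequestsTotal` lives in `pkg/pgmodel/metrics` and may differ):

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// RequestsTotal partitions requests by the new "type" label ("metric" or
// "trace" per the design doc), the handler that served the request, and a
// coarse response-code class such as "2xx" or "5xx".
var RequestsTotal = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "promscale", // assumed value of util.PromNamespace
		Name:      "query_requests_total",
		Help:      "Total number of query requests by type, handler and code.",
	},
	[]string{"type", "handler", "code"},
)

func init() {
	prometheus.MustRegister(RequestsTotal)
}
```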

File tree

8 files changed: +182 -208 lines changed


CHANGELOG.md (3 additions, 0 deletions)

```diff
@@ -14,6 +14,9 @@ We use the following categories for changes:
 
 ## [Unreleased]
 
+### Added
+- Add Prometheus metrics support for Tracing [#1102]
+
 ## [0.9.0] - 2022-02-02
 
 ### Added
```

pkg/jaeger/query/metrics.go (2 additions, 1 deletion)

```diff
@@ -13,13 +13,14 @@ import (
 )
 
 var (
+	// TODO (harkishen): update telemetry module to extract metric data from series in new consistent metrics.
 	traceRequestsExec = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: util.PromNamespace,
 		Subsystem: "trace",
 		Name:      "query_requests_executed_total",
 		Help:      "Total number of query requests successfully executed by /getTrace and /fetchTraces API.",
 	})
-	// Even though this is handled by promscale_query_requests_total{subsystem="trace", handler="get_dependencies", code="200"}
+	// Even though this is handled by promscale_query_requests_total{type="trace", handler="get_dependencies", code="200"}
 	// yet we will have to keep this metric for telemetry as extracting the underlying series from a metric will require
 	// changing telemetry arch that tracks the all prometheus metrics, just for this metric, which is not worth.
 	dependencyRequestsExec = prometheus.NewCounter(prometheus.CounterOpts{
```
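
One detail worth noting: the `Subsystem: "trace"` field kept in the CounterOpts above is part of the metric *name* (`namespace_subsystem_name`), not the `subsystem` *label* this commit renames, so the two are independent. A small illustration, assuming `util.PromNamespace` is `"promscale"`:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// CounterOpts{Namespace, Subsystem, Name} are joined into the fully
	// qualified metric name; labels are attached separately.
	fmt.Println(prometheus.BuildFQName("promscale", "trace", "query_requests_executed_total"))
	// Output: promscale_trace_query_requests_executed_total
}
```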

pkg/jaeger/query/query.go (60 additions, 46 deletions)

```diff
@@ -14,6 +14,7 @@ import (
 	"github.com/jaegertracing/jaeger/model"
 	"github.com/jaegertracing/jaeger/storage/dependencystore"
 	"github.com/jaegertracing/jaeger/storage/spanstore"
+
 	"github.com/timescale/promscale/pkg/log"
 	"github.com/timescale/promscale/pkg/pgmodel/metrics"
 	"github.com/timescale/promscale/pkg/pgxconn"
@@ -44,85 +45,98 @@ func (p *Query) SpanWriter() spanstore.Writer {
 }
 
 func (p *Query) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) {
-	metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_trace", "code": ""}).Inc()
+	code := "2xx"
 	start := time.Now()
+	defer func() {
+		metrics.RequestsTotal.With(prometheus.Labels{"type": "trace", "handler": "Get_Trace", "code": code}).Inc()
+		metrics.RequestsDuration.With(prometheus.Labels{"type": "trace", "handler": "Get_Trace", "code": code}).Observe(time.Since(start).Seconds())
+	}()
 	res, err := getTrace(ctx, p.conn, traceID)
-	if err == nil {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_trace", "code": "200"}).Inc()
-		traceRequestsExec.Add(1)
-		metrics.RequestsDuration.With(prometheus.Labels{"subsystem": "trace", "handler": "get_trace"}).Observe(time.Since(start).Seconds())
-	} else {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_trace", "code": "500"}).Inc()
+	if err != nil {
+		code = "5xx"
+		return nil, logError(err)
 	}
-	return res, logError(err)
+	traceRequestsExec.Add(1)
+	return res, nil
 }
 
 func (p *Query) GetServices(ctx context.Context) ([]string, error) {
-	metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_services", "code": ""}).Inc()
+	code := "2xx"
 	start := time.Now()
+	defer func() {
+		metrics.RequestsTotal.With(prometheus.Labels{"type": "trace", "handler": "Get_Services", "code": code}).Inc()
+		metrics.RequestsDuration.With(prometheus.Labels{"type": "trace", "handler": "Get_Services", "code": code}).Observe(time.Since(start).Seconds())
+	}()
 	res, err := getServices(ctx, p.conn)
-	if err == nil {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_services", "code": "200"}).Inc()
-		metrics.RequestsDuration.With(prometheus.Labels{"subsystem": "trace", "handler": "get_services"}).Observe(time.Since(start).Seconds())
-	} else {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_services", "code": "500"}).Inc()
+	if err != nil {
+		code = "5xx"
+		return nil, logError(err)
 	}
-	return res, logError(err)
+	return res, nil
 }
 
 func (p *Query) GetOperations(ctx context.Context, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) {
-	metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_operations", "code": ""}).Inc()
+	code := "2xx"
 	start := time.Now()
+	defer func() {
+		metrics.RequestsTotal.With(prometheus.Labels{"type": "trace", "handler": "Get_Operations", "code": code}).Inc()
+		metrics.RequestsDuration.With(prometheus.Labels{"type": "trace", "handler": "Get_Operations", "code": code}).Observe(time.Since(start).Seconds())
+	}()
 	res, err := getOperations(ctx, p.conn, query)
-	if err == nil {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_operations", "code": "200"}).Inc()
-		metrics.RequestsDuration.With(prometheus.Labels{"subsystem": "trace", "handler": "get_operations"}).Observe(time.Since(start).Seconds())
-	} else {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_operations", "code": "500"}).Inc()
+	if err != nil {
+		code = "5xx"
+		return nil, logError(err)
 	}
-	return res, logError(err)
+	return res, nil
 }
 
 func (p *Query) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) {
-	metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "find_traces", "code": ""}).Inc()
+	code := "2xx"
 	start := time.Now()
+	defer func() {
+		metrics.RequestsTotal.With(prometheus.Labels{"type": "trace", "handler": "Find_Traces", "code": code}).Inc()
+		metrics.RequestsDuration.With(prometheus.Labels{"type": "trace", "handler": "Find_Traces", "code": code}).Observe(time.Since(start).Seconds())
+	}()
 	res, err := findTraces(ctx, p.conn, query)
-	if err == nil {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "find_traces", "code": "200"}).Inc()
-		traceRequestsExec.Add(1)
-		metrics.RequestsDuration.With(prometheus.Labels{"subsystem": "trace", "handler": "find_traces"}).Observe(time.Since(start).Seconds())
-	} else {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "find_traces", "code": "500"}).Inc()
+	if err != nil {
+		code = "5xx"
+		return nil, logError(err)
 	}
-	return res, logError(err)
+	traceRequestsExec.Add(1)
+	return res, nil
 }
 
 func (p *Query) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) {
-	metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "find_trace_ids", "code": ""}).Inc()
+	code := "2xx"
 	start := time.Now()
+	defer func() {
+		metrics.RequestsTotal.With(prometheus.Labels{"type": "trace", "handler": "Find_Trace_IDs", "code": code}).Inc()
+		metrics.RequestsDuration.With(prometheus.Labels{"type": "trace", "handler": "Find_Trace_IDs", "code": code}).Observe(time.Since(start).Seconds())
+	}()
 	res, err := findTraceIDs(ctx, p.conn, query)
-	if err == nil {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "find_trace_ids", "code": "200"}).Inc()
-		traceRequestsExec.Add(1)
-		metrics.RequestsDuration.With(prometheus.Labels{"subsystem": "trace", "handler": "find_trace_ids"}).Observe(time.Since(start).Seconds())
-	} else {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "find_trace_ids", "code": "500"}).Inc()
+	if err != nil {
+		code = "5xx"
+		return nil, logError(err)
 	}
-	return res, logError(err)
+	traceRequestsExec.Add(1)
+	return res, nil
 }
 
 func (p *Query) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) {
-	metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_dependencies", "code": ""}).Inc()
+	code := "2xx"
 	start := time.Now()
+	defer func() {
+		metrics.RequestsTotal.With(prometheus.Labels{"type": "trace", "handler": "Get_Dependencies", "code": code}).Inc()
+		metrics.RequestsDuration.With(prometheus.Labels{"type": "trace", "handler": "Get_Dependencies", "code": code}).Observe(time.Since(start).Seconds())
+	}()
+
 	res, err := getDependencies(ctx, p.conn, endTs, lookback)
-	if err == nil {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_dependencies", "code": "200"}).Inc()
-		dependencyRequestsExec.Add(1)
-		metrics.RequestsDuration.With(prometheus.Labels{"subsystem": "trace", "handler": "get_dependencies"}).Observe(time.Since(start).Seconds())
-	} else {
-		metrics.RequestsTotal.With(prometheus.Labels{"subsystem": "trace", "handler": "get_dependencies", "code": "500"}).Inc()
+	if err != nil {
+		code = "5xx"
+		return nil, logError(err)
 	}
-	return res, logError(err)
+	dependencyRequestsExec.Add(1)
+	return res, nil
 }
 
 func logError(err error) error {
```
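
Every handler in this file now follows the same shape: set an optimistic `code` of `"2xx"`, flip it to `"5xx"` on error, and record both the count and the duration once in a deferred closure, so each request is observed exactly once on every return path (the old version skipped the duration observation on errors). A self-contained sketch of the pattern, with `instrument` as a hypothetical helper and the duration metric assumed to be a histogram:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	requestsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "query_requests_total"},
		[]string{"type", "handler", "code"},
	)
	requestsDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "query_requests_duration_seconds"},
		[]string{"type", "handler", "code"},
	)
)

// instrument runs fn and records one count and one duration sample for it.
// The deferred closure reads code at return time, so the "5xx" assignment
// made on the error path is what gets recorded.
func instrument(handler string, fn func() error) error {
	code := "2xx"
	start := time.Now()
	defer func() {
		labels := prometheus.Labels{"type": "trace", "handler": handler, "code": code}
		requestsTotal.With(labels).Inc()
		requestsDuration.With(labels).Observe(time.Since(start).Seconds())
	}()
	if err := fn(); err != nil {
		code = "5xx"
		return err
	}
	return nil
}

func main() {
	prometheus.MustRegister(requestsTotal, requestsDuration)
	_ = instrument("Get_Trace", func() error { return nil })
	_ = instrument("Get_Trace", func() error { return errors.New("query failed") })
	fmt.Println("recorded one 2xx and one 5xx sample for Get_Trace")
}
```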

pkg/pgmodel/cache/metrics.go (0 additions, 119 deletions)

This file was deleted.
