chore!: adopt log/slog, drop go-kit/log #378

Merged 2 commits on Nov 4, 2024
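Most of the diff is a mechanical translation of call sites: go-kit's level-wrapped `Log` calls become leveled methods on a `*slog.Logger`, and `log.With` becomes `Logger.With`. A minimal before/after sketch of the pattern this PR applies everywhere (names and values are illustrative):

```go
package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// Before (go-kit/log):
	//   logger := log.NewLogfmtLogger(os.Stderr)
	//   logger = log.With(logger, "project_id", "my-project")
	//   level.Error(logger).Log("msg", "Error while getting metrics", "err", err)

	// After (log/slog), as used throughout this PR:
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	logger = logger.With("project_id", "my-project")

	logger.Error("Error while getting metrics", "err", errors.New("boom"))
	logger.Debug("retrieving metrics for descriptor", "descriptor", "compute.googleapis.com/instance/cpu/utilization")
}
```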
6 changes: 4 additions & 2 deletions .golangci.yml
@@ -1,4 +1,8 @@
---
linters:
enable:
- sloglint

run:
deadline: 5m
skip-files:
@@ -16,5 +20,3 @@ linters-settings:
exclude-functions:
# Used in HTTP handlers, any error is handled by the server itself.
- (net/http.ResponseWriter).Write
# Never check for logger errors.
- (github.com/go-kit/log.Logger).Log
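Dropping the errcheck exclusion is a consequence of the API change: go-kit's Logger.Log returns an error that was deliberately ignored, while slog's leveled methods return nothing, so there is no unchecked return left to exclude. sloglint is enabled to keep the new call sites consistent (it flags, for example, mixing plain key-value pairs with slog.Attr arguments). A small sketch of the difference, with made-up messages:

```go
package main

import (
	"log/slog"
	"os"
	"time"
)

func main() {
	start := time.Now()

	// go-kit/log: Log returns an error, hence the old errcheck exclusion:
	//   _ = logger.Log("msg", "done") // error intentionally ignored

	// log/slog: leveled methods have no return value, nothing for errcheck to flag.
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	logger.Info("done", "took", time.Since(start))
}
```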
35 changes: 17 additions & 18 deletions collectors/monitoring_collector.go
@@ -16,13 +16,12 @@ package collectors
import (
"errors"
"fmt"
"log/slog"
"math"
"strings"
"sync"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
"google.golang.org/api/monitoring/v3"
@@ -53,7 +52,7 @@ type MonitoringCollector struct {
lastScrapeDurationSecondsMetric prometheus.Gauge
collectorFillMissingLabels bool
monitoringDropDelegatedProjects bool
logger log.Logger
logger *slog.Logger
counterStore DeltaCounterStore
histogramStore DeltaHistogramStore
aggregateDeltas bool
@@ -120,10 +119,10 @@ type DeltaHistogramStore interface {
ListMetrics(metricDescriptorName string) []*HistogramMetric
}

func NewMonitoringCollector(projectID string, monitoringService *monitoring.Service, opts MonitoringCollectorOptions, logger log.Logger, counterStore DeltaCounterStore, histogramStore DeltaHistogramStore) (*MonitoringCollector, error) {
func NewMonitoringCollector(projectID string, monitoringService *monitoring.Service, opts MonitoringCollectorOptions, logger *slog.Logger, counterStore DeltaCounterStore, histogramStore DeltaHistogramStore) (*MonitoringCollector, error) {
const subsystem = "monitoring"

logger = log.With(logger, "project_id", projectID)
logger = logger.With("project_id", projectID)

apiCallsTotalMetric := prometheus.NewCounter(
prometheus.CounterOpts{
@@ -237,7 +236,7 @@ func (c *MonitoringCollector) Collect(ch chan<- prometheus.Metric) {
if err := c.reportMonitoringMetrics(ch, begun); err != nil {
errorMetric = float64(1)
c.scrapeErrorsTotalMetric.Inc()
level.Error(c.logger).Log("msg", "Error while getting Google Stackdriver Monitoring metrics", "err", err)
c.logger.Error("Error while getting Google Stackdriver Monitoring metrics", "err", err)
}
c.scrapeErrorsTotalMetric.Collect(ch)

@@ -283,7 +282,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
wg.Add(1)
go func(metricDescriptor *monitoring.MetricDescriptor, ch chan<- prometheus.Metric, startTime, endTime time.Time) {
defer wg.Done()
level.Debug(c.logger).Log("msg", "retrieving Google Stackdriver Monitoring metrics for descriptor", "descriptor", metricDescriptor.Type)
c.logger.Debug("retrieving Google Stackdriver Monitoring metrics for descriptor", "descriptor", metricDescriptor.Type)
filter := fmt.Sprintf("metric.type=\"%s\"", metricDescriptor.Type)
if c.monitoringDropDelegatedProjects {
filter = fmt.Sprintf(
@@ -298,11 +297,11 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
ingestDelay := metricDescriptor.Metadata.IngestDelay
ingestDelayDuration, err := time.ParseDuration(ingestDelay)
if err != nil {
level.Error(c.logger).Log("msg", "error parsing ingest delay from metric metadata", "descriptor", metricDescriptor.Type, "err", err, "delay", ingestDelay)
c.logger.Error("error parsing ingest delay from metric metadata", "descriptor", metricDescriptor.Type, "err", err, "delay", ingestDelay)
errChannel <- err
return
}
level.Debug(c.logger).Log("msg", "adding ingest delay", "descriptor", metricDescriptor.Type, "delay", ingestDelay)
c.logger.Debug("adding ingest delay", "descriptor", metricDescriptor.Type, "delay", ingestDelay)
endTime = endTime.Add(ingestDelayDuration * -1)
startTime = startTime.Add(ingestDelayDuration * -1)
}
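The ingest-delay handling above parses the delay string from the metric descriptor's metadata and shifts both ends of the query window back by that amount, so the exporter only asks for points Monitoring has finished ingesting. A standalone sketch of that adjustment (the delay value and window are made up):

```go
package main

import (
	"fmt"
	"log/slog"
	"os"
	"time"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))

	endTime := time.Now()
	startTime := endTime.Add(-5 * time.Minute)

	ingestDelay := "210s" // e.g. MetricDescriptor.Metadata.IngestDelay
	ingestDelayDuration, err := time.ParseDuration(ingestDelay)
	if err != nil {
		logger.Error("error parsing ingest delay from metric metadata", "err", err, "delay", ingestDelay)
		return
	}
	logger.Debug("adding ingest delay", "delay", ingestDelay)

	// Shift the whole query window back so only fully ingested points are requested.
	endTime = endTime.Add(ingestDelayDuration * -1)
	startTime = startTime.Add(ingestDelayDuration * -1)

	fmt.Println("query window:", startTime.Format(time.RFC3339), "->", endTime.Format(time.RFC3339))
}
```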
@@ -313,7 +312,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
}
}

level.Debug(c.logger).Log("msg", "retrieving Google Stackdriver Monitoring metrics with filter", "filter", filter)
c.logger.Debug("retrieving Google Stackdriver Monitoring metrics with filter", "filter", filter)

timeSeriesListCall := c.monitoringService.Projects.TimeSeries.List(utils.ProjectResource(c.projectID)).
Filter(filter).
@@ -324,15 +323,15 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
c.apiCallsTotalMetric.Inc()
page, err := timeSeriesListCall.Do()
if err != nil {
level.Error(c.logger).Log("msg", "error retrieving Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
c.logger.Error("error retrieving Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
errChannel <- err
break
}
if page == nil {
break
}
if err := c.reportTimeSeriesMetrics(page, metricDescriptor, ch, begun); err != nil {
level.Error(c.logger).Log("msg", "error reporting Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
c.logger.Error("error reporting Time Series metrics for descriptor", "descriptor", metricDescriptor.Type, "err", err)
errChannel <- err
break
}
@@ -368,7 +367,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
}

if cached := c.descriptorCache.Lookup(metricsTypePrefix); cached != nil {
level.Debug(c.logger).Log("msg", "using cached Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
c.logger.Debug("using cached Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
if err := metricDescriptorsFunction(cached); err != nil {
errChannel <- err
}
@@ -381,7 +380,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
return metricDescriptorsFunction(r.MetricDescriptors)
}

level.Debug(c.logger).Log("msg", "listing Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
c.logger.Debug("listing Google Stackdriver Monitoring metric descriptors starting with", "prefix", metricsTypePrefix)
if err := c.monitoringService.Projects.MetricDescriptors.List(utils.ProjectResource(c.projectID)).
Filter(filter).
Pages(ctx, callback); err != nil {
@@ -396,7 +395,7 @@ func (c *MonitoringCollector) reportMonitoringMetrics(ch chan<- prometheus.Metri
wg.Wait()
close(errChannel)

level.Debug(c.logger).Log("msg", "Done reporting monitoring metrics")
c.logger.Debug("Done reporting monitoring metrics")
return <-errChannel
}

@@ -500,12 +499,12 @@ func (c *MonitoringCollector) reportTimeSeriesMetrics(
if err == nil {
timeSeriesMetrics.CollectNewConstHistogram(timeSeries, newestEndTime, labelKeys, dist, buckets, labelValues, timeSeries.MetricKind)
} else {
level.Debug(c.logger).Log("msg", "discarding", "resource", timeSeries.Resource.Type, "metric",
c.logger.Debug("discarding", "resource", timeSeries.Resource.Type, "metric",
timeSeries.Metric.Type, "err", err)
}
continue
default:
level.Debug(c.logger).Log("msg", "discarding", "value_type", timeSeries.ValueType, "metric", timeSeries)
c.logger.Debug("discarding", "value_type", timeSeries.ValueType, "metric", timeSeries)
continue
}

@@ -569,7 +568,7 @@ func (c *MonitoringCollector) generateHistogramBuckets(
func (c *MonitoringCollector) keyExists(labelKeys []string, key string) bool {
for _, item := range labelKeys {
if item == key {
level.Debug(c.logger).Log("msg", "Found duplicate label key", "key", key)
c.logger.Debug("Found duplicate label key", "key", key)
return true
}
}
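Within the collector itself, the logger is now a plain *slog.Logger field, scoped once in the constructor with logger.With("project_id", projectID) so every record it emits carries the project label. A rough sketch of that injection pattern, using hypothetical names rather than the exporter's real wiring:

```go
package main

import (
	"log/slog"
	"os"
)

// collector mirrors the shape used in monitoring_collector.go: the logger is a
// plain *slog.Logger field, already scoped to the project.
type collector struct {
	projectID string
	logger    *slog.Logger
}

func newCollector(projectID string, logger *slog.Logger) *collector {
	return &collector{
		projectID: projectID,
		logger:    logger.With("project_id", projectID), // every record carries project_id
	}
}

func (c *collector) scrape() {
	c.logger.Debug("retrieving Google Stackdriver Monitoring metrics for descriptor",
		"descriptor", "compute.googleapis.com/instance/cpu/utilization")
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	newCollector("my-project", logger).scrape()
}
```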
15 changes: 7 additions & 8 deletions delta/counter.go
@@ -15,13 +15,12 @@ package delta

import (
"fmt"
"log/slog"
"sort"
"strings"
"sync"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"google.golang.org/api/monitoring/v3"

"github.com/prometheus-community/stackdriver_exporter/collectors"
@@ -36,11 +35,11 @@ type MetricEntry struct {
type InMemoryCounterStore struct {
store *sync.Map
ttl time.Duration
logger log.Logger
logger *slog.Logger
}

// NewInMemoryCounterStore returns an implementation of CounterStore which is persisted in-memory
func NewInMemoryCounterStore(logger log.Logger, ttl time.Duration) *InMemoryCounterStore {
func NewInMemoryCounterStore(logger *slog.Logger, ttl time.Duration) *InMemoryCounterStore {
store := &InMemoryCounterStore{
store: &sync.Map{},
logger: logger,
@@ -68,19 +67,19 @@ func (s *InMemoryCounterStore) Increment(metricDescriptor *monitoring.MetricDesc
existing := entry.Collected[key]

if existing == nil {
level.Debug(s.logger).Log("msg", "Tracking new counter", "fqName", currentValue.FqName, "key", key, "current_value", currentValue.Value, "incoming_time", currentValue.ReportTime)
s.logger.Debug("Tracking new counter", "fqName", currentValue.FqName, "key", key, "current_value", currentValue.Value, "incoming_time", currentValue.ReportTime)
entry.Collected[key] = currentValue
return
}

if existing.ReportTime.Before(currentValue.ReportTime) {
level.Debug(s.logger).Log("msg", "Incrementing existing counter", "fqName", currentValue.FqName, "key", key, "current_value", existing.Value, "adding", currentValue.Value, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
s.logger.Debug("Incrementing existing counter", "fqName", currentValue.FqName, "key", key, "current_value", existing.Value, "adding", currentValue.Value, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
currentValue.Value = currentValue.Value + existing.Value
entry.Collected[key] = currentValue
return
}

level.Debug(s.logger).Log("msg", "Ignoring old sample for counter", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
s.logger.Debug("Ignoring old sample for counter", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
}

func toCounterKey(c *collectors.ConstMetric) uint64 {
@@ -118,7 +117,7 @@ func (s *InMemoryCounterStore) ListMetrics(metricDescriptorName string) []*colle
for key, collected := range entry.Collected {
//Scan and remove metrics which are outside the TTL
if ttlWindowStart.After(collected.CollectionTime) {
level.Debug(s.logger).Log("msg", "Deleting counter entry outside of TTL", "key", key, "fqName", collected.FqName)
s.logger.Debug("Deleting counter entry outside of TTL", "key", key, "fqName", collected.FqName)
delete(entry.Collected, key)
continue
}
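The counter store's increment logic is unchanged apart from the logging calls: track a sample it has not seen, add the incoming delta onto the stored value when the sample is newer, and drop it when it is older than what was already reported (stale entries are pruned by TTL in ListMetrics). A simplified sketch of that decision, with cut-down types standing in for collectors.ConstMetric:

```go
package main

import (
	"log/slog"
	"os"
	"time"
)

// sample is a cut-down stand-in for collectors.ConstMetric.
type sample struct {
	Value      float64
	ReportTime time.Time
}

func increment(logger *slog.Logger, collected map[string]*sample, key string, incoming *sample) {
	existing := collected[key]
	switch {
	case existing == nil:
		logger.Debug("Tracking new counter", "key", key, "current_value", incoming.Value)
		collected[key] = incoming
	case existing.ReportTime.Before(incoming.ReportTime):
		logger.Debug("Incrementing existing counter", "key", key, "current_value", existing.Value, "adding", incoming.Value)
		incoming.Value += existing.Value // deltas accumulate into a running total
		collected[key] = incoming
	default:
		logger.Debug("Ignoring old sample for counter", "key", key)
	}
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	collected := map[string]*sample{}
	increment(logger, collected, "k", &sample{Value: 2, ReportTime: time.Now()})
	increment(logger, collected, "k", &sample{Value: 3, ReportTime: time.Now().Add(time.Second)})
}
```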
4 changes: 2 additions & 2 deletions delta/counter_test.go
@@ -18,7 +18,7 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promslog"
"google.golang.org/api/monitoring/v3"

"github.com/prometheus-community/stackdriver_exporter/collectors"
@@ -31,7 +31,7 @@ var _ = Describe("Counter", func() {
descriptor := &monitoring.MetricDescriptor{Name: "This is a metric"}

BeforeEach(func() {
store = delta.NewInMemoryCounterStore(promlog.New(&promlog.Config{}), time.Minute)
store = delta.NewInMemoryCounterStore(promslog.New(&promslog.Config{}), time.Minute)
metric = &collectors.ConstMetric{
FqName: "counter_name",
LabelKeys: []string{"labelKey"},
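In the tests, promlog is replaced by promslog from prometheus/common, whose New(&promslog.Config{}) returns a ready-to-use *slog.Logger; the zero-value Config is assumed here to give sensible defaults, which is all the updated tests rely on. A minimal usage sketch:

```go
package main

import (
	"time"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Zero-value Config: defaults are fine for tests, no flag plumbing needed.
	logger := promslog.New(&promslog.Config{})

	// The returned *slog.Logger is what NewInMemoryCounterStore and
	// NewInMemoryHistogramStore now expect as their first argument.
	logger.Info("store created", "ttl", time.Minute)
}
```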
15 changes: 7 additions & 8 deletions delta/histogram.go
@@ -15,13 +15,12 @@ package delta

import (
"fmt"
"log/slog"
"sort"
"strings"
"sync"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"google.golang.org/api/monitoring/v3"

"github.com/prometheus-community/stackdriver_exporter/collectors"
@@ -36,11 +35,11 @@ type HistogramEntry struct {
type InMemoryHistogramStore struct {
store *sync.Map
ttl time.Duration
logger log.Logger
logger *slog.Logger
}

// NewInMemoryHistogramStore returns an implementation of HistogramStore which is persisted in-memory
func NewInMemoryHistogramStore(logger log.Logger, ttl time.Duration) *InMemoryHistogramStore {
func NewInMemoryHistogramStore(logger *slog.Logger, ttl time.Duration) *InMemoryHistogramStore {
store := &InMemoryHistogramStore{
store: &sync.Map{},
logger: logger,
@@ -68,20 +67,20 @@ func (s *InMemoryHistogramStore) Increment(metricDescriptor *monitoring.MetricDe
existing := entry.Collected[key]

if existing == nil {
level.Debug(s.logger).Log("msg", "Tracking new histogram", "fqName", currentValue.FqName, "key", key, "incoming_time", currentValue.ReportTime)
s.logger.Debug("Tracking new histogram", "fqName", currentValue.FqName, "key", key, "incoming_time", currentValue.ReportTime)
entry.Collected[key] = currentValue
return
}

if existing.ReportTime.Before(currentValue.ReportTime) {
level.Debug(s.logger).Log("msg", "Incrementing existing histogram", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
s.logger.Debug("Incrementing existing histogram", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
currentValue.MergeHistogram(existing)
// Replace the existing histogram by the new one after merging it.
entry.Collected[key] = currentValue
return
}

level.Debug(s.logger).Log("msg", "Ignoring old sample for histogram", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
s.logger.Debug("Ignoring old sample for histogram", "fqName", currentValue.FqName, "key", key, "last_reported_time", existing.ReportTime, "incoming_time", currentValue.ReportTime)
}

func toHistogramKey(hist *collectors.HistogramMetric) uint64 {
@@ -119,7 +118,7 @@ func (s *InMemoryHistogramStore) ListMetrics(metricDescriptorName string) []*col
for key, collected := range entry.Collected {
// Scan and remove metrics which are outside the TTL
if ttlWindowStart.After(collected.CollectionTime) {
level.Debug(s.logger).Log("msg", "Deleting histogram entry outside of TTL", "key", key, "fqName", collected.FqName)
s.logger.Debug("Deleting histogram entry outside of TTL", "key", key, "fqName", collected.FqName)
delete(entry.Collected, key)
continue
}
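The histogram store follows the same newer-wins rule as the counter store, except that a newer sample is merged into the previously collected histogram (via MergeHistogram) and the merged result replaces the stored entry. A compact sketch with merging reduced to a bucket-wise add (types are illustrative, not the exporter's own):

```go
package main

import (
	"log/slog"
	"os"
	"time"
)

// hist is a cut-down stand-in for collectors.HistogramMetric.
type hist struct {
	Buckets    map[float64]uint64
	ReportTime time.Time
}

// mergeInto adds the previously stored buckets into the incoming histogram,
// mirroring what MergeHistogram does before the entry is replaced.
func mergeInto(incoming, existing *hist) {
	for bound, count := range existing.Buckets {
		incoming.Buckets[bound] += count
	}
}

func increment(logger *slog.Logger, collected map[string]*hist, key string, incoming *hist) {
	existing := collected[key]
	switch {
	case existing == nil:
		logger.Debug("Tracking new histogram", "key", key)
		collected[key] = incoming
	case existing.ReportTime.Before(incoming.ReportTime):
		logger.Debug("Incrementing existing histogram", "key", key)
		mergeInto(incoming, existing)
		collected[key] = incoming // the merged histogram replaces the old entry
	default:
		logger.Debug("Ignoring old sample for histogram", "key", key)
	}
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	collected := map[string]*hist{}
	increment(logger, collected, "k", &hist{Buckets: map[float64]uint64{1: 2}, ReportTime: time.Now()})
	increment(logger, collected, "k", &hist{Buckets: map[float64]uint64{1: 5}, ReportTime: time.Now().Add(time.Second)})
}
```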
4 changes: 2 additions & 2 deletions delta/histogram_test.go
@@ -18,7 +18,7 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promslog"
"google.golang.org/api/monitoring/v3"

"github.com/prometheus-community/stackdriver_exporter/collectors"
@@ -33,7 +33,7 @@ var _ = Describe("HistogramStore", func() {
bucketValue := uint64(1000)

BeforeEach(func() {
store = delta.NewInMemoryHistogramStore(promlog.New(&promlog.Config{}), time.Minute)
store = delta.NewInMemoryHistogramStore(promslog.New(&promslog.Config{}), time.Minute)
histogram = &collectors.HistogramMetric{
FqName: "histogram_name",
LabelKeys: []string{"labelKey"},
12 changes: 6 additions & 6 deletions go.mod
@@ -1,17 +1,16 @@
module github.com/prometheus-community/stackdriver_exporter

go 1.21
go 1.22

require (
github.com/PuerkitoBio/rehttp v1.4.0
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/fatih/camelcase v1.0.0
github.com/go-kit/log v0.2.1
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.34.1
github.com/prometheus/client_golang v1.20.2
github.com/prometheus/common v0.59.1
github.com/prometheus/exporter-toolkit v0.11.0
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.60.0
github.com/prometheus/exporter-toolkit v0.13.0
golang.org/x/net v0.29.0
golang.org/x/oauth2 v0.23.0
google.golang.org/api v0.199.0
@@ -27,7 +26,6 @@ require (
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -38,6 +36,8 @@ require (
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mdlayher/socket v0.4.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/nxadm/tail v1.4.8 // indirect