Use a declarative API to wrap your functions and capture Prometheus metrics and logs for each invocation.
```bash
yarn add @osskit/monitor
```
```ts
import { createMonitor } from '@osskit/monitor';

export const monitor = createMonitor({ scope: 'metrics' });

const result1 = await monitor('query', async () => db.query());
const result2 = await monitor('update', async () => db.update());

// Custom labeling
export const customMonitor = createMonitor<['my_label']>({ scope: 'metrics' });

const customLabels = (id: string) => customMonitor('query', async () => db.query(id), { context: { key: 'myId' }, labeling: { my_label: 'label' } });
```
```ts
import monitor from '@osskit/monitor';

const result = await monitor('query', async () => db.query());

// Custom labeling
const customLabels = (id: string) => monitor('query', async () => db.query(id), { context: { key: 'myId' }, labeling: { my_label: 'label' } });
```
```ts
import { createMonitor } from '@osskit/monitor';

export const monitor = createMonitor({ scope: 'metrics' });

// Context
const result = (id: string) => monitor('query', async () => db.query(id), { context: { id } });

// Parse & Log Results
const logResults = (id: string) => monitor('query', async () => db.query(id), { logResult: true, parseResult: (res) => res.prop });

// Parse Error
const errored = (id: string) => monitor('query', async () => db.query(id), { logResult: true, parseError: (e) => e.statusCode });

// Log Execution Start
const executionStart = (id: string) => monitor('query', async () => db.query(id), { logExecutionStart: true });
```
```ts
import { setGlobalOptions, setGlobalContext } from '@osskit/monitor';
import logger from './logger.js';

setGlobalOptions({
  context: { globalContextId: 'bla' },
  logResult: true,
  logExecutionStart: false,
  parseError: (e) => console.log(e),
  prometheusBuckets: [0.0001, 0.1, 0.5, 10],
  logger,
  errorLogLevel: 'fatal',
});

setGlobalContext(() => getDynamicContext());
```
`createMonitor({ scope })`

- `scope` — Type: `string`. The scope of the monitor's metrics; it is used as the Prometheus metric name.
- Returns a monitor function: `<T>(method: string, callable: () => T, options?: MonitorOptions<T>)`

`monitor(method, callable, options?)`

- `method` — Type: `string`. Used as the `method` label of the metric, or as the metric name if no parent scope was declared.
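As an illustration of how `scope` and `method` combine (a minimal sketch; `db` is a placeholder client as in the examples above):

```ts
import monitor, { createMonitor } from '@osskit/monitor';

// Scoped: 'metrics' is used as the Prometheus metric name and 'query' becomes the `method` label.
const scoped = createMonitor({ scope: 'metrics' });
await scoped('query', async () => db.query());

// Unscoped: with no parent scope, 'query' itself is used as the metric name.
await monitor('query', async () => db.query());
```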
`setGlobalOptions(options)`

- Sets options that will be used globally for all monitor invocations.

`setGlobalContext(() => context)`

- Takes a function that returns a global context to include in the logs of all monitor invocations.
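For example, one way to supply a dynamic per-request context (a sketch; the `AsyncLocalStorage` wiring and the `requestId` field are assumptions, not part of the library):

```ts
import { AsyncLocalStorage } from 'node:async_hooks';
import { setGlobalContext } from '@osskit/monitor';

// Hypothetical per-request store, populated by your HTTP framework's middleware.
const requestStore = new AsyncLocalStorage<{ requestId: string }>();

// Return whatever should be attached to every monitor log; here, the current request id.
setGlobalContext(() => ({ requestId: requestStore.getStore()?.requestId }));
```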
The options accepted by each monitor invocation (`MonitorOptions<T>`):

| Parameter | Description |
|---|---|
| `context?: Record<string, any>` | Add context that will be logged with all of the method's logs |
| `logResult?: boolean` | Log the method's result |
| `logExecutionStart?: boolean` | Log the start of the method's execution (`method.start`) |
| `parseResult?: (res: any) => any` | Transform the method's result before it is returned |
| `parseError?: (e: any) => any` | If the method errored, transform the error that will be thrown |
| `errorLogLevel?: pino.Level` | If the method errored, the log level to use for the message (default: `error`) |
| `labeling?: Record<string, string>` | Add custom labeled counters using the given keys and values |
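Several of these options combined in one call (a sketch; `api.getUser` and the `source` label are hypothetical):

```ts
import { createMonitor } from '@osskit/monitor';

const monitor = createMonitor<['source']>({ scope: 'users' });

const getUser = (id: string) =>
  monitor('fetchUser', async () => api.getUser(id), {
    context: { id },                        // logged with every log line of this call
    logResult: true,
    parseResult: (res) => ({ id: res.id }), // transform the result that will be returned
    parseError: (e) => e.statusCode,        // transform the error that will be thrown
    errorLogLevel: 'warn',
    labeling: { source: 'user_service' },   // custom labeled counter
  });
```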
The options accepted by `setGlobalOptions`:

| Parameter | Description |
|---|---|
| `logResult?: boolean` | Log the monitored methods' results |
| `logExecutionStart?: boolean` | Log the start of each method's execution (`method.start`) |
| `parseError?: (e: any) => any` | If a method errored, transform the error that will be thrown |
| `prometheusBuckets?: number[]` | Use this Prometheus bucket list for monitor metrics across all methods |
| `logger?: BaseLogger` | Supply a pino `BaseLogger` for monitor to use when logging results |
| `errorLogLevel?: pino.Level` | If a method errored, the log level to use for the message (default: `error`) |
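As a concrete variant of the global setup above, wiring an actual pino instance (a sketch; the bucket values are only illustrative):

```ts
import pino from 'pino';
import { setGlobalOptions } from '@osskit/monitor';

setGlobalOptions({
  logger: pino({ level: 'info' }),             // any pino BaseLogger instance
  prometheusBuckets: [0.005, 0.05, 0.5, 1, 5], // illustrative histogram buckets
  logResult: true,
  errorLogLevel: 'warn',
});
```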