Compare commits

..

No commits in common. "main" and "v0.9" have entirely different histories.
main ... v0.9

265 changed files with 6976 additions and 18783 deletions

2
.github/CODEOWNERS vendored
View file

@ -1,2 +1,2 @@
* @c-cube
* @c-cube @mattjbray

View file

@ -1,30 +0,0 @@
name: format
on:
push:
branches:
- main
pull_request:
jobs:
format:
name: format
strategy:
matrix:
ocaml-compiler:
- '5.3'
runs-on: 'ubuntu-latest'
steps:
- uses: actions/checkout@main
- name: Use OCaml ${{ matrix.ocaml-compiler }}
uses: ocaml/setup-ocaml@v3
with:
ocaml-compiler: ${{ matrix.ocaml-compiler }}
dune-cache: true
allow-prerelease-opam: true
- run: opam install ocamlformat.0.27.0
- run: opam exec -- make format-check

31
.github/workflows/gh-pages.yml vendored Normal file
View file

@ -0,0 +1,31 @@
name: github pages
on:
push:
branches:
- main
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@main
- uses: ocaml/setup-ocaml@v2
with:
ocaml-compiler: '5.1.x'
dune-cache: true
allow-prerelease-opam: true
- name: Deps
run: opam install odig opentelemetry opentelemetry-lwt opentelemetry-client-ocurl opentelemetry-cohttp-lwt
- name: Build
run: opam exec -- odig odoc --cache-dir=_doc/ opentelemetry opentelemetry-lwt opentelemetry-client-ocurl opentelemetry-cohttp-lwt
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./_doc/html
enable_jekyll: false

View file

@ -9,7 +9,7 @@ on:
jobs:
build:
strategy:
fail-fast: false
fail-fast: true
matrix:
os:
- ubuntu-latest
@ -19,22 +19,21 @@ jobs:
- 4.08.x
- 4.13.x
- 5.0.x
- 5.3.x
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
submodules: recursive
# needed for depext to work
- run: sudo apt-get update && sudo apt-get install mccs
- run: sudo apt-get update
if: ${{ matrix.os == 'ubuntu-latest' }}
- name: Use OCaml ${{ matrix.ocaml-compiler }}
uses: ocaml/setup-ocaml@v3
uses: ocaml/setup-ocaml@v2
with:
ocaml-compiler: ${{ matrix.ocaml-compiler }}
opam-depext-flags: --with-test
@ -42,24 +41,15 @@ jobs:
allow-prerelease-opam: true
- run: |
opam pin trace https://github.com/ocaml-tracing/ocaml-trace.git#main -y -n
opam pin https://github.com/ocaml-tracing/ambient-context.git#HEAD -y -n
opam install pbrt.4.0 -y
opam install ambient-context
opam pin ocaml-protoc 3.0.1 -y -n
opam pin pbrt 3.0.1 -y -n
opam install pbrt -y
# We cannot install packages that need eio on ocaml versions before 5
- run: |
packages=$(ls ./*.opam | grep -v eio)
opam install $packages --deps-only --with-test --solver=mccs
if: ${{ ! (startsWith(matrix.ocaml-compiler, '5')) }}
- run: opam install . --deps-only --with-test
# We should be able to install all packages on ocaml 5
- run: opam install . --deps-only --with-test --solver=mccs
if: ${{ startsWith(matrix.ocaml-compiler, '5') }}
- run: opam exec -- dune build @install -p opentelemetry,opentelemetry-lwt,opentelemetry-client-ocurl,opentelemetry-cohttp-lwt,opentelemetry-client-cohttp-lwt
- run: opam exec -- dune build @install -p opentelemetry,opentelemetry-client,opentelemetry-lwt,opentelemetry-client-ocurl,opentelemetry-cohttp-lwt,opentelemetry-client-cohttp-lwt,opentelemetry-logs
- run: opam install trace
- run: opam install trace.0.7
- run: opam exec -- dune build @install -p opentelemetry
- run: opam install ocaml-protoc

5
.gitignore vendored
View file

@ -5,8 +5,3 @@ _opam
*.db
.merlin
*.install
*.exe
*.tmp
src/lib/version.ml
src/lib/.git_index_path
src/lib/.git_index.lnk

View file

@ -1,4 +1,4 @@
version = 0.27.0
version = 0.24.1
profile=conventional
margin=80
if-then-else=k-r

View file

@ -1,85 +1,4 @@
## 0.20
- major refactor: split library into `opentelemetry.core`, `opentelemetry`,
`opentelemetry.util`, `opentelemetry.emitter`, `opentelemetry.atomic`, revamp internals
- per-signal providers: separate trace, meter, and logger providers replace
the single monolithic exporter
- `opentelemetry.ambient-context` is now a standalone library, once again
- new `opentelemetry-client-ocurl-lwt` package
- client: split `opentelemetry-client-sync` off of the main client library
- client: add support for `http/json` protocol alongside `http/protobuf`
- client: add HTTP retry with exponential backoff
- client: overhaul bounded queue; introduce generic consumer framework
- client: add `Exporter_add_batching`, `Emitter_add_batching`, `Emitter_sample`,
`Emitter_limit_interval` combinators. Batching is factored out of individual
client libraries.
- client: add sampler as an emitter transformer
- client: add `exporter_stdout` and `debug_exporter`
- client: add `self_metrics` and `self_debug` to exporters
- client: add `after_shutdown` callback in ocurl/ocurl-lwt clients
- `Span.dummy`: inert span that is never modified
- `Span.record_exception` now also sets the span status to error
- `Span.set_span_status` added in `opentelemetry.trace`
- `Span`: carry flags to `span_link`
- `Span`: now mutable thanks to ocaml-protoc 4.0, replaces old `Scope.t` entirely
- `Meter.emit` and `Meter_provider.emit_l` added
- emitter: add `flat_map`, `tap`, `to_list`, `enabled` combinators
- clock abstraction added; `ptime` used by default in logger and metrics
- interval limiter used for `metrics_callbacks`
- update to OTEL spec 1.8.0
- update semantic conventions
- various bug fixes and performance improvements
## 0.12
- breaking: change `Collector.cleanup` so it takes a callback
- feat: add Eio collector
- feat: add Logs integration
- Specify and document the Signal_gatherer API
- feat: add `Globals.service_version`
- add `pp` implementation for `Client_ocurl.Config`
- feat: adapt to trace 0.10
- fix concurrency issues, make the libraries thread safe
- add many tests
- fix: opentelemetry exception attr spec
- Add tests for Opentelemetry.Client.Config
- large refactorings to factor out batching logic out of cohttp-lwt client,
also encoding logic. The code for collectors is now much cleaner.
## 0.11.2
- fix: opentelemetry-client-ocurl: don't block signals on Windows
- fix otel-client-ocurl: use ptime timestamps for self metrics
## 0.11.1
- add missing sample argument to `Traceparent.to_value`
## 0.11
- add `Span_kind.t`, add {kind,set_kind} to `Scope`
- expose `Span_status` types
- add `Scope.set_span_status`
- add `record_exception`
- otel.trace: extension points for links, record_exn, kind
- otel.trace: set status of a span based on `exception.message`
- add cohttp upper bound version constraint
- in backends, call `tick()` before cleaning up
- reduce memory usage of `Scope.t` (@tatchi)
- remove dependency on ambient-context, vendor/inline/specialize it
## 0.10
- feat: add support for per-signal urls (by @tatchi)
- build: disable protobuf regeneration during normal library use
- fix: emit GC metrics even in the absence of custom metrics
## 0.9
- compat with trace 0.7

View file

@ -11,23 +11,10 @@ clean:
@dune clean
protoc-gen:
FORCE_GENPROTO=true dune build @lint
update-submodules:
git submodule update --init
doc:
@dune build @doc
PACKAGES=$(shell opam show . -f name)
odig-doc:
@odig odoc --cache-dir=_doc/ $(PACKAGES)
@dune build @lint
format:
@dune build @fmt --auto-promote
format-check:
@dune build $(DUNE_OPTS) @fmt --display=quiet
@dune build @fmt
WATCH ?= @all
watch:

View file

@ -7,11 +7,9 @@ connectors to talk to opentelemetry software such as [jaeger](https://www.jaeger
- library `opentelemetry` should be used to instrument your code
and possibly libraries. It doesn't communicate with anything except
an exporter (default: no-op);
- library `opentelemetry-client-ocurl` is an exporter that communicates
via http+protobuf with some collector (otelcol, datadog-agent, etc.) using cURL bindings;
- library `opentelemetry-client-cohttp-lwt` is an exporter that communicates
via http+protobuf with some collector using cohttp.
a backend (default: dummy backend)
- library `opentelemetry-client-ocurl` is a backend that communicates
via http+protobuf with some collector (otelcol, datadog-agent, etc.)
## License
@ -28,7 +26,7 @@ MIT
* [x] batching, perf, etc.
- [ ] async collector relying on ocurl-multi
- [ ] interface with `logs` (carry context around)
- [x] implicit scope (via vendored `ambient-context`, see `opentelemetry.ambient-context`)
- [x] implicit scope (via [ambient-context][])
## Use
@ -36,17 +34,17 @@ For now, instrument traces/spans, logs, and metrics manually:
```ocaml
module Otel = Opentelemetry
let (let@) = (@@)
let (let@) f x = f x
let foo () =
let@ span = Otel.Tracer.with_ "foo"
let@ scope = Otel.Trace.with_ "foo"
~attrs:["hello", `String "world"] in
do_work ();
let now = Otel.Clock.now Otel.Meter.default.clock in
Otel.Meter.emit1 Otel.Meter.default
Otel.Metrics.(gauge ~name:"foo.x" [int ~now 42]);
Otel.Span.add_event span (Otel.Event.make "work done");
do_more_work ();
do_work();
Otel.Metrics.(
emit [
gauge ~name:"foo.x" [int 42];
]);
do_more_work();
()
```
@ -55,72 +53,47 @@ let foo () =
If you're writing a top-level application, you need to perform some initial configuration.
1. Set the [`service_name`][];
2. optionally configure [ambient-context][] with the appropriate storage for your environment — TLS, Lwt, Eio;
3. and install an exporter (usually by calling your client library's `with_setup` function.)
2. configure our [ambient-context][] dependency with the appropriate storage for your environment — TLS, Lwt, Eio ... (see [their docs][install-ambient-storage] for more details);
3. and install a [`Collector`][] (usually by calling your collector's `with_setup` function.)
For example, if your application is using Lwt, and you're using `ocurl` as your collector, you might do something like this:
```ocaml
let main () =
Otel.Globals.service_name := "my_service";
Otel.Gc_metrics.setup ();
Otel.GC_metrics.basic_setup();
Opentelemetry_ambient_context.set_storage_provider (Opentelemetry_ambient_context_lwt.storage ());
Ambient_context.with_storage_provider (Ambient_context_lwt.storage ()) @@ fun () ->
Opentelemetry_client_ocurl.with_setup () @@ fun () ->
(* … *)
foo ();
(* … *)
```
[`service_name`]: <https://v3.ocaml.org/p/opentelemetry/latest/doc/Opentelemetry/Globals/index.html#val-service_name>
[ambient-context]: now vendored as `opentelemetry.ambient-context`, formerly <https://v3.ocaml.org/p/ambient-context>
## Migration v0.12 → v0.13
see `doc/migration_guide_v0.13.md`
[`service_name`]: <https://v3.ocaml.org/p/opentelemetry/0.5/doc/Opentelemetry/Globals/index.html#val-service_name>
[`Collector`]: <https://v3.ocaml.org/p/opentelemetry/0.5/doc/Opentelemetry/Collector/index.html>
[ambient-context]: <https://v3.ocaml.org/p/ambient-context>
[install-ambient-storage]: <https://github.com/ELLIOTTCABLE/ocaml-ambient-context#-as-a-top-level-application>
## Configuration
### Environment Variables
The library is configurable via `Opentelemetry.Config`, via the standard
opentelemetry env variables, or with some custom environment variables.
The library supports standard OpenTelemetry environment variables:
- `OTEL_EXPORTER_OTLP_ENDPOINT` sets the http endpoint to send signals to
- `OTEL_OCAML_DEBUG=1` to print some debug messages from the opentelemetry library itself
- `OTEL_RESOURCE_ATTRIBUTES` sets a comma separated list of custom resource attributes
**General:**
- `OTEL_SDK_DISABLED` - disable the SDK (default: false)
- `OTEL_SERVICE_NAME` - service name
- `OTEL_RESOURCE_ATTRIBUTES` - comma-separated key=value resource attributes
- `OTEL_OCAML_DEBUG=1` - print debug messages from the opentelemetry library
## Collector opentelemetry-client-ocurl
**Exporter endpoints:**
- `OTEL_EXPORTER_OTLP_ENDPOINT` - base endpoint (default: http://localhost:4318)
- `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - traces endpoint
- `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` - metrics endpoint
- `OTEL_EXPORTER_OTLP_LOGS_ENDPOINT` - logs endpoint
**Exporter configuration:**
- `OTEL_EXPORTER_OTLP_PROTOCOL` - protocol: http/protobuf or http/json (default: http/protobuf)
**Headers:**
- `OTEL_EXPORTER_OTLP_HEADERS` - headers as comma-separated key=value pairs
- `OTEL_EXPORTER_OTLP_TRACES_HEADERS` - traces-specific headers
- `OTEL_EXPORTER_OTLP_METRICS_HEADERS` - metrics-specific headers
- `OTEL_EXPORTER_OTLP_LOGS_HEADERS` - logs-specific headers
## opentelemetry-client-ocurl
This is a synchronous exporter that uses the http+protobuf format
to send signals (metrics, traces, logs) to some collector (eg. `otelcol`
This is a synchronous collector that uses the http+protobuf format
to send signals (metrics, traces, logs) to some other collector (eg. `otelcol`
or the datadog agent).
Do note that it uses a thread pool and is incompatible
with uses of `fork` on some Unixy systems.
See [#68](https://github.com/imandra-ai/ocaml-opentelemetry/issues/68) for a possible workaround.
## Collector opentelemetry-client-cohttp-lwt
## opentelemetry-client-cohttp-lwt
This is a Lwt-friendly exporter that uses cohttp to send
signals to some collector (e.g. `otelcol`). It must be run
This is a Lwt-friendly collector that uses cohttp to send
signals to some other collector (e.g. `otelcol`). It must be run
inside a `Lwt_main.run` scope.
## Opentelemetry-trace

View file

@ -1,271 +0,0 @@
# Migration guide: v0.12 → v0.13
This guide covers breaking changes when upgrading from v0.12.
## 1. Backend setup: `Collector` → `Sdk` + `Exporter`
v0.12 used a first-class module `BACKEND` installed into a global slot via
`Collector.set_backend`. v0.13 replaces this with a plain record `Exporter.t`
installed via `Sdk.set`.
The `with_setup` helper in each client library still exists, so if you use that
you mainly need to rename the module.
```ocaml
(* v0.12 *)
Opentelemetry_client_ocurl.with_setup ~config () (fun () ->
(* your code *)
())
(* v0.13: same call, internals changed; ~stop removed, ~after_shutdown added *)
Opentelemetry_client_ocurl.with_setup
~after_shutdown:(fun _exp -> ())
~config () (fun () ->
(* your code *)
())
```
If you called `setup`/`remove_backend` manually:
```ocaml
(* v0.12 *)
Opentelemetry_client_ocurl.setup ~config ()
(* ... *)
Opentelemetry_client_ocurl.remove_backend ()
(* v0.13 *)
Opentelemetry_client_ocurl.setup ~config ()
(* ... *)
Opentelemetry_client_ocurl.remove_exporter ()
```
The `~stop:bool Atomic.t` parameter has been removed from the ocurl client.
Use `Sdk.active ()` (an `Aswitch.t`) to detect shutdown instead.
## 2. `Trace.with_` → `Tracer.with_`, callback gets a `Span.t`
The most common migration. The module is renamed and the callback argument type
changes from `Scope.t` to `Span.t`.
```ocaml
(* v0.12 *)
Trace.with_ "my-op" ~attrs:["k", `String "v"] (fun (scope : Scope.t) ->
Scope.add_event scope (fun () -> Event.make "something happened");
Scope.add_attrs scope (fun () -> ["extra", `Int 42]);
do_work ()
)
(* v0.13 *)
Tracer.with_ "my-op" ~attrs:["k", `String "v"] (fun (span : Span.t) ->
Span.add_event span (Event.make "something happened");
Span.add_attrs span ["extra", `Int 42];
do_work ()
)
```
`Trace` is kept as a deprecated alias for `Tracer`.
Key differences on the callback argument:
| v0.12 (`Scope.t`) | v0.13 (`Span.t`) |
|--------------------------------------------|--------------------------------------|
| `scope.trace_id` | `Span.trace_id span` |
| `scope.span_id` | `Span.id span` |
| `Scope.add_event scope (fun () -> ev)` | `Span.add_event span ev` |
| `Scope.add_attrs scope (fun () -> attrs)` | `Span.add_attrs span attrs` |
| `Scope.set_status scope st` | `Span.set_status span st` |
| `Scope.record_exception scope e bt` | `Span.record_exception span e bt` |
| `Scope.to_span_ctx scope` | `Span.to_span_ctx span` |
| `Scope.to_span_link scope` | `Span.to_span_link span` |
| `~scope:scope` (pass parent explicitly) | `~parent:span` |
The `~scope` parameter of `Trace.with_` is renamed to `~parent`:
```ocaml
(* v0.12 *)
Trace.with_ "child" ~scope:parent_scope (fun child -> ...)
(* v0.13 *)
Tracer.with_ "child" ~parent:parent_span (fun child -> ...)
```
In addition, `Scope.t` is entirely removed because `Span.t` is now mutable.
For additional efficiency, `Span.t` is directly encodable to protobuf
without the need to allocate further intermediate structures.
## 3. `Logs` → `Logger`, new emit helpers
The `Logs` module is renamed to `Logger` (`Logs` is kept as a deprecated alias).
Direct construction of log records and batch-emit is replaced by convenience
functions.
```ocaml
(* v0.12 *)
Logs.emit [
Logs.make_str ~severity:Severity_number_warn "something went wrong"
]
Logs.emit [
Logs.make_strf ~severity:Severity_number_info "processed %d items" n
]
(* v0.13: simple string *)
Logger.log ~severity:Severity_number_warn "something went wrong"
(* v0.13: formatted *)
Logger.logf ~severity:Severity_number_info (fun k -> k "processed %d items" n)
```
If you need to keep the trace/span correlation:
```ocaml
(* v0.12 *)
Logs.emit [
Logs.make_str ~trace_id ~span_id ~severity:Severity_number_info "ok"
]
(* v0.13 *)
Logger.log ~trace_id ~span_id ~severity:Severity_number_info "ok"
```
`Log_record.make_str` / `Log_record.make` still exist if you need to build
records manually and emit them via a `Logger.t`.
## 4. `Metrics.emit` → emit via a `Meter`
In v0.12 `Metrics.emit` was a top-level function that sent directly to the
collector. In v0.13 metrics go through a `Meter.t`. For most code the change
is mechanical:
```ocaml
(* v0.12 *)
Metrics.emit [
Metrics.gauge ~name:"queue.depth" [ Metrics.int ~now depth ]
]
(* v0.13: Meter.default emits to the global provider *)
Meter.emit1 Meter.default
(Metrics.gauge ~name:"queue.depth" [ Metrics.int ~now depth ])
```
`now` is now obtained from the meter's clock rather than `Timestamp_ns.now_unix_ns ()`:
```ocaml
(* v0.12 *)
let now = Timestamp_ns.now_unix_ns () in
Metrics.emit [ Metrics.sum ~name:"counter" [ Metrics.int ~now n ] ]
(* v0.13 *)
let now = Clock.now Meter.default.clock in
Meter.emit1 Meter.default
(Metrics.sum ~name:"counter" [ Metrics.int ~now n ])
```
## 5. `Metrics_callbacks.register` → `Meter.add_cb`
```ocaml
(* v0.12 *)
Metrics_callbacks.register (fun () ->
[ Metrics.gauge ~name:"foo" [ Metrics.int ~now:... 42 ] ])
(* v0.13: callback now receives a clock *)
Meter.add_cb (fun ~clock () ->
let now = Clock.now clock in
[ Metrics.gauge ~name:"foo" [ Metrics.int ~now 42 ] ])
```
After registering callbacks you must tell the SDK to drive them:
```ocaml
(* v0.13: call once after setup to schedule periodic emission *)
Meter.add_to_main_exporter Meter.default
```
In v0.12 this was automatic once `Metrics_callbacks.register` was called.
## 6. `GC_metrics.basic_setup` signature unchanged, `setup` changed
`GC_metrics.basic_setup ()` still works. The module has been renamed
to `Gc_metrics`, but the former name persists as a deprecated alias.
If you called the lower-level `GC_metrics.setup exp` directly:
```ocaml
(* v0.12 *)
GC_metrics.setup exporter
(* or *)
GC_metrics.setup_on_main_exporter ()
(* v0.13 *)
Gc_metrics.setup () (* uses Meter.default *)
(* or with a specific meter: *)
Gc_metrics.setup ~meter:my_meter ()
```
`GC_metrics.setup_on_main_exporter` has been removed.
## 7. `Collector.on_tick` → `Sdk.add_on_tick_callback`
```ocaml
(* v0.12 *)
Collector.on_tick (fun () -> do_background_work ())
(* v0.13 *)
Sdk.add_on_tick_callback (fun () -> do_background_work ())
```
## 8. `?service_name` parameter removed
`Trace.with_`, `Logs.emit`, and `Metrics.emit` accepted a `?service_name`
override. This is no longer supported per-call; set it once globally:
```ocaml
(* v0.12 *)
Trace.with_ "op" ~service_name:"my-svc" (fun _ -> ...)
(* v0.13: set globally before setup *)
Opentelemetry.Globals.service_name := "my-svc"
Tracer.with_ "op" (fun _ -> ...)
```
## 9. `create_backend` / `BACKEND` module type removed
If you held a reference to a backend module:
```ocaml
(* v0.12 *)
let (module B : Collector.BACKEND) =
Opentelemetry_client_ocurl.create_backend ~config ()
in
Collector.set_backend (module B)
(* v0.13 *)
let exp : Exporter.t =
Opentelemetry_client_ocurl.create_exporter ~config ()
in
Sdk.set exp
```
## 10. New features (no migration needed)
- **`Sdk.get_tracer/get_meter/get_logger`**: obtain a provider pre-stamped with
instrumentation-scope metadata (`~name`, `~version`, `~__MODULE__`).
- **`Trace_provider` / `Meter_provider` / `Log_provider`**: independent
per-signal providers; useful for testing or multi-backend setups.
- **`Dynamic_enricher`**: register callbacks that inject attributes into every
span and log record at creation time (wide events).
- **Batch**: much better handling of batching overall.
## Quick checklist
- [ ] `Trace.with_` → `Tracer.with_`; callback argument `Scope.t` → `Span.t`
- [ ] `Scope.add_event`/`add_attrs` → `Span.add_event`/`add_attrs` (no thunk wrapper)
- [ ] `~scope:` → `~parent:` in nested `with_` calls
- [ ] `Logs.emit [Logs.make_str ...]` → `Logger.log`/`Logger.logf`
- [ ] `Metrics.emit [...]` → `Meter.emit1 Meter.default ...`
- [ ] `Metrics_callbacks.register` → `Meter.add_cb` (+ call `Meter.add_to_main_exporter`)
- [ ] `GC_metrics.setup exp` → `Gc_metrics.setup ()`
- [ ] `Collector.on_tick` → `Sdk.add_on_tick_callback`
- [ ] Remove `?service_name` call-site overrides; set `Globals.service_name` once
- [ ] `create_backend` → `create_exporter`; `set_backend` → `Sdk.set`
- [ ] `~stop:bool Atomic.t` removed from ocurl client

9
dune
View file

@ -1,9 +1,4 @@
(env
(_
(flags
:standard
-warn-error
-a+8
-w
+a-4-30-40-41-42-44-48-58-70
-strict-sequence)))
(flags :standard -warn-error -a+8 -w +a-4-30-40-41-42-44-48-70
-strict-sequence)))

View file

@ -7,9 +7,7 @@
(source
(github imandra-ai/ocaml-opentelemetry))
(version 0.12)
(implicit_transitive_deps false)
(version 0.9)
(authors "the Imandra team and contributors")
@ -24,63 +22,38 @@
(package
(name opentelemetry)
(synopsis "Core library for instrumentation and serialization for https://opentelemetry.io")
(synopsis "Instrumentation for https://opentelemetry.io")
(depends
(ocaml
(>= "4.08"))
ptime
hmap
ambient-context
(odoc :with-doc)
(alcotest :with-test)
(pbrt
(and
(>= 4.0)
(< 5.0)))
(pbrt_yojson
(and
(>= 4.0)
(< 5.0)))
(ambient-context
(>= 0.2))
(and (>= 3.0) (< 4.0)))
(ocaml-lsp-server :with-dev-setup)
(ocamlformat
(and
:with-dev-setup
(>= 0.27)
(< 0.28)))
(mtime
(>= "1.4")))
(depopts atomic trace thread-local-storage lwt eio picos)
(>= 0.24)
(< 0.25))))
(depopts trace)
(conflicts
(trace
(< 0.12)))
(trace (< 0.7)))
(tags
(instrumentation tracing opentelemetry datadog jaeger)))
(package
(name opentelemetry-client)
(synopsis "Client SDK for https://opentelemetry.io")
(depends
(opentelemetry
(= :version))
(odoc :with-doc)
(alcotest :with-test)
(thread-local-storage
(and
(>= 0.2)
(< 0.3))))
(tags
(tracing opentelemetry sdk)))
(package
(name opentelemetry-lwt)
(synopsis "Lwt-compatible instrumentation for https://opentelemetry.io")
(depends
(ocaml
(>= "4.08"))
ambient-context
(opentelemetry
(= :version))
ambient-context-lwt
(cohttp-lwt-unix :with-test)
(odoc :with-doc)
(lwt
@ -102,8 +75,6 @@
; atomic ; vendored
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(ezcurl
(>= 0.2.3))
@ -111,51 +82,6 @@
(alcotest :with-test))
(synopsis "Collector client for opentelemetry, using http + ezcurl"))
(package
(name opentelemetry-client-ocurl-lwt)
(depends
(ocaml
(>= "4.08"))
(mtime
(>= "1.4"))
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(ezcurl-lwt
(>= 0.2.3))
ocurl
(lwt
(>= "5.3"))
(lwt_ppx
(>= "2.0"))
(alcotest :with-test))
(synopsis "Collector client for opentelemetry, using ezcurl-lwt"))
(package
(name opentelemetry-logs)
(depends
(ocaml
(>= "4.08"))
(opentelemetry
(= :version))
(odoc :with-doc)
(logs
(>= "0.7.0"))
(alcotest :with-test)
(containers :with-test)
(cohttp-lwt-unix :with-test)
(opentelemetry-client-cohttp-lwt
(and
:with-test
(= :version)))
(opentelemetry-cohttp-lwt
(and
:with-test
(= :version))))
(synopsis "Opentelemetry-based reporter for Logs"))
(package
(name opentelemetry-cohttp-lwt)
(depends
@ -165,14 +91,11 @@
(= :version))
(opentelemetry-lwt
(= :version))
ambient-context-lwt
(odoc :with-doc)
(lwt
(>= "5.3"))
(cohttp
(>= "6.0.0"))
(cohttp-lwt
(>= "6.0.0"))
(>= "4.0.0"))
(alcotest :with-test))
(synopsis "Opentelemetry tracing for Cohttp HTTP servers"))
@ -186,11 +109,6 @@
; for spans
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(opentelemetry-lwt
(= :version))
ambient-context-lwt
(odoc :with-doc)
(lwt
(>= "5.3"))
@ -198,35 +116,5 @@
(>= "2.0"))
cohttp-lwt
cohttp-lwt-unix
(alcotest :with-test)
(containers :with-test)
(opentelemetry-lwt
(and
:with-test
(= :version))))
(alcotest :with-test))
(synopsis "Collector client for opentelemetry, using cohttp + lwt"))
(package
(name opentelemetry-client-cohttp-eio)
(depends
(ocaml
(>= "5.00"))
(mtime
(>= "1.4"))
ca-certs
mirage-crypto-rng
ambient-context-eio
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(cohttp-eio
(>= 6.1.0))
(eio_main :with-test)
(tls-eio
(>= 2.0.1))
(alcotest :with-test)
(containers :with-test)
(cohttp-lwt-unix :with-test))
(synopsis "Collector client for opentelemetry, using cohttp + eio"))

View file

@ -1,2 +0,0 @@
#!/bin/sh
exec dune exec --profile=release tests/bin/emit1_ocurl_lwt.exe -- $@

View file

@ -1,2 +0,0 @@
#!/bin/sh
exec dune exec --profile=release tests/bin/emit1_stdout.exe -- $@

223
flake.lock generated
View file

@ -1,223 +0,0 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"mirage-opam-overlays": {
"flake": false,
"locked": {
"lastModified": 1710922379,
"narHash": "sha256-j4QREQDUf8oHOX7qg6wAOupgsNQoYlufxoPrgagD+pY=",
"owner": "dune-universe",
"repo": "mirage-opam-overlays",
"rev": "797cb363df3ff763c43c8fbec5cd44de2878757e",
"type": "github"
},
"original": {
"owner": "dune-universe",
"repo": "mirage-opam-overlays",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1751792365,
"narHash": "sha256-J1kI6oAj25IG4EdVlg2hQz8NZTBNYvIS0l4wpr9KcUo=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "1fd8bada0b6117e6c7eb54aad5813023eed37ccb",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"opam-nix": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils_2",
"mirage-opam-overlays": "mirage-opam-overlays",
"nixpkgs": "nixpkgs",
"opam-overlays": "opam-overlays",
"opam-repository": "opam-repository",
"opam2json": "opam2json"
},
"locked": {
"lastModified": 1762273592,
"narHash": "sha256-dXex1fPdmzj4xKWEWrcvbgin/iLFaxrt9vi305m6nUc=",
"owner": "tweag",
"repo": "opam-nix",
"rev": "98ca8f4401e996aeac38b6f14bf3a82d85b7add7",
"type": "github"
},
"original": {
"owner": "tweag",
"repo": "opam-nix",
"type": "github"
}
},
"opam-overlays": {
"flake": false,
"locked": {
"lastModified": 1741116009,
"narHash": "sha256-Z0PIW82fHJFvAv/JYpAffnp2DaOjLhsPutqyIrORZd0=",
"owner": "dune-universe",
"repo": "opam-overlays",
"rev": "e031bb64e33bf93be963e9a38b28962e6e14381f",
"type": "github"
},
"original": {
"owner": "dune-universe",
"repo": "opam-overlays",
"type": "github"
}
},
"opam-repository": {
"flake": false,
"locked": {
"lastModified": 1759971927,
"narHash": "sha256-aUZWd0KOpEnioBwqlwRU40rUFAqT3RTlojXt2oI3omY=",
"owner": "ocaml",
"repo": "opam-repository",
"rev": "551314ad1550478ec6be39bb0eaadd2569190464",
"type": "github"
},
"original": {
"owner": "ocaml",
"repo": "opam-repository",
"type": "github"
}
},
"opam2json": {
"inputs": {
"nixpkgs": [
"opam-nix",
"nixpkgs"
],
"systems": "systems_3"
},
"locked": {
"lastModified": 1749457947,
"narHash": "sha256-+QVm+HOYikF3wUhqSIV8qJbE/feSG+p48fgxIosbHS0=",
"owner": "tweag",
"repo": "opam2json",
"rev": "0ecd66fc2bfb25d910522c990dd36412259eac1f",
"type": "github"
},
"original": {
"owner": "tweag",
"repo": "opam2json",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": [
"opam-nix",
"nixpkgs"
],
"opam-nix": "opam-nix"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,49 +0,0 @@
{
inputs = {
opam-nix.url = "github:tweag/opam-nix";
flake-utils.url = "github:numtide/flake-utils";
nixpkgs.follows = "opam-nix/nixpkgs";
};
outputs = { self, flake-utils, opam-nix, nixpkgs, }@inputs:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
on = opam-nix.lib.${system};
localPackagesQuery = builtins.mapAttrs (_: pkgs.lib.last)
(on.listRepo (on.makeOpamRepo ./.));
devPackagesQuery = {
# You can add "development" packages here. They will get added to the devShell automatically.
ocaml-lsp-server = "*";
ocamlformat = "*";
};
query = devPackagesQuery // { ocaml-base-compiler = "5.3.0"; };
scope =
on.buildOpamProject' { resolveArgs.with-test = true; } ./. query;
overlay = final: prev:
{
# You can add overrides here
};
scope' = scope.overrideScope overlay;
# Packages from devPackagesQuery
devPackages = builtins.attrValues
(pkgs.lib.getAttrs (builtins.attrNames devPackagesQuery) scope');
# Packages in this workspace
packages =
pkgs.lib.getAttrs (builtins.attrNames localPackagesQuery) scope';
in {
legacyPackages = scope';
inherit packages;
## If you want to have a "default" package which will be built with just `nix build`, do this instead of `inherit packages;`:
# packages = packages // { default = packages.<your default package>; };
devShells.default = pkgs.mkShell {
inputsFrom = builtins.attrValues packages;
buildInputs = devPackages ++ [
# You can add packages from nixpkgs here
];
};
});
}

View file

@ -1,47 +0,0 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis: "Collector client for opentelemetry, using cohttp + eio"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "5.00"}
"mtime" {>= "1.4"}
"ca-certs"
"mirage-crypto-rng"
"ambient-context-eio"
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"cohttp-eio" {>= "6.1.0"}
"eio_main" {with-test}
"tls-eio" {>= "2.0.1"}
"alcotest" {with-test}
"containers" {with-test}
"cohttp-lwt-unix" {with-test}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -1,6 +1,6 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
version: "0.9"
synopsis: "Collector client for opentelemetry, using cohttp + lwt"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
@ -16,17 +16,12 @@ depends: [
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"opentelemetry-lwt" {= version}
"ambient-context-lwt"
"odoc" {with-doc}
"lwt" {>= "5.3"}
"lwt_ppx" {>= "2.0"}
"cohttp-lwt"
"cohttp-lwt-unix"
"alcotest" {with-test}
"containers" {with-test}
"opentelemetry-lwt" {with-test & = version}
]
build: [
["dune" "subst"] {dev}

View file

@ -1,43 +0,0 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis: "Collector client for opentelemetry, using ezcurl-lwt"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"ezcurl-lwt" {>= "0.2.3"}
"ocurl"
"lwt" {>= "5.3"}
"lwt_ppx" {>= "2.0"}
"alcotest" {with-test}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -1,6 +1,6 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
version: "0.9"
synopsis: "Collector client for opentelemetry, using http + ezcurl"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
@ -16,7 +16,6 @@ depends: [
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"ezcurl" {>= "0.2.3"}
"ocurl"

View file

@ -1,38 +0,0 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis: "Client SDK for https://opentelemetry.io"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
tags: ["tracing" "opentelemetry" "sdk"]
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"opentelemetry" {= version}
"odoc" {with-doc}
"alcotest" {with-test}
"thread-local-storage" {>= "0.2" & < "0.3"}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -1,6 +1,6 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
version: "0.9"
synopsis: "Opentelemetry tracing for Cohttp HTTP servers"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
@ -16,11 +16,9 @@ depends: [
"ocaml" {>= "4.08"}
"opentelemetry" {= version}
"opentelemetry-lwt" {= version}
"ambient-context-lwt"
"odoc" {with-doc}
"lwt" {>= "5.3"}
"cohttp" {>= "6.0.0"}
"cohttp-lwt" {>= "6.0.0"}
"cohttp-lwt" {>= "4.0.0"}
"alcotest" {with-test}
]
build: [

View file

@ -1,42 +0,0 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis: "Opentelemetry-based reporter for Logs"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "4.08"}
"opentelemetry" {= version}
"odoc" {with-doc}
"logs" {>= "0.7.0"}
"alcotest" {with-test}
"containers" {with-test}
"cohttp-lwt-unix" {with-test}
"opentelemetry-client-cohttp-lwt" {with-test & = version}
"opentelemetry-cohttp-lwt" {with-test & = version}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -1,6 +1,6 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
version: "0.9"
synopsis: "Lwt-compatible instrumentation for https://opentelemetry.io"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
@ -15,8 +15,8 @@ bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "4.08"}
"ambient-context"
"opentelemetry" {= version}
"ambient-context-lwt"
"cohttp-lwt-unix" {with-test}
"odoc" {with-doc}
"lwt" {>= "5.3"}

View file

@ -1,8 +1,7 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis:
"Core library for instrumentation and serialization for https://opentelemetry.io"
version: "0.9"
synopsis: "Instrumentation for https://opentelemetry.io"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
@ -18,18 +17,16 @@ depends: [
"ocaml" {>= "4.08"}
"ptime"
"hmap"
"ambient-context"
"odoc" {with-doc}
"alcotest" {with-test}
"pbrt" {>= "4.0" & < "5.0"}
"pbrt_yojson" {>= "4.0" & < "5.0"}
"ambient-context" {>= "0.2"}
"pbrt" {>= "3.0" & < "4.0"}
"ocaml-lsp-server" {with-dev-setup}
"ocamlformat" {with-dev-setup & >= "0.27" & < "0.28"}
"mtime" {>= "1.4"}
"ocamlformat" {with-dev-setup & >= "0.24" & < "0.25"}
]
depopts: ["atomic" "trace" "thread-local-storage" "lwt" "eio" "picos"]
depopts: ["trace"]
conflicts: [
"trace" {< "0.12"}
"trace" {< "0.7"}
]
build: [
["dune" "subst"] {dev}

View file

@ -1,7 +0,0 @@
(library
(name opentelemetry_ambient_context)
(public_name opentelemetry.ambient-context)
(synopsis "re-export ambient-context")
(libraries
(re_export ambient-context.core)
(re_export ambient-context)))

View file

@ -1,3 +0,0 @@
(** Just forward to the [ambient-context] library *)
include Ambient_context

View file

@ -15,7 +15,8 @@
(* *)
(**************************************************************************)
(** Atomic references. *)
(** Atomic references.
*)
type 'a t = 'a Stdlib.Atomic.t
(** An atomic (mutable) reference to a value of type ['a]. *)
@ -33,14 +34,15 @@ val exchange : 'a t -> 'a -> 'a
(** Set a new value for the atomic reference, and return the current value. *)
val compare_and_set : 'a t -> 'a -> 'a -> bool
(** [compare_and_set r seen v] sets the new value of [r] to [v] only if its
current value is physically equal to [seen] -- the comparison and the set
occur atomically. Returns [true] if the comparison succeeded (so the set
happened) and [false] otherwise. *)
(** [compare_and_set r seen v] sets the new value of [r] to [v] only
if its current value is physically equal to [seen] -- the
comparison and the set occur atomically. Returns [true] if the
comparison succeeded (so the set happened) and [false]
otherwise. *)
val fetch_and_add : int t -> int -> int
(** [fetch_and_add r n] atomically increments the value of [r] by [n], and
returns the current value (before the increment). *)
(** [fetch_and_add r n] atomically increments the value of [r] by [n],
and returns the current value (before the increment). *)
val incr : int t -> unit
(** [incr r] atomically increments the value of [r] by [1]. *)

View file

@ -15,7 +15,8 @@
(* *)
(**************************************************************************)
(** Atomic references. *)
(** Atomic references.
*)
type 'a t
(** An atomic (mutable) reference to a value of type ['a]. *)
@ -33,14 +34,15 @@ val exchange : 'a t -> 'a -> 'a
(** Set a new value for the atomic reference, and return the current value. *)
val compare_and_set : 'a t -> 'a -> 'a -> bool
(** [compare_and_set r seen v] sets the new value of [r] to [v] only if its
current value is physically equal to [seen] -- the comparison and the set
occur atomically. Returns [true] if the comparison succeeded (so the set
happened) and [false] otherwise. *)
(** [compare_and_set r seen v] sets the new value of [r] to [v] only
if its current value is physically equal to [seen] -- the
comparison and the set occur atomically. Returns [true] if the
comparison succeeded (so the set happened) and [false]
otherwise. *)
val fetch_and_add : int t -> int -> int
(** [fetch_and_add r n] atomically increments the value of [r] by [n], and
returns the current value (before the increment). *)
(** [fetch_and_add r n] atomically increments the value of [r] by [n],
and returns the current value (before the increment). *)
val incr : int t -> unit
(** [incr r] atomically increments the value of [r] by [1]. *)

View file

@ -51,13 +51,13 @@ let () =
let version = Scanf.sscanf Sys.ocaml_version "%d.%d.%s" (fun x y _ -> x, y) in
write_file "atomic.ml"
(if version >= (4, 12) then
atomic_after_412
else
atomic_before_412);
atomic_after_412
else
atomic_before_412);
copy_file
(if version >= (4, 12) then
"atomic.post412.mli"
else
"atomic.pre412.mli")
"atomic.post412.mli"
else
"atomic.pre412.mli")
"atomic.mli";
()

View file

@ -1,7 +0,0 @@
type t = Opentelemetry_client.Http_config.t
module Env = Opentelemetry_client.Http_config.Env ()
let pp = Opentelemetry_client.Http_config.pp
let make = Env.make (fun common () -> common)

View file

@ -1,12 +0,0 @@
type t = Opentelemetry_client.Http_config.t
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
the future. *)
val pp : Format.formatter -> t -> unit
val make : (unit -> t) Opentelemetry_client.Http_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Http_config.ENV

View file

@ -1,27 +0,0 @@
(library
(name opentelemetry_client_cohttp_eio)
(public_name opentelemetry-client-cohttp-eio)
(synopsis "Opentelemetry collector using cohttp+eio+unix")
(enabled_if
(>= %{ocaml_version} 5.0))
(libraries
(re_export opentelemetry)
(re_export opentelemetry-client)
(re_export opentelemetry-client.sync)
ambient-context-eio
(re_export eio)
(re_export eio.core)
(re_export eio.unix)
(re_export cohttp)
(re_export cohttp-eio)
(re_export tls-eio)
fmt
tls
domain-name
uri
pbrt
threads
mtime
mtime.clock.os
ca-certs
mirage-crypto-rng.unix))

View file

@ -1,208 +0,0 @@
(*
https://github.com/open-telemetry/oteps/blob/main/text/0035-opentelemetry-protocol.md
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module Config = Config
open Opentelemetry
open Opentelemetry_client
let spf = Printf.sprintf
module Make (CTX : sig
val sw : Eio.Switch.t
val env : Eio_unix.Stdenv.base
end) =
struct
module IO : Generic_io.S_WITH_CONCURRENCY with type 'a t = 'a = struct
include Generic_io.Direct_style
(* NOTE: This is only used in the main consumer thread, even though producers
might be in other domains *)
let sleep_s n = Eio.Time.sleep CTX.env#clock n
let spawn f = Eio.Fiber.fork ~sw:CTX.sw f
end
module Notifier : Generic_notifier.S with module IO = IO = struct
module IO = IO
type t = {
mutex: Eio.Mutex.t;
cond: Eio.Condition.t;
}
let create () : t =
{ mutex = Eio.Mutex.create (); cond = Eio.Condition.create () }
let trigger self =
(* Eio.Condition.broadcast is lock-free since eio 0.8 (ocaml-multicore/eio#397)
and safe to call from other threads/domains and signal handlers. *)
Eio.Condition.broadcast self.cond
let delete self =
trigger self;
()
let wait self ~should_keep_waiting =
Eio.Mutex.lock self.mutex;
while should_keep_waiting () do
Eio.Condition.await self.cond self.mutex
done;
Eio.Mutex.unlock self.mutex
(** Ensure we get signalled when the queue goes from empty to non-empty *)
let register_bounded_queue (self : t) (bq : _ Bounded_queue.Recv.t) : unit =
Bounded_queue.Recv.on_non_empty bq (fun () -> trigger self)
end
module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
open Opentelemetry.Proto
module Httpc = Cohttp_eio.Client
type t = Httpc.t
let authenticator =
match Ca_certs.authenticator () with
| Ok x -> x
| Error (`Msg m) ->
Fmt.failwith "Failed to create system store X509 authenticator: %s" m
let https ~authenticator =
let tls_config =
match Tls.Config.client ~authenticator () with
| Error (`Msg msg) -> failwith ("tls configuration problem: " ^ msg)
| Ok tls_config -> tls_config
in
fun uri raw ->
let host =
Uri.host uri
|> Option.map (fun x -> Domain_name.(host_exn (of_string_exn x)))
in
Tls_eio.client_of_flow ?host tls_config raw
let create () = Httpc.make ~https:(Some (https ~authenticator)) CTX.env#net
let cleanup = ignore
(* send the content to the remote endpoint/path *)
let send (client : t) ~attempt_descr ~url ~headers:user_headers ~decode
(body : string) : ('a, Export_error.t) result =
Eio.Switch.run @@ fun sw ->
let uri = Uri.of_string url in
let open Cohttp in
let headers = Header.(add_list (init ()) user_headers) in
let body = Cohttp_eio.Body.of_string body in
let r =
try
let r = Httpc.post client ~sw ~headers ~body uri in
Ok r
with e -> Error e
in
match r with
| Error e ->
let err =
`Failure
(spf "sending signals via http POST to %S\nfailed with:\n%s" url
(Printexc.to_string e))
in
Error err
| Ok (resp, body) ->
let body =
Eio.Buf_read.(parse_exn take_all) body ~max_size:(10 * 1024 * 1024)
in
let code = Response.status resp |> Code.code_of_status in
if not (Code.is_error code) then (
match decode with
| `Ret x -> Ok x
| `Dec f ->
let dec = Pbrt.Decoder.of_string body in
let r =
try Ok (f dec)
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e)
bt))
in
r
) else (
let dec = Pbrt.Decoder.of_string body in
let r =
try
let status = Status.decode_pb_status dec in
Error (`Status (code, status, attempt_descr))
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
status: %S\n\
%s"
url code (Printexc.to_string e) body bt))
in
r
)
end
end
let create_consumer ?(config = Config.make ()) ~sw ~env () :
_ Consumer.Builder.t =
let module M = Make (struct
let sw = sw
let env = env
end) in
let module C = Generic_http_consumer.Make (M.IO) (M.Notifier) (M.Httpc) in
C.consumer ~ticker_task:(Some 0.5) ~on_tick:Sdk.tick ~config ()
let create_exporter ?(config = Config.make ()) ~sw ~env () =
let consumer = create_consumer ~config ~sw ~env () in
let bq =
Opentelemetry_client_sync.Bounded_queue_sync.create
~high_watermark:Bounded_queue.Defaults.high_watermark ()
in
Exporter_queued.create ~clock:Clock.ptime_clock ~q:bq ~consumer ()
let create_backend = create_exporter
let setup_ ~sw ~config env : unit =
Opentelemetry_ambient_context.set_current_storage Ambient_context_eio.storage;
let exp = create_exporter ~config ~sw ~env () in
Sdk.set ~traces:config.traces ~metrics:config.metrics ~logs:config.logs exp;
Option.iter
(fun min_level -> Opentelemetry.Self_debug.to_stderr ~min_level ())
config.log_level;
Opentelemetry.Self_debug.log Opentelemetry.Self_debug.Info (fun () ->
"opentelemetry: cohttp-eio exporter installed");
Opentelemetry_client.Self_trace.set_enabled config.self_trace;
if config.self_metrics then Opentelemetry.Sdk.setup_self_metrics ()
let setup ?(config = Config.make ()) ?(enable = true) ~sw env =
if enable && not config.sdk_disabled then setup_ ~sw ~config env
let remove_exporter () =
let p, waker = Eio.Promise.create () in
Sdk.remove () ~on_done:(fun () -> Eio.Promise.resolve waker ());
Eio.Promise.await p
let remove_backend = remove_exporter
let with_setup ?(config = Config.make ()) ?(enable = true) env f =
if enable && not config.sdk_disabled then (
Eio.Switch.run @@ fun sw ->
setup_ ~sw ~config env;
Fun.protect f ~finally:remove_exporter
) else
f ()

View file

@ -1,58 +0,0 @@
(*
TODO: more options from
https://opentelemetry.io/docs/reference/specification/protocol/exporter/
*)
module Config = Config
val create_consumer :
?config:Config.t ->
sw:Eio.Switch.t ->
env:Eio_unix.Stdenv.base ->
unit ->
Opentelemetry_client.Consumer.any_signal_l_builder
(** Consumer that pulls from a queue *)
val create_exporter :
?config:Config.t ->
sw:Eio.Switch.t ->
env:Eio_unix.Stdenv.base ->
unit ->
Opentelemetry.Exporter.t
(** NOTE [after_cleanup] optional parameter removed @since 0.12 *)
val create_backend :
?config:Config.t ->
sw:Eio.Switch.t ->
env:Eio_unix.Stdenv.base ->
unit ->
Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup :
?config:Config.t ->
?enable:bool ->
sw:Eio.Switch.t ->
Eio_unix.Stdenv.base ->
unit
(** Setup endpoint. This modifies {!Opentelemetry.Collector.backend}.
@param enable
actually set up the backend (default true). This can be used to
enable/disable the setup depending on CLI arguments or environment.
@param config configuration to use
@param stop
an atomic boolean. When it becomes true, background threads will all stop
after a little while. *)
val remove_exporter : unit -> unit
(** Shutdown current exporter
@since 0.12 *)
val remove_backend : unit -> unit
[@@deprecated "use remove_exporter"]
(** @since 0.12 *)
val with_setup :
?config:Config.t -> ?enable:bool -> Eio_unix.Stdenv.base -> (unit -> 'a) -> 'a
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up after
[f()] returns. See {!setup} for more details. *)

View file

@ -5,3 +5,37 @@ let[@inline] ( let@ ) f x = f x
let spf = Printf.sprintf
let tid () = Thread.id @@ Thread.self ()
let debug_ =
ref
(match Sys.getenv_opt "OTEL_OCAML_DEBUG" with
| Some ("1" | "true") -> true
| _ -> false)
let default_url = "http://localhost:4318"
let url =
ref (try Sys.getenv "OTEL_EXPORTER_OTLP_ENDPOINT" with _ -> default_url)
let get_url () = !url
let set_url s = url := s
let parse_headers s =
let parse_header s =
match String.split_on_char '=' s with
| [ key; value ] -> key, value
| _ -> failwith "Unexpected format for header"
in
String.split_on_char ',' s |> List.map parse_header
let default_headers = []
let headers =
ref
(try parse_headers (Sys.getenv "OTEL_EXPORTER_OTLP_HEADERS")
with _ -> default_headers)
let get_headers () = !headers
let set_headers s = headers := s

View file

@ -1,7 +1,45 @@
type t = Opentelemetry_client.Http_config.t
open Common_
module Env = Opentelemetry_client.Http_config.Env ()
type t = {
debug: bool;
url: string;
headers: (string * string) list;
batch_traces: int option;
batch_metrics: int option;
batch_logs: int option;
batch_timeout_ms: int;
}
let pp = Opentelemetry_client.Http_config.pp
let pp out self : unit =
let ppiopt = Format.pp_print_option Format.pp_print_int in
let pp_header ppf (a, b) = Format.fprintf ppf "@[%s: @,%s@]@." a b in
let ppheaders = Format.pp_print_list pp_header in
let {
debug;
url;
headers;
batch_traces;
batch_metrics;
batch_logs;
batch_timeout_ms;
} =
self
in
Format.fprintf out
"{@[ debug=%B;@ url=%S;@ headers=%a;@ batch_traces=%a;@ batch_metrics=%a;@ \
batch_logs=%a;@ batch_timeout_ms=%d; @]}"
debug url ppheaders headers ppiopt batch_traces ppiopt batch_metrics ppiopt
batch_logs batch_timeout_ms
let make = Env.make (fun common () -> common)
let make ?(debug = !debug_) ?(url = get_url ()) ?(headers = get_headers ())
?(batch_traces = Some 400) ?(batch_metrics = Some 20)
?(batch_logs = Some 400) ?(batch_timeout_ms = 500) () : t =
{
debug;
url;
headers;
batch_traces;
batch_metrics;
batch_timeout_ms;
batch_logs;
}

View file

@ -1,12 +1,54 @@
type t = Opentelemetry_client.Http_config.t
type t = private {
debug: bool;
url: string;
(** Url of the endpoint. Default is "http://localhost:4318",
or "OTEL_EXPORTER_OTLP_ENDPOINT" if set. *)
headers: (string * string) list;
(** API headers sent to the endpoint. Default is none or
"OTEL_EXPORTER_OTLP_HEADERS" if set. *)
batch_traces: int option;
(** Batch traces? If [Some i], then this produces batches of (at most)
[i] items. If [None], there is no batching.
Note that traces and metrics are batched separately.
Default [Some 400].
*)
batch_metrics: int option;
(** Batch metrics? If [Some i], then this produces batches of (at most)
[i] items. If [None], there is no batching.
Note that traces and metrics are batched separately.
Default [None].
*)
batch_logs: int option;
(** Batch logs? See {!batch_metrics} for details.
Default [Some 400] *)
batch_timeout_ms: int;
(** Number of milliseconds after which we will emit a batch, even
incomplete.
Note that the batch might take longer than that, because this is
only checked when a new event occurs. Default 500. *)
}
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
the future. *)
To build one, use {!make} below. This might be extended with more
fields in the future. *)
val make :
?debug:bool ->
?url:string ->
?headers:(string * string) list ->
?batch_traces:int option ->
?batch_metrics:int option ->
?batch_logs:int option ->
?batch_timeout_ms:int ->
unit ->
t
(** Make a configuration.
@param thread if true and [bg_threads] is not provided, we will pick a number
of bg threads. Otherwise the number of [bg_threads] supersedes this option.
*)
val pp : Format.formatter -> t -> unit
val make : (unit -> t) Opentelemetry_client.Http_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Http_config.ENV

View file

@ -4,20 +4,5 @@
(synopsis "Opentelemetry collector using cohttp+lwt+unix")
(preprocess
(pps lwt_ppx))
(libraries
(re_export opentelemetry)
(re_export opentelemetry-lwt)
(re_export opentelemetry-client)
(re_export opentelemetry-client.lwt)
(re_export opentelemetry-client.sync)
(re_export lwt)
(re_export lwt.unix)
(re_export cohttp-lwt)
(re_export cohttp-lwt-unix)
(re_export cohttp)
(re_export unix)
uri
threads
pbrt
mtime
mtime.clock.os))
(libraries opentelemetry lwt cohttp-lwt cohttp-lwt-unix pbrt mtime
mtime.clock.os))

View file

@ -3,19 +3,83 @@
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
open Opentelemetry_client
open Opentelemetry
open Common_
include Common_
type error = Export_error.t
let needs_gc_metrics = Atomic.make false
open struct
module IO = Opentelemetry_client_lwt.Io_lwt
end
let last_gc_metrics = Atomic.make (Mtime_clock.now ())
module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
let timeout_gc_metrics = Mtime.Span.(20 * s)
let gc_metrics = ref []
(* side channel for GC, appended to {!E_metrics}'s data *)
(* capture current GC metrics if {!needs_gc_metrics} is true,
or it has been a long time since the last GC metrics collection,
and push them into {!gc_metrics} for later collection *)
let sample_gc_metrics_if_needed () =
let now = Mtime_clock.now () in
let alarm = Atomic.compare_and_set needs_gc_metrics true false in
let timeout () =
let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
Mtime.Span.compare elapsed timeout_gc_metrics > 0
in
if alarm || timeout () then (
Atomic.set last_gc_metrics now;
let l =
OT.Metrics.make_resource_metrics
~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
@@ Opentelemetry.GC_metrics.get_metrics ()
in
gc_metrics := l :: !gc_metrics
)
type error =
[ `Status of int * Opentelemetry.Proto.Status.status
| `Failure of string
| `Sysbreak
]
let n_errors = Atomic.make 0
let n_dropped = Atomic.make 0
let report_err_ = function
| `Sysbreak -> Printf.eprintf "opentelemetry: ctrl-c captured, stopping\n%!"
| `Failure msg ->
Format.eprintf "@[<2>opentelemetry: export failed: %s@]@." msg
| `Status (code, { Opentelemetry.Proto.Status.code = scode; message; details })
->
let pp_details out l =
List.iter
(fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
l
in
Format.eprintf
"@[<2>opentelemetry: export failed with@ http code=%d@ status \
{@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]@."
code scode
(Bytes.unsafe_to_string message)
pp_details details
module Httpc : sig
type t
val create : unit -> t
val send :
t ->
config:Config.t ->
path:string ->
decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
string ->
('a, error) result Lwt.t
val cleanup : t -> unit
end = struct
open Opentelemetry.Proto
open Lwt.Syntax
module Httpc = Cohttp_lwt_unix.Client
@ -27,12 +91,23 @@ module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
let cleanup _self = ()
(* send the content to the remote endpoint/path *)
let send (_self : t) ~attempt_descr ~url ~headers:user_headers ~decode
(bod : string) : ('a, error) result Lwt.t =
let uri = Uri.of_string url in
let send (_self : t) ~(config : Config.t) ~path ~decode (bod : string) :
('a, error) result Lwt.t =
let url =
let url = config.url in
if url <> "" && String.get url (String.length url - 1) = '/' then
String.sub url 0 (String.length url - 1)
else
url
in
let full_url = url ^ path in
let uri = Uri.of_string full_url in
let open Cohttp in
let headers = Header.(add_list (init ()) user_headers) in
let headers = Header.(add_list (init ()) !headers) in
let headers =
Header.(add headers "Content-Type" "application/x-protobuf")
in
let body = Cohttp_lwt.Body.of_string bod in
@ -46,7 +121,7 @@ module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
| Error e ->
let err =
`Failure
(spf "sending signals via http POST to %S\nfailed with:\n%s" url
(spf "sending signals via http POST to %S\nfailed with:\n%s" full_url
(Printexc.to_string e))
in
Lwt.return @@ Error err
@ -64,8 +139,7 @@ module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e)
bt))
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e) bt))
in
Lwt.return r
) else (
@ -74,74 +148,446 @@ module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
let r =
try
let status = Status.decode_pb_status dec in
Error (`Status (code, status, attempt_descr))
Error (`Status (code, status))
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
status: %S\n\
%s"
url code (Printexc.to_string e) body bt))
(spf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
status: %S\n\
%s"
full_url code (Printexc.to_string e) body bt))
in
Lwt.return r
)
end
module Consumer_impl =
Generic_http_consumer.Make (IO) (Opentelemetry_client_lwt.Notifier_lwt)
(Httpc)
(** Batch of resources to be pushed later.
let create_consumer ?(config = Config.make ()) () =
Consumer_impl.consumer ~ticker_task:(Some 0.5) ~on_tick:OTEL.Sdk.tick ~config
()
This type is thread-safe. *)
module Batch : sig
type 'a t
let create_exporter ?(config = Config.make ()) () =
let consumer = create_consumer ~config () in
let bq =
Opentelemetry_client_sync.Bounded_queue_sync.create
~high_watermark:Bounded_queue.Defaults.high_watermark ()
val push' : 'a t -> 'a -> unit
val pop_if_ready : ?force:bool -> now:Mtime.t -> 'a t -> 'a list option
(** Is the batch ready to be emitted? If batching is disabled,
this is true as soon as {!is_empty} is false. If a timeout is provided
for this batch, then it will be ready if an element has been in it
for at least the timeout.
@param now passed to implement timeout *)
val make : ?batch:int -> ?timeout:Mtime.span -> unit -> 'a t
(** Create a new batch *)
end = struct
type 'a t = {
mutable size: int;
mutable q: 'a list;
batch: int option;
high_watermark: int;
timeout: Mtime.span option;
mutable start: Mtime.t;
}
let make ?batch ?timeout () : _ t =
Option.iter (fun b -> assert (b > 0)) batch;
let high_watermark = Option.fold ~none:100 ~some:(fun x -> x * 10) batch in
{
size = 0;
start = Mtime_clock.now ();
q = [];
batch;
timeout;
high_watermark;
}
let timeout_expired_ ~now self : bool =
match self.timeout with
| Some t ->
let elapsed = Mtime.span now self.start in
Mtime.Span.compare elapsed t >= 0
| None -> false
let is_full_ self : bool =
match self.batch with
| None -> self.size > 0
| Some b -> self.size >= b
let pop_if_ready ?(force = false) ~now (self : _ t) : _ list option =
if self.size > 0 && (force || is_full_ self || timeout_expired_ ~now self)
then (
let l = self.q in
self.q <- [];
self.size <- 0;
assert (l <> []);
Some l
) else
None
let push (self : _ t) x : bool =
if self.size >= self.high_watermark then (
(* drop this to prevent queue from growing too fast *)
Atomic.incr n_dropped;
true
) else (
if self.size = 0 && Option.is_some self.timeout then
(* current batch starts now *)
self.start <- Mtime_clock.now ();
(* add to queue *)
self.size <- 1 + self.size;
self.q <- x :: self.q;
let ready = is_full_ self in
ready
)
let push' self x = ignore (push self x : bool)
end
(** An emitter. This is used by {!Backend} below to forward traces/metrics/…
from the program to whatever collector client we have. *)
module type EMITTER = sig
open Opentelemetry.Proto
val push_trace : Trace.resource_spans list -> unit
val push_metrics : Metrics.resource_metrics list -> unit
val push_logs : Logs.resource_logs list -> unit
val set_on_tick_callbacks : (unit -> unit) AList.t -> unit
val tick : unit -> unit
val cleanup : unit -> unit
end
(* make an emitter.
exceptions inside should be caught, see
https://opentelemetry.io/docs/reference/specification/error-handling/ *)
let mk_emitter ~stop ~(config : Config.t) () : (module EMITTER) =
let open Proto in
let open Lwt.Syntax in
(* local helpers *)
let open struct
let timeout =
if config.batch_timeout_ms > 0 then
Some Mtime.Span.(config.batch_timeout_ms * ms)
else
None
let batch_traces : Trace.resource_spans list Batch.t =
Batch.make ?batch:config.batch_traces ?timeout ()
let batch_metrics : Metrics.resource_metrics list Batch.t =
Batch.make ?batch:config.batch_metrics ?timeout ()
let batch_logs : Logs.resource_logs list Batch.t =
Batch.make ?batch:config.batch_logs ?timeout ()
let on_tick_cbs_ = Atomic.make (AList.make ())
let set_on_tick_callbacks = Atomic.set on_tick_cbs_
let send_http_ (httpc : Httpc.t) encoder ~path ~encode x : unit Lwt.t =
Pbrt.Encoder.reset encoder;
encode x encoder;
let data = Pbrt.Encoder.to_string encoder in
let* r = Httpc.send httpc ~config ~path ~decode:(`Ret ()) data in
match r with
| Ok () -> Lwt.return ()
| Error `Sysbreak ->
Printf.eprintf "ctrl-c captured, stopping\n%!";
Atomic.set stop true;
Lwt.return ()
| Error err ->
(* TODO: log error _via_ otel? *)
Atomic.incr n_errors;
report_err_ err;
(* avoid crazy error loop *)
Lwt_unix.sleep 3.
let send_metrics_http curl encoder (l : Metrics.resource_metrics list list)
=
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
let x =
Metrics_service.default_export_metrics_service_request
~resource_metrics:l ()
in
send_http_ curl encoder ~path:"/v1/metrics"
~encode:Metrics_service.encode_pb_export_metrics_service_request x
let send_traces_http curl encoder (l : Trace.resource_spans list list) =
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
let x =
Trace_service.default_export_trace_service_request ~resource_spans:l ()
in
send_http_ curl encoder ~path:"/v1/traces"
~encode:Trace_service.encode_pb_export_trace_service_request x
let send_logs_http curl encoder (l : Logs.resource_logs list list) =
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
let x =
Logs_service.default_export_logs_service_request ~resource_logs:l ()
in
send_http_ curl encoder ~path:"/v1/logs"
~encode:Logs_service.encode_pb_export_logs_service_request x
(* emit metrics, if the batch is full or timeout lapsed *)
let emit_metrics_maybe ~now ?force httpc encoder : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_metrics with
| None -> Lwt.return false
| Some l ->
let batch = !gc_metrics :: l in
gc_metrics := [];
let+ () = send_metrics_http httpc encoder batch in
true
(* emit spans if their batch is ready (full, timed out, or [force]d);
   returns whether anything was sent *)
let emit_traces_maybe ~now ?force httpc encoder : bool Lwt.t =
  match Batch.pop_if_ready ?force ~now batch_traces with
  | Some ready ->
    let+ () = send_traces_http httpc encoder ready in
    true
  | None -> Lwt.return false
(* emit logs if their batch is ready (full, timed out, or [force]d);
   returns whether anything was sent *)
let emit_logs_maybe ~now ?force httpc encoder : bool Lwt.t =
  match Batch.pop_if_ready ?force ~now batch_logs with
  | Some ready ->
    let+ () = send_logs_http httpc encoder ready in
    true
  | None -> Lwt.return false
(** Run [f ()], printing (and swallowing) any exception together with a
    backtrace, so that exporter failures never crash user code. *)
let[@inline] guard_exn_ where f =
  match f () with
  | res -> res
  | exception exn ->
    let backtrace = Printexc.get_backtrace () in
    Printf.eprintf
      "opentelemetry-curl: uncaught exception in %s: %s\n%s\n%!" where
      (Printexc.to_string exn) backtrace
(* force-flush the three batches; the [and+] arms run concurrently
   under Lwt *)
let emit_all_force (httpc : Httpc.t) encoder : unit Lwt.t =
  let now = Mtime_clock.now () in
  let+ (_ : bool) = emit_traces_maybe ~now ~force:true httpc encoder
  and+ (_ : bool) = emit_logs_maybe ~now ~force:true httpc encoder
  and+ (_ : bool) = emit_metrics_maybe ~now ~force:true httpc encoder in
  ()
(* work common to every tick: sample GC metrics if requested, then run
   each user-registered tick callback, shielding against exceptions *)
let tick_common_ () =
  if !debug_ then Printf.eprintf "tick (from %d)\n%!" (tid ());
  sample_gc_metrics_if_needed ();
  List.iter
    (fun f ->
      try f ()
      with e ->
        Printf.eprintf "on tick callback raised: %s\n"
          (Printexc.to_string e))
    (AList.get @@ Atomic.get on_tick_cbs_);
  ()
(* thread that calls [tick()] regularly, to help enforce timeouts *)
let setup_ticker_thread ~tick ~finally () =
  (* loop every 0.5s until [stop] flips to true, then run [finally] *)
  let rec tick_thread () =
    if Atomic.get stop then (
      finally ();
      Lwt.return ()
    ) else
      let* () = Lwt_unix.sleep 0.5 in
      let* () = tick () in
      tick_thread ()
  in
  Lwt.async tick_thread
end in
let httpc = Httpc.create () in
let encoder = Pbrt.Encoder.create () in
let module M = struct
  (* we make sure that this is thread-safe, even though we don't have a
     background thread. There can still be a ticker thread, and there
     can also be several user threads that produce spans and call
     the emit functions. *)

  (* enqueue spans, then opportunistically flush in the background *)
  let push_trace e =
    let@ () = guard_exn_ "push trace" in
    Batch.push' batch_traces e;
    let now = Mtime_clock.now () in
    Lwt.async (fun () ->
        let+ (_ : bool) = emit_traces_maybe ~now httpc encoder in
        ())

  (* enqueue metrics (plus GC metrics, if due), then flush if ready *)
  let push_metrics e =
    let@ () = guard_exn_ "push metrics" in
    sample_gc_metrics_if_needed ();
    Batch.push' batch_metrics e;
    let now = Mtime_clock.now () in
    Lwt.async (fun () ->
        let+ (_ : bool) = emit_metrics_maybe ~now httpc encoder in
        ())

  (* enqueue logs, then flush if ready *)
  let push_logs e =
    let@ () = guard_exn_ "push logs" in
    Batch.push' batch_logs e;
    let now = Mtime_clock.now () in
    Lwt.async (fun () ->
        let+ (_ : bool) = emit_logs_maybe ~now httpc encoder in
        ())

  let set_on_tick_callbacks = set_on_tick_callbacks

  (* a tick: common work, then try to flush every batch concurrently.
     NOTE(review): [sample_gc_metrics_if_needed] is also called inside
     [tick_common_], so it runs twice per tick — redundant but harmless. *)
  let tick_ () =
    tick_common_ ();
    sample_gc_metrics_if_needed ();
    let now = Mtime_clock.now () in
    let+ (_ : bool) = emit_traces_maybe ~now httpc encoder
    and+ (_ : bool) = emit_logs_maybe ~now httpc encoder
    and+ (_ : bool) = emit_metrics_maybe ~now httpc encoder in
    ()

  let () = setup_ticker_thread ~tick:tick_ ~finally:ignore ()

  (* if called in a blocking context: work in the background *)
  let tick () = Lwt.async tick_

  (* final flush; fire-and-forget, so a very fast process exit might not
     wait for it — NOTE(review): confirm this is acceptable at exit *)
  let cleanup () =
    if !debug_ then Printf.eprintf "opentelemetry: exiting…\n%!";
    Lwt.async (fun () ->
        let* () = emit_all_force httpc encoder in
        Httpc.cleanup httpc;
        Lwt.return ())
end in
(module M)
(** Functor building a collector backend from a [stop] flag and a
    configuration. The result implements
    {!Opentelemetry.Collector.BACKEND}; its [send_*] senders push
    signals into the emitter created by [mk_emitter]. *)
module Backend (Arg : sig
  val stop : bool Atomic.t
  val config : Config.t
end)
() : Opentelemetry.Collector.BACKEND = struct
  (* brings [push_trace]/[push_metrics]/[push_logs]/[tick]/[cleanup] etc.
     into scope *)
  include (val mk_emitter ~stop:Arg.stop ~config:Arg.config ())

  open Opentelemetry.Proto
  open Opentelemetry.Collector

  let send_trace : Trace.resource_spans list sender =
    {
      send =
        (fun l ~ret ->
          (* debug dump under the global lock so lines don't interleave *)
          (if !debug_ then
             let@ () = Lock.with_lock in
             Format.eprintf "send spans %a@."
               (Format.pp_print_list Trace.pp_resource_spans)
               l);
          push_trace l;
          ret ());
    }

  let last_sent_metrics = Atomic.make (Mtime_clock.now ())

  (* minimum delay between two emissions of the exporter's own metrics *)
  let timeout_sent_metrics = Mtime.Span.(5 * s)

  (* send metrics from time to time *)
  let signal_emit_gc_metrics () =
    if !debug_ then
      Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
    Atomic.set needs_gc_metrics true

  (* exporter self-metrics (dropped/error counters), produced at most
     once per [timeout_sent_metrics] window *)
  let additional_metrics () : Metrics.resource_metrics list =
    (* add exporter metrics to the lot? *)
    let last_emit = Atomic.get last_sent_metrics in
    let now = Mtime_clock.now () in
    let add_own_metrics =
      let elapsed = Mtime.span last_emit now in
      Mtime.Span.compare elapsed timeout_sent_metrics > 0
    in
    (* there is a possible race condition here, as several threads might update
       metrics at the same time. But that's harmless. *)
    if add_own_metrics then (
      Atomic.set last_sent_metrics now;
      let open OT.Metrics in
      [
        make_resource_metrics
          [
            sum ~name:"otel.export.dropped" ~is_monotonic:true
              [
                int
                  ~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
                  ~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
              ];
            sum ~name:"otel.export.errors" ~is_monotonic:true
              [
                int
                  ~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
                  ~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
              ];
          ];
      ]
    ) else
      []

  let send_metrics : Metrics.resource_metrics list sender =
    {
      send =
        (fun m ~ret ->
          (if !debug_ then
             let@ () = Lock.with_lock in
             Format.eprintf "send metrics %a@."
               (Format.pp_print_list Metrics.pp_resource_metrics)
               m);
          (* piggy-back exporter self-metrics onto the user's batch *)
          let m = List.rev_append (additional_metrics ()) m in
          push_metrics m;
          ret ());
    }

  let send_logs : Logs.resource_logs list sender =
    {
      send =
        (fun m ~ret ->
          (if !debug_ then
             let@ () = Lock.with_lock in
             Format.eprintf "send logs %a@."
               (Format.pp_print_list Logs.pp_resource_logs)
               m);
          push_logs m;
          ret ());
    }
end
let create_backend ?(stop = Atomic.make false) ?(config = Config.make ()) () =
debug_ := config.debug;
if config.url <> get_url () then set_url config.url;
let module B =
Backend
(struct
let stop = stop
let config = config
end)
()
in
Exporter_queued.create ~clock:Clock.ptime_clock ~q:bq ~consumer ()
(module B : OT.Collector.BACKEND)
let create_backend = create_exporter
let setup_ ?stop ?config () =
let backend = create_backend ?stop ?config () in
let (module B : OT.Collector.BACKEND) = backend in
OT.Collector.set_backend backend;
B.cleanup
let setup_ ~config () : unit =
Opentelemetry_client_lwt.Util_ambient_context.setup_ambient_context ();
let exp = create_exporter ~config () in
Sdk.set ~traces:config.traces ~metrics:config.metrics ~logs:config.logs exp;
let setup ?stop ?config ?(enable = true) () =
if enable then (
let cleanup = setup_ ?stop ?config () in
at_exit cleanup
)
Option.iter
(fun min_level -> Opentelemetry.Self_debug.to_stderr ~min_level ())
config.log_level;
Opentelemetry.Self_debug.log Opentelemetry.Self_debug.Info (fun () ->
"opentelemetry: cohttp-lwt exporter installed");
Opentelemetry_client.Self_trace.set_enabled config.self_trace;
if config.self_metrics then Opentelemetry.Sdk.setup_self_metrics ();
()
let setup ?(config = Config.make ()) ?(enable = true) () =
if enable && not config.sdk_disabled then setup_ ~config ()
let remove_exporter () : unit Lwt.t =
let done_fut, done_u = Lwt.wait () in
(* Printf.eprintf "otel.client.cohttp-lwt: removing…\n%!"; *)
Sdk.remove
~on_done:(fun () ->
(* Printf.eprintf "otel.client.cohttp-lwt: done removing\n%!"; *)
Lwt.wakeup_later done_u ())
();
done_fut
let remove_backend = remove_exporter
let with_setup ?(config = Config.make ()) ?(enable = true) () f : _ Lwt.t =
if enable && not config.sdk_disabled then (
setup_ ~config ();
Lwt.finalize f remove_exporter
let with_setup ?stop ?(config = Config.make ()) ?(enable = true) () f =
if enable then (
let cleanup = setup_ ?stop ~config () in
Fun.protect ~finally:cleanup f
) else
f ()

View file

@ -3,33 +3,45 @@
https://opentelemetry.io/docs/reference/specification/protocol/exporter/
*)
open Common_
val get_url : unit -> string
val set_url : string -> unit
(** Url of the endpoint. Default is "http://localhost:4318",
or "OTEL_EXPORTER_OTLP_ENDPOINT" if set. *)
val get_headers : unit -> (string * string) list
val set_headers : (string * string) list -> unit
(** Set http headers that are sent on every http query to the collector. *)
module Config = Config
val create_consumer :
?config:Config.t -> unit -> Opentelemetry_client.Consumer.any_signal_l_builder
(** Consumer that pulls from a queue *)
val create_backend :
?stop:bool Atomic.t ->
?config:Config.t ->
unit ->
(module Opentelemetry.Collector.BACKEND)
val create_exporter : ?config:Config.t -> unit -> Opentelemetry.Exporter.t
(** Create a new backend using lwt and ezcurl-lwt *)
val create_backend : ?config:Config.t -> unit -> Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup : ?config:Config.t -> ?enable:bool -> unit -> unit
val setup :
?stop:bool Atomic.t -> ?config:Config.t -> ?enable:bool -> unit -> unit
(** Setup endpoint. This modifies {!Opentelemetry.Collector.backend}.
@param enable
actually setup the backend (default true). This can be used to
enable/disable the setup depending on CLI arguments or environment.
@param enable actually setup the backend (default true). This can
be used to enable/disable the setup depending on CLI arguments
or environment.
@param config configuration to use
@param stop
an atomic boolean. When it becomes true, background threads will all stop
after a little while. *)
val remove_backend : unit -> unit Lwt.t
(** Shutdown current backend
@since 0.12 *)
@param stop an atomic boolean. When it becomes true, background threads
will all stop after a little while.
*)
val with_setup :
?config:Config.t -> ?enable:bool -> unit -> (unit -> 'a Lwt.t) -> 'a Lwt.t
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up after
    [f()] returns. See {!setup} for more details. *)
?stop:bool Atomic.t ->
?config:Config.t ->
?enable:bool ->
unit ->
(unit -> 'a) ->
'a
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up
after [f()] returns
See {!setup} for more details. *)

View file

@ -1,7 +0,0 @@
module Atomic = Opentelemetry_atomic.Atomic
let[@inline] ( let@ ) f x = f x
let spf = Printf.sprintf
let tid () = Thread.id @@ Thread.self ()

View file

@ -1,7 +0,0 @@
type t = Opentelemetry_client.Http_config.t
module Env = Opentelemetry_client.Http_config.Env ()
let pp = Opentelemetry_client.Http_config.pp
let make = Env.make (fun common () -> common)

View file

@ -1,12 +0,0 @@
type t = Opentelemetry_client.Http_config.t
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
the future. *)
val pp : Format.formatter -> t -> unit
val make : (unit -> t) Opentelemetry_client.Http_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Http_config.ENV

View file

@ -1,21 +0,0 @@
(library
(name opentelemetry_client_ocurl_lwt)
(public_name opentelemetry-client-ocurl-lwt)
(synopsis "Opentelemetry collector using ezcurl-lwt")
(preprocess
(pps lwt_ppx))
(libraries
(re_export opentelemetry)
opentelemetry.atomic
(re_export opentelemetry-client)
(re_export opentelemetry-client.sync)
(re_export opentelemetry-client.lwt)
threads
pbrt
mtime
mtime.clock.os
(re_export curl)
(re_export ezcurl-lwt)
(re_export ezcurl.core)
(re_export lwt)
(re_export lwt.unix)))

View file

@ -1,132 +0,0 @@
(*
https://github.com/open-telemetry/oteps/blob/main/text/0035-opentelemetry-protocol.md
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module Config = Config
open Opentelemetry
open Opentelemetry_client
open Common_
type error = Export_error.t
open struct
module IO = Opentelemetry_client_lwt.Io_lwt
end
(** HTTP client *)
module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
open Lwt.Syntax
type t = Ezcurl_core.t
let create () : t = Ezcurl_lwt.make ()
let cleanup self = Ezcurl_lwt.delete self
(** send the content to the remote endpoint/path *)
let send (self : t) ~attempt_descr ~url ~headers:user_headers ~decode
(bod : string) : ('a, error) result Lwt.t =
let* r =
let headers = user_headers in
Ezcurl_lwt.post ~client:self ~headers ~params:[] ~url
~content:(`String bod) ()
in
match r with
| Error (code, msg) ->
let err =
`Failure
(spf
"sending signals via http POST failed:\n\
\ %s\n\
\ curl code: %s\n\
\ url: %s\n\
%!"
msg (Curl.strerror code) url)
in
Lwt.return @@ Error err
| Ok { code; body; _ } when code >= 200 && code < 300 ->
(match decode with
| `Ret x -> Lwt.return @@ Ok x
| `Dec f ->
let dec = Pbrt.Decoder.of_string body in
let r =
try Ok (f dec)
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e) bt))
in
Lwt.return r)
| Ok { code; body; _ } ->
let err =
Export_error.decode_invalid_http_response ~attempt_descr ~url ~code body
in
Lwt.return (Error err)
end
module Consumer_impl =
Generic_http_consumer.Make (IO) (Opentelemetry_client_lwt.Notifier_lwt)
(Httpc)
let create_consumer ?(config = Config.make ()) () =
Consumer_impl.consumer ~ticker_task:(Some 0.5) ~on_tick:OTEL.Sdk.tick ~config
()
let create_exporter ?(config = Config.make ()) () =
let consumer = create_consumer ~config () in
let bq =
Opentelemetry_client_sync.Bounded_queue_sync.create
~high_watermark:Bounded_queue.Defaults.high_watermark ()
in
Exporter_queued.create ~clock:Clock.ptime_clock ~q:bq ~consumer ()
let create_backend = create_exporter
let setup_ ~config () : Exporter.t =
Opentelemetry_client_lwt.Util_ambient_context.setup_ambient_context ();
let exp = create_exporter ~config () in
Sdk.set ~traces:config.traces ~metrics:config.metrics ~logs:config.logs exp;
Option.iter
(fun min_level -> Opentelemetry.Self_debug.to_stderr ~min_level ())
config.log_level;
Opentelemetry.Self_debug.log Opentelemetry.Self_debug.Info (fun () ->
"opentelemetry: ocurl-lwt exporter installed");
Opentelemetry_client.Self_trace.set_enabled config.self_trace;
if config.self_metrics then Opentelemetry.Sdk.setup_self_metrics ();
exp
let setup ?(config = Config.make ()) ?(enable = true) () =
if enable && not config.sdk_disabled then
ignore (setup_ ~config () : Exporter.t)
let remove_exporter () : unit Lwt.t =
let done_fut, done_u = Lwt.wait () in
Sdk.remove ~on_done:(fun () -> Lwt.wakeup_later done_u ()) ();
done_fut
let remove_backend = remove_exporter
let with_setup ?(after_shutdown = ignore) ?(config = Config.make ())
?(enable = true) () f : _ Lwt.t =
if enable && not config.sdk_disabled then
let open Lwt.Syntax in
let exp = setup_ ~config () in
Lwt.catch
(fun () ->
let* res = f () in
let+ () = remove_exporter () in
after_shutdown exp;
res)
(fun exn ->
let* () = remove_exporter () in
after_shutdown exp;
Lwt.reraise exn)
else
f ()

View file

@ -1,37 +0,0 @@
(*
TODO: more options from
https://opentelemetry.io/docs/reference/specification/protocol/exporter/
*)
module Config = Config
val create_consumer :
?config:Config.t -> unit -> Opentelemetry_client.Consumer.any_signal_l_builder
(** Consumer that pulls from a queue *)
val create_exporter : ?config:Config.t -> unit -> Opentelemetry.Exporter.t
(** Create a new backend using lwt and ezcurl-lwt *)
val create_backend : ?config:Config.t -> unit -> Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup : ?config:Config.t -> ?enable:bool -> unit -> unit
(** Setup endpoint. This modifies {!Opentelemetry.Collector.backend}.
@param enable
actually setup the backend (default true). This can be used to
enable/disable the setup depending on CLI arguments or environment.
@param config configuration to use *)
val remove_backend : unit -> unit Lwt.t
(** Shutdown current backend
@since NEXT_RELEASE *)
val with_setup :
?after_shutdown:(Opentelemetry.Exporter.t -> unit) ->
?config:Config.t ->
?enable:bool ->
unit ->
(unit -> 'a Lwt.t) ->
'a Lwt.t
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up after
    [f()] returns. See {!setup} for more details. *)

View file

@ -1,5 +1,3 @@
module UM = Util_mutex
type 'a t = {
mutex: Mutex.t;
cond: Condition.t;
@ -18,42 +16,54 @@ let create () : _ t =
}
let close (self : _ t) =
UM.protect self.mutex @@ fun () ->
Mutex.lock self.mutex;
if not self.closed then (
self.closed <- true;
Condition.broadcast self.cond (* awake waiters so they fail *)
)
);
Mutex.unlock self.mutex
let push (self : _ t) x : unit =
UM.protect self.mutex @@ fun () ->
if self.closed then
Mutex.lock self.mutex;
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
else (
) else (
Queue.push x self.q;
Condition.signal self.cond
Condition.signal self.cond;
Mutex.unlock self.mutex
)
let pop (self : 'a t) : 'a =
Mutex.lock self.mutex;
let rec loop () =
if self.closed then
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
else if Queue.is_empty self.q then (
) else if Queue.is_empty self.q then (
Condition.wait self.cond self.mutex;
(loop [@tailcall]) ()
) else (
let x = Queue.pop self.q in
Mutex.unlock self.mutex;
x
)
in
UM.protect self.mutex loop
loop ()
let pop_all (self : 'a t) into : unit =
Mutex.lock self.mutex;
let rec loop () =
if Queue.is_empty self.q then (
if self.closed then raise Closed;
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
Condition.wait self.cond self.mutex;
(loop [@tailcall]) ()
) else
Queue.transfer self.q into
) else (
Queue.transfer self.q into;
Mutex.unlock self.mutex
)
in
UM.protect self.mutex loop
loop ()

View file

@ -1,4 +1,4 @@
(** Simple blocking queue *)
(** Basic Blocking Queue *)
type 'a t
@ -12,13 +12,12 @@ val push : 'a t -> 'a -> unit
val pop : 'a t -> 'a
(** [pop q] pops the next element in [q]. It might block until an element comes.
@raise Closed if the queue was closed before a new element was available. *)
@raise Closed if the queue was closed before a new element was available. *)
val pop_all : 'a t -> 'a Queue.t -> unit
(** [pop_all q into] pops all the elements of [q] and moves them into [into]. if
no element is available, it will block until it successfully transfers at
least one item to [into].
@raise Closed if the queue was closed before a new element was available. *)
(** [pop_all q into] pops all the elements of [q]
and moves them into [into]. It might block until an element comes.
@raise Closed if the queue was closed before a new element was available. *)
val close : _ t -> unit
(** Close the queue, meaning there won't be any more [push] allowed. *)

24
src/client-ocurl/batch.ml Normal file
View file

@ -0,0 +1,24 @@
(** A batch of events: a list of chunks with a cached total length and
    the time at which the batch last became non-empty. *)
type 'a t = {
  mutable len: int; (* total number of elements across [l] *)
  mutable l: 'a list list; (* chunks, most recently pushed first *)
  mutable started: Mtime.t; (* when the batch last became non-empty *)
}

let create () = { len = 0; l = []; started = Mtime_clock.now () }

(** [push self chunk] appends a non-empty [chunk]; the first chunk pushed
    into an empty batch (re)starts the batch timer. *)
let push self chunk =
  (* structural pattern matching instead of physical [!=]/[==] on lists:
     physical equality on lists is an OCaml pitfall, even though it
     happens to behave for the immediate value [[]] *)
  match chunk with
  | [] -> ()
  | _ :: _ ->
    (match self.l with
    | [] -> self.started <- Mtime_clock.now ()
    | _ :: _ -> ());
    self.l <- chunk :: self.l;
    self.len <- self.len + List.length chunk

let[@inline] len self = self.len

let[@inline] time_started self = self.started

(** Remove and return all chunks, leaving the batch empty. *)
let pop_all self =
  let chunks = self.l in
  self.l <- [];
  self.len <- 0;
  chunks

View file

@ -0,0 +1,14 @@
(** List of lists with length *)

type 'a t
(** A batch of ['a] items, stored as a list of chunks. *)

val create : unit -> 'a t
(** Fresh, empty batch. *)

val push : 'a t -> 'a list -> unit
(** Add a chunk of items to the batch. *)

val len : _ t -> int
(** Total number of items currently in the batch. *)

val time_started : _ t -> Mtime.t
(** Time at which the batch most recently became non-empty *)

val pop_all : 'a t -> 'a list list
(** Remove and return all chunks, leaving the batch empty. *)

View file

@ -1,8 +1,42 @@
module Atomic = Opentelemetry_atomic.Atomic
module Proto = Opentelemetry_proto
include Opentelemetry.Lock
let spf = Printf.sprintf
let ( let@ ) = ( @@ )
let[@inline] tid () = Thread.id @@ Thread.self ()
let tid () = Thread.id @@ Thread.self ()
(* verbose stderr logging, enabled when OTEL_OCAML_DEBUG is "1" or "true" *)
let debug_ =
  ref
    (match Sys.getenv_opt "OTEL_OCAML_DEBUG" with
    | Some ("1" | "true") -> true
    | _ -> false)
let default_url = "http://localhost:4318"

(* collector endpoint, seeded from OTEL_EXPORTER_OTLP_ENDPOINT when set.
   [Sys.getenv_opt] replaces the previous [try ... with _] catch-all,
   which could only ever catch [Not_found]. *)
let url =
  ref
    (match Sys.getenv_opt "OTEL_EXPORTER_OTLP_ENDPOINT" with
    | Some u -> u
    | None -> default_url)

let get_url () = !url

let set_url s = url := s
(** Parse the OTLP header string format: comma-separated [key=value]
    pairs. The value is everything after the {e first} ['='], so values
    that themselves contain ['='] (e.g. base64-padded API tokens, which
    the previous [String.split_on_char '='] version rejected) are kept
    intact.
    @raise Failure if a pair contains no ['='] at all. *)
let parse_headers s =
  let parse_header kv =
    match String.index_opt kv '=' with
    | Some i ->
      ( String.sub kv 0 i,
        String.sub kv (i + 1) (String.length kv - i - 1) )
    | None -> failwith "Unexpected format for header"
  in
  String.split_on_char ',' s |> List.map parse_header
let default_headers = []

(* headers sent on every export, seeded from OTEL_EXPORTER_OTLP_HEADERS.
   The catch-all deliberately falls back to the default both when the
   variable is unset and when it fails to parse. *)
let headers =
  ref
    (try parse_headers (Sys.getenv "OTEL_EXPORTER_OTLP_HEADERS")
     with _ -> default_headers)

let get_headers () = !headers

let set_headers s = headers := s

View file

@ -1,37 +1,48 @@
open Opentelemetry_client
open Common_
type t = {
debug: bool;
url: string;
headers: (string * string) list;
batch_timeout_ms: int;
bg_threads: int;
(** Are there background threads, and how many? Default [4]. This will be
adjusted to be at least [1] and at most [32]. *)
ticker_thread: bool;
(** If true, start a thread that regularly checks if signals should be
sent to the collector. Default [true] *)
ticker_interval_ms: int;
(** Interval for ticker thread, in milliseconds. This is only useful if
[ticker_thread] is [true]. This will be clamped between [2 ms] and
some longer interval (maximum [60s] currently). Default 500.
@since 0.7 *)
common: Http_config.t;
(** Common configuration options
@since 0.12*)
self_trace: bool;
}
let pp out self =
let { bg_threads; ticker_thread; ticker_interval_ms; common } = self in
let pp_header ppf (a, b) = Format.fprintf ppf "@[%s: @,%s@]@." a b in
let ppheaders = Format.pp_print_list pp_header in
let {
debug;
url;
headers;
batch_timeout_ms;
bg_threads;
ticker_thread;
ticker_interval_ms;
self_trace;
} =
self
in
Format.fprintf out
"{@[ bg_threads=%d;@ ticker_thread=%B;@ ticker_interval_ms=%d;@ common=%a \
@]}"
bg_threads ticker_thread ticker_interval_ms Http_config.pp common
"{@[ debug=%B;@ url=%S;@ headers=%a;@ batch_timeout_ms=%d; bg_threads=%d;@ \
ticker_thread=%B;@ ticker_interval_ms=%d;@ self_trace=%B @]}"
debug url ppheaders headers batch_timeout_ms bg_threads ticker_thread
ticker_interval_ms self_trace
module Env = Http_config.Env ()
let make =
Env.make
(fun
common
?(bg_threads = 4)
?(ticker_thread = true)
?(ticker_interval_ms = 500)
()
-> { bg_threads; ticker_thread; ticker_interval_ms; common })
(** Build a configuration; every field defaults to the corresponding
    global/environment-derived value. *)
let make ?(debug = !debug_) ?(url = get_url ()) ?(headers = get_headers ())
    ?(batch_timeout_ms = 2_000) ?(bg_threads = 4) ?(ticker_thread = true)
    ?(ticker_interval_ms = 500) ?(self_trace = false) () : t =
  (* clamp the number of sender threads to the documented [1, 32] range *)
  let bg_threads = max 1 (min bg_threads 32) in
  {
    debug;
    url;
    headers;
    batch_timeout_ms;
    bg_threads;
    ticker_thread;
    ticker_interval_ms;
    self_trace;
  }

View file

@ -1,35 +1,53 @@
(** Configuration for the ocurl backend *)
type t = {
type t = private {
debug: bool;
url: string;
(** Url of the endpoint. Default is "http://localhost:4318",
or "OTEL_EXPORTER_OTLP_ENDPOINT" if set. *)
headers: (string * string) list;
(** API headers sent to the endpoint. Default is none or
"OTEL_EXPORTER_OTLP_HEADERS" if set. *)
batch_timeout_ms: int;
(** Number of milliseconds after which we will emit a batch, even
incomplete.
Note that the batch might take longer than that, because this is
only checked when a new event occurs or when a tick
is emitted. Default 2_000. *)
bg_threads: int;
(** Are there background threads, and how many? Default [4]. This will be
adjusted to be at least [1] and at most [32]. *)
(** Are there background threads, and how many? Default [4].
This will be adjusted to be at least [1] and at most [32]. *)
ticker_thread: bool;
(** If true, start a thread that regularly checks if signals should be
sent to the collector. Default [true] *)
(** If true, start a thread that regularly checks if signals should
be sent to the collector. Default [true] *)
ticker_interval_ms: int;
(** Interval for ticker thread, in milliseconds. This is only useful if
[ticker_thread] is [true]. This will be clamped between [2 ms] and
some longer interval (maximum [60s] currently). Default 500.
(** Interval for ticker thread, in milliseconds. This is
only useful if [ticker_thread] is [true].
This will be clamped between [2 ms] and some longer
interval (maximum [60s] currently).
Default 500.
@since 0.7 *)
self_trace: bool;
(** If true, the OTEL library will also emit its own spans. Default [false].
@since 0.7 *)
common: Opentelemetry_client.Http_config.t;
(** Common configuration options
@since 0.12*)
}
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
the future. *)
val pp : Format.formatter -> t -> unit
To build one, use {!make} below. This might be extended with more
fields in the future. *)
val make :
(?bg_threads:int ->
?debug:bool ->
?url:string ->
?headers:(string * string) list ->
?batch_timeout_ms:int ->
?bg_threads:int ->
?ticker_thread:bool ->
?ticker_interval_ms:int ->
?self_trace:bool ->
unit ->
t)
Opentelemetry_client.Http_config.make
(** Make a configuration {!t}. *)
t
(** Make a configuration.
*)
module Env : Opentelemetry_client.Http_config.ENV
val pp : Format.formatter -> t -> unit

View file

@ -1,16 +1,5 @@
(library
(name opentelemetry_client_ocurl)
(public_name opentelemetry-client-ocurl)
(libraries
(re_export opentelemetry)
opentelemetry.atomic
(re_export opentelemetry-client)
(re_export opentelemetry-client.sync)
(re_export curl)
unix
pbrt
threads
mtime
mtime.clock.os
(re_export ezcurl)
(re_export ezcurl.core)))
(libraries opentelemetry opentelemetry.atomic curl pbrt threads mtime
mtime.clock.os ezcurl ezcurl.core))

View file

@ -3,126 +3,532 @@
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
module OTELC = Opentelemetry_client
module OTEL = Opentelemetry
open Common_
open Opentelemetry
include Common_
type error = OTELC.Export_error.t
let needs_gc_metrics = Atomic.make false
open struct
module Notifier = Opentelemetry_client_sync.Notifier_sync
module IO = Opentelemetry_client_sync.Io_sync
let last_gc_metrics = Atomic.make (Mtime_clock.now ())
let timeout_gc_metrics = Mtime.Span.(20 * s)
(** side channel for GC, appended to metrics batch data *)
let gc_metrics = AList.make ()
(** Mini tracing module (disabled if [config.self_trace=false]) *)
module Self_trace = struct
  (* global toggle for exporter self-tracing *)
  let enabled = Atomic.make true

  let add_event (scope : Scope.t) ev = scope.events <- ev :: scope.events

  (* placeholder ids handed out when self-tracing is disabled, so callers
     still get a scope they can write into *)
  let dummy_trace_id_ = Trace_id.create ()
  let dummy_span_id = Span_id.create ()

  (* like [Opentelemetry.Trace.with_], but runs [f] on a throwaway scope
     when self-tracing is disabled *)
  let with_ ?kind ?attrs name f =
    if Atomic.get enabled then
      Opentelemetry.Trace.with_ ?kind ?attrs name f
    else (
      (* do nothing *)
      let scope =
        {
          Scope.trace_id = dummy_trace_id_;
          span_id = dummy_span_id;
          attrs = [];
          events = [];
        }
      in
      f scope
    )
end
module Httpc : OTELC.Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
(** capture current GC metrics if {!needs_gc_metrics} is true
    or it has been a long time since the last GC metrics collection,
    and push them into {!gc_metrics} for later collection *)
let sample_gc_metrics_if_needed () =
  let now = Mtime_clock.now () in
  (* [exchange] clears the request flag even when the timeout also fires *)
  let alarm = Atomic.exchange needs_gc_metrics false in
  let timeout () =
    let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
    Mtime.Span.compare elapsed timeout_gc_metrics > 0
  in
  (* [||] short-circuits: the timeout is only computed when no explicit
     request is pending *)
  if alarm || timeout () then (
    Atomic.set last_gc_metrics now;
    let l =
      OT.Metrics.make_resource_metrics
        ~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
      @@ Opentelemetry.GC_metrics.get_metrics ()
    in
    (* stored in a side channel, picked up with the next metrics batch *)
    AList.add gc_metrics l
  )
type t = Ezcurl_core.t
let n_errors = Atomic.make 0
let create () = Ezcurl.make ()
let n_dropped = Atomic.make 0
let cleanup = Ezcurl.delete
(** Something sent to the collector *)
module Event = struct
open Opentelemetry.Proto
let send (self : t) ~attempt_descr ~url ~headers:user_headers ~decode
(bod : string) : ('a, error) result =
let r =
let headers = user_headers in
Ezcurl.post ~client:self ~headers ~params:[] ~url ~content:(`String bod)
type t =
| E_metric of Metrics.resource_metrics list
| E_trace of Trace.resource_spans list
| E_logs of Logs.resource_logs list
| E_tick
| E_flush_all (** Flush all batches *)
end
(** Something to be sent via HTTP *)
module To_send = struct
open Opentelemetry.Proto
type t =
| Send_metric of Metrics.resource_metrics list list
| Send_trace of Trace.resource_spans list list
| Send_logs of Logs.resource_logs list list
end
(** start a thread in the background, running [f()] *)
let start_bg_thread (f : unit -> unit) : Thread.t =
  let run () =
    (* block these signals in the worker so they are delivered to the
       main thread instead.
       NOTE(review): POSIX specifies that SIGSTOP cannot be blocked and
       is silently ignored in the mask, so listing [Sys.sigstop] here
       should be a harmless no-op — confirm on target platforms. *)
    let signals =
      [
        Sys.sigusr1;
        Sys.sigusr2;
        Sys.sigterm;
        Sys.sigpipe;
        Sys.sigalrm;
        Sys.sigstop;
      ]
    in
    ignore (Thread.sigmask Unix.SIG_BLOCK signals : _ list);
    f ()
  in
  Thread.create run ()
(** Lowercase hexadecimal rendering of every byte of [s], two digits per
    byte (used to dump undecodable response bodies). *)
let str_to_hex (s : string) : string =
  let digits = "0123456789abcdef" in
  String.init
    (2 * String.length s)
    (fun j ->
      let c = Char.code (String.get s (j / 2)) in
      if j mod 2 = 0 then
        digits.[(c lsr 4) land 0xf]
      else
        digits.[c land 0xf])
module Backend_impl : sig
type t
val create : stop:bool Atomic.t -> config:Config.t -> unit -> t
val send_event : t -> Event.t -> unit
val shutdown : t -> unit
end = struct
open Opentelemetry.Proto
type t = {
stop: bool Atomic.t;
cleaned: bool Atomic.t; (** True when we cleaned up after closing *)
config: Config.t;
q: Event.t B_queue.t; (** Queue to receive data from the user's code *)
mutable main_th: Thread.t option; (** Thread that listens on [q] *)
send_q: To_send.t B_queue.t; (** Queue for the send worker threads *)
mutable send_threads: Thread.t array; (** Threads that send data via http *)
}
(** [send_http_ ~stop ~config client encoder ~path ~encode x] encodes [x]
    to protobuf and POSTs it to [config.url ^ path] via curl. Non-2xx
    responses and curl errors bump [n_errors]; [Sys.Break] sets [stop].
    Each phase (encode, POST) is wrapped in a self-trace span. *)
let send_http_ ~stop ~config (client : Curl.t) encoder ~path ~encode x : unit
    =
  let@ _sc =
    Self_trace.with_ ~kind:Span.Span_kind_producer "otel-ocurl.send-http"
  in
  let data =
    (* the encoder is reused: reset, encode, then snapshot as a string *)
    let@ _sc =
      Self_trace.with_ ~kind:Span.Span_kind_internal "encode-proto"
    in
    Pbrt.Encoder.reset encoder;
    encode x encoder;
    Pbrt.Encoder.to_string encoder
  in
  let url =
    (* drop a trailing '/' so that [url ^ path] has a single separator *)
    let url = config.Config.url in
    if url <> "" && String.get url (String.length url - 1) = '/' then
      String.sub url 0 (String.length url - 1)
    else
      url
  in
  let url = url ^ path in
  if !debug_ || config.debug then
    Printf.eprintf "opentelemetry: send http POST to %s (%dB)\n%!" url
      (String.length data);
  let headers =
    ("Content-Type", "application/x-protobuf") :: config.headers
  in
  match
    let@ _sc =
      Self_trace.with_ ~kind:Span.Span_kind_internal "curl.post"
        ~attrs:[ "sz", `Int (String.length data); "url", `String url ]
    in
    Ezcurl.post ~headers ~client ~params:[] ~url ~content:(`String data) ()
  with
  | Ok { code; _ } when code >= 200 && code < 300 ->
    if !debug_ || config.debug then
      Printf.eprintf "opentelemetry: got response code=%d\n%!" code
  | Ok { code; body; headers = _; info = _ } ->
    (* HTTP error status: count it, record it on the self-trace span,
       and (in debug) try to decode the body as an OTLP Status *)
    Atomic.incr n_errors;
    Self_trace.add_event _sc
    @@ Opentelemetry.Event.make "error" ~attrs:[ "code", `Int code ];
    if !debug_ || config.debug then (
      let dec = Pbrt.Decoder.of_string body in
      let body =
        try
          let status = Status.decode_pb_status dec in
          Format.asprintf "%a" Status.pp_status status
        with _ ->
          spf "(could not decode status)\nraw bytes: %s" (str_to_hex body)
      in
      Printf.eprintf
        "opentelemetry: error while sending:\n code=%d\n %s\n%!" code body
    );
    ()
  | exception Sys.Break ->
    Printf.eprintf "ctrl-c captured, stopping\n%!";
    Atomic.set stop true
  | Error (code, msg) ->
    (* TODO: log error _via_ otel? *)
    Atomic.incr n_errors;
    Printf.eprintf
      "opentelemetry: export failed:\n %s\n curl code: %s\n url: %s\n%!"
      msg (Curl.strerror code) url;
    (* avoid crazy error loop *)
    Thread.delay 3.
let send_logs_http ~stop ~config (client : Curl.t) encoder
(l : Logs.resource_logs list list) : unit =
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
let@ _sp =
Self_trace.with_ ~kind:Span_kind_producer "send-logs"
~attrs:[ "n", `Int (List.length l) ]
in
let x =
Logs_service.default_export_logs_service_request ~resource_logs:l ()
in
send_http_ ~stop ~config client encoder ~path:"/v1/logs"
~encode:Logs_service.encode_pb_export_logs_service_request x
let send_metrics_http ~stop ~config curl encoder
(l : Metrics.resource_metrics list list) : unit =
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
let@ _sp =
Self_trace.with_ ~kind:Span_kind_producer "send-metrics"
~attrs:[ "n", `Int (List.length l) ]
in
let x =
Metrics_service.default_export_metrics_service_request ~resource_metrics:l
()
in
match r with
| Error (code, msg) ->
let err =
`Failure
(spf
"sending signals via http POST failed:\n\
\ %s\n\
\ curl code: %s\n\
\ url: %s\n\
%!"
msg (Curl.strerror code) url)
in
Error err
| Ok { code; body; _ } when code >= 200 && code < 300 ->
(match decode with
| `Ret x -> Ok x
| `Dec f ->
let dec = Pbrt.Decoder.of_string body in
(try Ok (f dec)
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e) bt))))
| Ok { code; body; _ } ->
let err =
OTELC.Export_error.decode_invalid_http_response ~attempt_descr ~url
~code body
in
Error err
send_http_ ~stop ~config curl encoder ~path:"/v1/metrics"
~encode:Metrics_service.encode_pb_export_metrics_service_request x
(* Send a batch of trace spans to the collector via HTTP POST on
   [/v1/traces]. The export itself is traced through [Self_trace]. *)
let send_traces_http ~stop ~config curl encoder
(l : Trace.resource_spans list list) : unit =
(* flatten the list-of-batches into one list (order is reversed, which is
   harmless: resource_spans entries are independent) *)
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
let@ _sp =
Self_trace.with_ ~kind:Span_kind_producer "send-traces"
~attrs:[ "n", `Int (List.length l) ]
in
(* wrap in the protobuf request type, then encode and POST *)
let x =
Trace_service.default_export_trace_service_request ~resource_spans:l ()
in
send_http_ ~stop ~config curl encoder ~path:"/v1/traces"
~encode:Trace_service.encode_pb_export_trace_service_request x
let[@inline] send_event (self : t) ev : unit = B_queue.push self.q ev
(** Sender thread: in a loop, pops the next message from [self.send_q]
    and sends it via http. Each thread owns its own curl client and
    protobuf encoder. Terminates when [self.stop] is set or the queue
    is closed. *)
let bg_thread_loop (self : t) : unit =
Ezcurl.with_client ?set_opts:None @@ fun client ->
let stop = self.stop in
let config = self.config in
(* one encoder per thread, reused across messages *)
let encoder = Pbrt.Encoder.create () in
try
while not (Atomic.get stop) do
let msg = B_queue.pop self.send_q in
match msg with
| To_send.Send_trace tr ->
send_traces_http ~stop ~config client encoder tr
| To_send.Send_metric ms ->
send_metrics_http ~stop ~config client encoder ms
| To_send.Send_logs logs ->
send_logs_http ~stop ~config client encoder logs
done
with B_queue.Closed -> ()
type batches = {
traces: Proto.Trace.resource_spans Batch.t;
logs: Proto.Logs.resource_logs Batch.t;
metrics: Proto.Metrics.resource_metrics Batch.t;
}
(* hard cap on batch size: a batch is sent once it holds this many items,
   even if its timeout has not elapsed yet *)
let batch_max_size_ = 200

(* [should_send_batch_ ~config ~now b] is true iff [b] is non-empty and
   has either reached [batch_max_size_] or been open longer than
   [config.batch_timeout_ms] *)
let should_send_batch_ ~config ~now (b : _ Batch.t) : bool =
Batch.len b > 0
&& (Batch.len b >= batch_max_size_
||
let timeout = Mtime.Span.(config.Config.batch_timeout_ms * ms) in
let elapsed = Mtime.span now (Batch.time_started b) in
Mtime.Span.compare elapsed timeout >= 0)
(* Main dispatch loop: drains incoming events from [self.q], accumulates
   them into per-signal batches, and pushes full (or timed-out) batches
   onto [self.send_q] for the sender threads to export. Exits when
   [self.stop] is set or [self.q] is closed. *)
let main_thread_loop (self : t) : unit =
(* scratch queue reused on every iteration of the loop *)
let local_q = Queue.create () in
let config = self.config in
(* keep track of batches *)
let batches =
{
traces = Batch.create ();
logs = Batch.create ();
metrics = Batch.create ();
}
in
let send_metrics () =
(* also flush accumulated GC metrics (module-level [gc_metrics];
   presumably filled elsewhere — not visible here) *)
let metrics = AList.pop_all gc_metrics :: Batch.pop_all batches.metrics in
B_queue.push self.send_q (To_send.Send_metric metrics)
in
let send_logs () =
B_queue.push self.send_q (To_send.Send_logs (Batch.pop_all batches.logs))
in
let send_traces () =
B_queue.push self.send_q
(To_send.Send_trace (Batch.pop_all batches.traces))
in
try
while not (Atomic.get self.stop) do
(* read multiple events at once *)
B_queue.pop_all self.q local_q;
(* are we asked to flush all events? *)
let must_flush_all = ref false in
(* how to process a single event *)
let process_ev (ev : Event.t) : unit =
match ev with
| Event.E_metric m -> Batch.push batches.metrics m
| Event.E_trace tr -> Batch.push batches.traces tr
| Event.E_logs logs -> Batch.push batches.logs logs
| Event.E_tick ->
(* the only impact of "tick" is that it wakes us up regularly *)
()
| Event.E_flush_all -> must_flush_all := true
in
Queue.iter process_ev local_q;
Queue.clear local_q;
if !must_flush_all then (
(* flush: send every non-empty batch regardless of size/timeout *)
if Batch.len batches.metrics > 0 then send_metrics ();
if Batch.len batches.logs > 0 then send_logs ();
if Batch.len batches.traces > 0 then send_traces ()
) else (
(* normal path: send only batches that are full or timed out *)
let now = Mtime_clock.now () in
if should_send_batch_ ~config ~now batches.metrics then
send_metrics ();
if should_send_batch_ ~config ~now batches.traces then send_traces ();
if should_send_batch_ ~config ~now batches.logs then send_logs ()
)
done
with B_queue.Closed -> ()
(* Create the backend state and start its threads: one main dispatch
   thread (see [main_thread_loop]) plus the sender threads (see
   [bg_thread_loop]). *)
let create ~stop ~config () : t =
(* always at least 2 sender threads *)
let n_send_threads = max 2 config.Config.bg_threads in
let self =
{
stop;
config;
q = B_queue.create ();
send_threads = [||];
send_q = B_queue.create ();
cleaned = Atomic.make false;
main_th = None;
}
in
let main_th = start_bg_thread (fun () -> main_thread_loop self) in
self.main_th <- Some main_th;
self.send_threads <-
Array.init n_send_threads (fun _i ->
start_bg_thread (fun () -> bg_thread_loop self));
self
(* Shut the backend down. Always sets [stop]; the first call additionally
   flushes pending batches and joins all threads in order. Idempotent
   thanks to the [cleaned] flag. *)
let shutdown self : unit =
Atomic.set self.stop true;
if not (Atomic.exchange self.cleaned true) then (
(* empty batches *)
send_event self Event.E_flush_all;
(* close the incoming queue, wait for the thread to finish
before we start cutting off the background threads, so that they
have time to receive the final batches *)
B_queue.close self.q;
Option.iter Thread.join self.main_th;
(* close send queues, then wait for all threads *)
B_queue.close self.send_q;
Array.iter Thread.join self.send_threads
)
end
module Consumer_impl = OTELC.Generic_http_consumer.Make (IO) (Notifier) (Httpc)
let create_backend ?(stop = Atomic.make false)
?(config : Config.t = Config.make ()) () : (module Collector.BACKEND) =
let module M = struct
open Opentelemetry.Proto
open Opentelemetry.Collector
let consumer ?(config = Config.make ()) () :
Opentelemetry_client.Consumer.any_signal_l_builder =
let n_workers = max 2 (min 32 config.bg_threads) in
let ticker_task =
if config.ticker_thread then
Some (float config.ticker_interval_ms /. 1000.)
else
None
let backend = Backend_impl.create ~stop ~config ()
let send_trace : Trace.resource_spans list sender =
{
send =
(fun l ~ret ->
Backend_impl.send_event backend (Event.E_trace l);
ret ());
}
let last_sent_metrics = Atomic.make (Mtime_clock.now ())
(* send metrics from time to time *)
let timeout_sent_metrics = Mtime.Span.(5 * s)
let signal_emit_gc_metrics () =
if !debug_ || config.debug then
Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
Atomic.set needs_gc_metrics true
let additional_metrics () : Metrics.resource_metrics list =
(* add exporter metrics to the lot? *)
let last_emit = Atomic.get last_sent_metrics in
let now = Mtime_clock.now () in
let add_own_metrics =
let elapsed = Mtime.span last_emit now in
Mtime.Span.compare elapsed timeout_sent_metrics > 0
in
(* there is a possible race condition here, as several threads might update
metrics at the same time. But that's harmless. *)
if add_own_metrics then (
Atomic.set last_sent_metrics now;
let open OT.Metrics in
[
make_resource_metrics
[
sum ~name:"otel.export.dropped" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
];
sum ~name:"otel.export.errors" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
];
];
]
) else
[]
let send_metrics : Metrics.resource_metrics list sender =
{
send =
(fun m ~ret ->
let m = List.rev_append (additional_metrics ()) m in
Backend_impl.send_event backend (Event.E_metric m);
ret ());
}
let send_logs : Logs.resource_logs list sender =
{
send =
(fun m ~ret ->
Backend_impl.send_event backend (Event.E_logs m);
ret ());
}
let on_tick_cbs_ = Atomic.make (AList.make ())
let set_on_tick_callbacks = Atomic.set on_tick_cbs_
let tick () =
sample_gc_metrics_if_needed ();
Backend_impl.send_event backend Event.E_tick;
List.iter (fun f -> f ()) (AList.get @@ Atomic.get on_tick_cbs_)
let cleanup () = Backend_impl.shutdown backend
end in
(module M)
(** thread that calls [tick()] regularly, to help enforce timeouts *)
let setup_ticker_thread ~stop ~sleep_ms (module B : Collector.BACKEND) () =
let sleep_s = float sleep_ms /. 1000. in
let tick_loop () =
try
while not @@ Atomic.get stop do
Thread.delay sleep_s;
B.tick ()
done
with B_queue.Closed -> ()
in
Consumer_impl.consumer ~override_n_workers:n_workers ~on_tick:OTEL.Sdk.tick
~ticker_task ~config:config.common ()
start_bg_thread tick_loop
let create_exporter ?(config = Config.make ()) () : OTEL.Exporter.t =
let consumer = consumer ~config () in
let bq =
Opentelemetry_client_sync.Bounded_queue_sync.create
~high_watermark:OTELC.Bounded_queue.Defaults.high_watermark ()
in
let setup_ ?(stop = Atomic.make false) ?(config : Config.t = Config.make ()) ()
=
let ((module B) as backend) = create_backend ~stop ~config () in
Opentelemetry.Collector.set_backend backend;
OTELC.Exporter_queued.create ~clock:OTEL.Clock.ptime_clock ~q:bq ~consumer ()
if config.url <> get_url () then set_url config.url;
Atomic.set Self_trace.enabled config.self_trace;
let create_backend = create_exporter
if config.ticker_thread then (
(* at most a minute *)
let sleep_ms = min 60_000 (max 2 config.ticker_interval_ms) in
ignore (setup_ticker_thread ~stop ~sleep_ms backend () : Thread.t)
);
let setup_ ~config () : OTEL.Exporter.t =
let exporter = create_exporter ~config () in
OTEL.Sdk.set ~traces:config.common.traces ~metrics:config.common.metrics
~logs:config.common.logs exporter;
B.cleanup
Option.iter
(fun min_level -> OTEL.Self_debug.to_stderr ~min_level ())
config.common.log_level;
let setup ?stop ?config ?(enable = true) () =
if enable then (
let cleanup = setup_ ?stop ?config () in
at_exit cleanup
)
OTEL.Self_debug.log OTEL.Self_debug.Info (fun () ->
"opentelemetry: ocurl exporter installed");
OTELC.Self_trace.set_enabled config.common.self_trace;
if config.common.self_metrics then Opentelemetry.Sdk.setup_self_metrics ();
exporter
let remove_exporter () : unit =
let open Opentelemetry_client_sync in
(* used to wait *)
let sq = Sync_queue.create () in
OTEL.Sdk.remove () ~on_done:(fun () -> Sync_queue.push sq ());
Sync_queue.pop sq
let remove_backend = remove_exporter
let setup ?(config : Config.t = Config.make ()) ?(enable = true) () =
if enable && not config.common.sdk_disabled then
ignore (setup_ ~config () : OTEL.Exporter.t)
let with_setup ?(after_shutdown = ignore) ?(config : Config.t = Config.make ())
?(enable = true) () f =
if enable && not config.common.sdk_disabled then (
let exp = setup_ ~config () in
Fun.protect f ~finally:(fun () ->
remove_exporter ();
after_shutdown exp)
let with_setup ?stop ?config ?(enable = true) () f =
if enable then (
let cleanup = setup_ ?stop ?config () in
Fun.protect ~finally:cleanup f
) else
f ()

View file

@ -3,41 +3,44 @@
https://opentelemetry.io/docs/reference/specification/protocol/exporter/
*)
val get_url : unit -> string
val set_url : string -> unit
(** Url of the endpoint. Default is "http://localhost:4318",
or "OTEL_EXPORTER_OTLP_ENDPOINT" if set. *)
val get_headers : unit -> (string * string) list
val set_headers : (string * string) list -> unit
(** Set http headers that are sent on every http query to the collector. *)
module Atomic = Opentelemetry_atomic.Atomic
module Config = Config
val consumer :
?config:Config.t -> unit -> Opentelemetry_client.Consumer.any_signal_l_builder
(** Consumer that pulls from a queue *)
val create_backend :
?stop:bool Atomic.t ->
?config:Config.t ->
unit ->
(module Opentelemetry.Collector.BACKEND)
val create_exporter : ?config:Config.t -> unit -> Opentelemetry.Exporter.t
(** @since NEXT_RELEASE *)
val create_backend : ?config:Config.t -> unit -> Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup : ?config:Config.t -> ?enable:bool -> unit -> unit
val setup :
?stop:bool Atomic.t -> ?config:Config.t -> ?enable:bool -> unit -> unit
(** Setup endpoint. This modifies {!Opentelemetry.Collector.backend}.
@param enable
actually setup the backend (default true). This can be used to
enable/disable the setup depending on CLI arguments or environment.
@param enable actually setup the backend (default true). This can
be used to enable/disable the setup depending on CLI arguments
or environment.
@param config configuration to use
@param stop
an atomic boolean. When it becomes true, background threads will all stop
after a little while. *)
val remove_exporter : unit -> unit
(** @since NEXT_RELEASE *)
val remove_backend : unit -> unit
[@@deprecated "use remove_exporter"]
(** @since 0.12 *)
@param stop an atomic boolean. When it becomes true, background threads
will all stop after a little while.
*)
val with_setup :
?after_shutdown:(Opentelemetry.Exporter.t -> unit) ->
?stop:bool Atomic.t ->
?config:Config.t ->
?enable:bool ->
unit ->
(unit -> 'a) ->
'a
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up after
[f()] returns. See {!setup} for more details. *)
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up
after [f()] returns
See {!setup} for more details. *)

View file

@ -1,138 +0,0 @@
(** Interface for a thread-safe, bounded queue.
After the high watermark is reached, pushing items into the queue will
instead discard them. *)
exception Closed
(** Raised when pushing into a closed queue *)
type 'a pop_result =
[ `Empty
| `Closed
| `Item of 'a
]
module Common = struct
type t = {
closed: unit -> bool;
(** Is the queue closed {b for writing}. Consumers should only use
[try_pop] because a queue that's closed-for-writing might still
contain straggler items that need to be consumed.
This should be as fast and cheap as possible. *)
num_discarded: unit -> int; (** How many items were discarded? *)
size: unit -> int;
(** Snapshot of how many items are currently in the queue *)
high_watermark: unit -> int; (** Maximum size of the queue *)
}
let[@inline] num_discarded self = self.num_discarded ()
let[@inline] closed (self : t) : bool = self.closed ()
let[@inline] size (self : t) : int = self.size ()
let[@inline] high_watermark self = self.high_watermark ()
end
(** Receiving side *)
module Recv = struct
type 'a t = {
on_non_empty: (unit -> unit) -> unit;
(** [on_non_empty f] registers [f] to be called whenever the queue
transitions from empty to non-empty. *)
try_pop: unit -> 'a pop_result; (** Try to pop an item right now. *)
common: Common.t;
}
let[@inline] try_pop (self : _ t) : _ pop_result = self.try_pop ()
let[@inline] on_non_empty (self : _ t) f = self.on_non_empty f
let[@inline] closed (self : _ t) : bool = self.common.closed ()
let[@inline] num_discarded self = self.common.num_discarded ()
let[@inline] size self = self.common.size ()
let[@inline] high_watermark self = self.common.high_watermark ()
let map (type a b) (f : a -> b) (self : a t) : b t =
{
self with
try_pop =
(fun () ->
match self.try_pop () with
| (`Closed | `Empty) as r -> r
| `Item x -> `Item (f x));
}
end
(** Sending side *)
module Send = struct
type 'a t = {
push: 'a list -> unit;
(** Push items. This might discard some of them.
@raise Closed if the queue is closed. *)
close: unit -> unit;
(** Close the queue. Items currently in the queue will still be
accessible to consumers until the queue is emptied out. Idempotent.
*)
common: Common.t;
}
let[@inline] push (self : _ t) x : unit = self.push x
let[@inline] close (self : _ t) : unit = self.close ()
let[@inline] closed (self : _ t) : bool = self.common.closed ()
let[@inline] num_discarded self = self.common.num_discarded ()
let[@inline] size self = self.common.size ()
let[@inline] high_watermark self = self.common.high_watermark ()
let map (type a b) (f : a list -> b list) (self : b t) : a t =
{
self with
push =
(fun xs ->
match f xs with
| [] -> ()
| ys -> self.push ys);
}
(** Turn the writing end of the queue into an emitter.
@param close_queue_on_close
if true, closing the emitter will close the queue *)
let to_emitter ~signal_name ~close_queue_on_close (self : 'a t) :
'a Opentelemetry_emitter.Emitter.t =
let closed () = closed self in
let enabled () = not (closed ()) in
let emit x = if x <> [] then push self x in
let tick ~mtime:_ = () in
(* the exporter will emit these, the queue is shared *)
let self_metrics ~now:_ () = [] in
(* NOTE: we cannot actually flush, only close. Emptying the queue is
fundamentally asynchronous because it's done by consumers *)
let flush_and_close () = if close_queue_on_close then close self in
{ signal_name; closed; enabled; emit; tick; flush_and_close; self_metrics }
end
type 'a t = {
send: 'a Send.t;
recv: 'a Recv.t;
}
(** A bounded queue, with multiple producers and potentially multiple consumers.
All functions must be thread-safe except for [try_pop] which might not have
to be depending on the context (e.g. a Lwt-specific queue implementation
will consume only from the Lwt thread). *)
module Defaults = struct
(** The default high watermark *)
let high_watermark : int = 2048
end

View file

@ -1,6 +0,0 @@
module OTEL = Opentelemetry
module Proto = Opentelemetry_proto
let spf = Printf.sprintf
let ( let@ ) = ( @@ )

View file

@ -1,49 +0,0 @@
(** Consumer that accepts items from a bounded queue and processes them. *)
open Common_
type t = {
active: unit -> Aswitch.t;
shutdown: unit -> unit;
(** Shutdown the consumer as soon as possible. [active] will be turned off
once the consumer is fully shut down. *)
tick: unit -> unit;
(** Regularly called, eg to emit metrics, check timeouts, etc. Must be
thread safe. *)
self_metrics: clock:OTEL.Clock.t -> unit -> OTEL.Metrics.t list;
(** Self observing metrics *)
}
(** A consumer for signals of type ['a] *)
type consumer = t
let[@inline] active (self : t) : Aswitch.t = self.active ()
let[@inline] shutdown (self : t) : unit = self.shutdown ()
let[@inline] self_metrics ~clock self : _ list = self.self_metrics ~clock ()
(** [on_stop e f] calls [f()] when [e] stops, or now if it's already stopped *)
let on_stop self f = Aswitch.on_turn_off (self.active ()) f
module Builder = struct
type 'a t = { start_consuming: 'a Bounded_queue.Recv.t -> consumer }
(** A builder that will create a consumer for a given queue, start the
consumer so it starts consuming from the queue, and return the consumer.
*)
let start_consuming (self : _ t) bq = self.start_consuming bq
let map (type a b) (f : a -> b) (self : b t) : a t =
{
start_consuming =
(fun q ->
let q = Bounded_queue.Recv.map f q in
self.start_consuming q);
}
end
type any_signal_l_builder = OTEL.Any_signal_l.t Builder.t
type resource_signal_builder = Resource_signal.t Builder.t
(** The type that's useful for HTTP backends *)

View file

@ -1,18 +0,0 @@
(library
(name opentelemetry_client)
(public_name opentelemetry-client)
(flags :standard -open Opentelemetry_util -open Opentelemetry_atomic)
(libraries
opentelemetry
opentelemetry.util
opentelemetry.emitter
opentelemetry.proto
opentelemetry.domain
mtime
mtime.clock.os
unix
pbrt
yojson
threads.posix)
(synopsis
"Basic exporters, as well as common types and logic shared between exporters"))

View file

@ -1,43 +0,0 @@
(** Combine multiple emitters into one. *)
open Opentelemetry_emitter.Emitter
type closing_behavior =
[ `Close_when_all_closed
| `Close_when_one_closed
]
(** When to close the combined emitter:
- [`Close_when_all_closed]: closed when all the emitters that are combined
are closed
- [`Close_when_one_closed]: closed as soon as one of the emitters is closed
*)
(** [combine_l es] is an emitter that sends signals to every emitter in [es].
    Precondition: [es] is non-empty (the public shadowing wrapper below
    handles the empty case).
    @param closing
    when is this emitter closing. Default [`Close_when_all_closed]. *)
let combine_l ?(closing : closing_behavior = `Close_when_all_closed)
(es : 'a t list) : 'a t =
assert (es <> []);
(* the combined emitter borrows the signal name of the first emitter *)
let signal_name = (List.hd es).signal_name in
let closed =
fun () ->
match closing with
| `Close_when_all_closed -> List.for_all closed es
| `Close_when_one_closed -> List.exists closed es
in
(* self metrics are the concatenation of each emitter's self metrics *)
let self_metrics ~now () =
List.flatten @@ List.map (fun e -> e.self_metrics ~now ()) es
in
let enabled () = not (closed ()) in
let emit x = if x <> [] then List.iter (fun e -> emit e x) es in
let tick ~mtime = List.iter (tick ~mtime) es in
let flush_and_close () = List.iter flush_and_close es in
{ signal_name; self_metrics; closed; enabled; emit; tick; flush_and_close }

(* public wrapper: tolerate the empty list by returning [dummy] *)
let combine_l ?closing es : _ t =
match es with
| [] -> dummy
| _ -> combine_l ?closing es

(* [combine e1 e2] combines exactly two emitters *)
let combine e1 e2 : _ t = combine_l [ e1; e2 ]

View file

@ -1,8 +0,0 @@
open Common_.OTEL
let add_interval_limiter il (e : _ Emitter.t) : _ Emitter.t =
let emit xs = if Interval_limiter.make_attempt il then Emitter.emit e xs in
{ e with emit }
let limit_interval ~min_interval (e : _ Emitter.t) : _ Emitter.t =
add_interval_limiter (Interval_limiter.create ~min_interval ()) e

View file

@ -1,25 +0,0 @@
(** Limit frequency at which the emitter emits.
This puts a hard floor on the interval between two consecutive successful
[emit]. Attempts to emit too early are simply discarded.
The use case for this is metrics: it's possible, for a gauge, to just drop
some entries if we've been emitting them too frequently.
{b NOTE}: it's better to do [limit_interval ~min_interval (add_batching e)]
than [add_batching (limit_interval ~min_interval e)], because in the later
case we might be dismissing a whole large batch at once.
@since NEXT_RELEASE *)
open Common_.OTEL
val add_interval_limiter : Interval_limiter.t -> 'a Emitter.t -> 'a Emitter.t
(** [add_interval_limiter il e] is a new emitter [e'] that can only emit signals
less frequently than [Interval_limiter.min_interval il].
Trying to emit too early will simply drop the signal. *)
val limit_interval : min_interval:Mtime.span -> 'a Emitter.t -> 'a Emitter.t
(** [limit_interval ~min_interval e] is
[add_interval_limiter (Interval_limiter.create ~min_interval ()) e] *)

View file

@ -1,34 +0,0 @@
open Opentelemetry_emitter
(* [add_sampler sampler e] wraps emitter [e] so that every individual
   signal goes through [sampler] first; only accepted signals reach [e].
   The sampler's measured acceptance rate is exposed as an extra gauge
   in the self metrics. *)
let add_sampler (self : Sampler.t) (e : _ Emitter.t) : _ Emitter.t =
let signal_name = e.signal_name in
let enabled () = e.enabled () in
let closed () = Emitter.closed e in
let flush_and_close () = Emitter.flush_and_close e in
let tick ~mtime = Emitter.tick e ~mtime in
(* metric name for the observed (actual) sampling rate *)
let m_rate = Printf.sprintf "otel.sdk.%s.sampler.actual-rate" signal_name in
let self_metrics ~now () =
Opentelemetry_core.Metrics.(
gauge ~name:m_rate [ float ~now (Sampler.actual_rate self) ])
:: e.self_metrics ~now ()
in
let emit l =
(* sample each signal independently; emit only the accepted ones *)
if l <> [] && e.enabled () then (
let accepted = List.filter (fun _x -> Sampler.accept self) l in
if accepted <> [] then Emitter.emit e accepted
)
in
{
Emitter.closed;
self_metrics;
signal_name;
enabled;
flush_and_close;
tick;
emit;
}

(* convenience: sample with a fresh probabilistic sampler *)
let sample ~proba_accept e = add_sampler (Sampler.create ~proba_accept ()) e

View file

@ -1,10 +0,0 @@
open Opentelemetry_emitter
val add_sampler : Sampler.t -> 'a Emitter.t -> 'a Emitter.t
(** [add_sampler sampler e] is a new emitter that uses the [sampler] on each
individual signal before passing them to [e]. This means only
[Sampler.proba_accept sampler] of the signals will actually be emitted. *)
val sample : proba_accept:float -> 'a Emitter.t -> 'a Emitter.t
(** [sample ~proba_accept e] is
[add_sampler (Sampler.create ~proba_accept ()) e] *)

View file

@ -1,64 +0,0 @@
(** Error that can occur during export *)
type attempt_descr = string
type t =
[ `Status of int * Opentelemetry.Proto.Status.status * attempt_descr
| `Failure of string
| `Sysbreak
]
let str_to_hex (s : string) : string =
Opentelemetry_util.Util_bytes_.bytes_to_hex (Bytes.unsafe_of_string s)
(** Report the error on stderr (via [Opentelemetry.Self_debug]).
    @param level
    [`Debug] forces every report down to debug level; [`Auto] picks a
    level from the error kind (Info for ctrl-c, Error otherwise). *)
let report_err ~level:(provided_level : [ `Debug | `Auto ]) (err : t) : unit =
let compute_level lvl =
match provided_level with
| `Debug -> Opentelemetry.Self_debug.Debug
| `Auto -> lvl
in
match err with
| `Sysbreak ->
Opentelemetry.Self_debug.log (compute_level Info) (fun () ->
"opentelemetry: ctrl-c captured, stopping")
| `Failure msg ->
Opentelemetry.Self_debug.log (compute_level Error) (fun () ->
Printf.sprintf "opentelemetry: export failed: %s" msg)
| `Status
( code,
{
Opentelemetry.Proto.Status.code = scode;
message;
details;
_presence = _;
},
descr ) ->
Opentelemetry.Self_debug.log (compute_level Error) (fun () ->
(* status [details] are raw bytes; print each quoted *)
let pp_details out l =
List.iter
(fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
l
in
Format.asprintf
"@[<2>opentelemetry: export failed with@ http code=%d@ attempt: %s@ \
status {@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]"
code descr scode
(Bytes.unsafe_to_string message)
pp_details details)
(* Build an export error from a non-2xx HTTP response: attempt to decode
   the body as a protobuf [Status]; if decoding fails, fall back to a
   [`Failure] carrying the hex-dumped body and the decoding backtrace. *)
let decode_invalid_http_response ~attempt_descr ~code ~url (body : string) : t =
try
let dec = Pbrt.Decoder.of_string body in
let status = Opentelemetry.Proto.Status.decode_pb_status dec in
`Status (code, status, attempt_descr)
with e ->
let bt = Printexc.get_backtrace () in
`Failure
(Printf.sprintf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
HTTP body: %s\n\
%s"
url code (Printexc.to_string e) (str_to_hex body) bt)

View file

@ -1,30 +0,0 @@
(** Combine multiple exporters into one *)
open Common_
(* [combine_l es] forwards every exported signal to each exporter in [es].
   The combined [active] switch turns off only once every constituent
   exporter has turned off. The empty list yields a dummy exporter. *)
let combine_l (es : OTEL.Exporter.t list) : OTEL.Exporter.t =
match es with
| [] -> OTEL.Exporter.dummy ()
| _ ->
(* active turns off once all constituent exporters are off *)
let active, trigger = Aswitch.create () in
(* count of exporters still active; decremented as each turns off *)
let remaining = Atomic.make (List.length es) in
List.iter
(fun e ->
Aswitch.on_turn_off (OTEL.Exporter.active e) (fun () ->
(* [fetch_and_add] returns the previous value: the last
   exporter to turn off flips the combined switch *)
if Atomic.fetch_and_add remaining (-1) = 1 then
Aswitch.turn_off trigger))
es;
{
OTEL.Exporter.export =
(fun sig_ -> List.iter (fun e -> e.OTEL.Exporter.export sig_) es);
active = (fun () -> active);
shutdown = (fun () -> List.iter OTEL.Exporter.shutdown es);
self_metrics =
(fun () ->
List.flatten @@ List.map (fun e -> e.OTEL.Exporter.self_metrics ()) es);
}

(** [combine exp1 exp2] is the exporter that emits signals to both [exp1] and
[exp2]. *)
let combine exp1 exp2 : OTEL.Exporter.t = combine_l [ exp1; exp2 ]

View file

@ -1,390 +0,0 @@
type protocol =
| Http_protobuf
| Http_json
type log_level = Opentelemetry.Self_debug.level option
type rest = unit
type t = {
debug: bool;
log_level: log_level;
sdk_disabled: bool;
url_traces: string;
url_metrics: string;
url_logs: string;
headers: (string * string) list;
headers_traces: (string * string) list;
headers_metrics: (string * string) list;
headers_logs: (string * string) list;
protocol: protocol;
timeout_ms: int;
timeout_traces_ms: int;
timeout_metrics_ms: int;
timeout_logs_ms: int;
traces: Opentelemetry.Provider_config.t;
metrics: Opentelemetry.Provider_config.t;
logs: Opentelemetry.Provider_config.t;
self_trace: bool;
self_metrics: bool;
http_concurrency_level: int option;
retry_max_attempts: int;
retry_initial_delay_ms: float;
retry_max_delay_ms: float;
retry_backoff_multiplier: float;
_rest: rest;
}
open struct
let ppiopt out i =
match i with
| None -> Format.fprintf out "None"
| Some i -> Format.fprintf out "%d" i
let pp_header ppf (a, b) = Format.fprintf ppf "@[%s: @,%s@]@." a b
let ppheaders out l =
Format.fprintf out "[@[%a@]]" (Format.pp_print_list pp_header) l
let pp_protocol out = function
| Http_protobuf -> Format.fprintf out "http/protobuf"
| Http_json -> Format.fprintf out "http/json"
let pp_log_level out = function
| None -> Format.fprintf out "none"
| Some level ->
Format.fprintf out "%s" (Opentelemetry.Self_debug.string_of_level level)
let pp_provider_config out (c : Opentelemetry.Provider_config.t) =
Format.fprintf out "{batch=%a;@ timeout=%a}" ppiopt c.batch Mtime.Span.pp
c.timeout
end
let pp out (self : t) : unit =
let {
debug;
log_level;
sdk_disabled;
self_trace;
self_metrics;
url_traces;
url_metrics;
url_logs;
headers;
headers_traces;
headers_metrics;
headers_logs;
protocol;
timeout_ms;
timeout_traces_ms;
timeout_metrics_ms;
timeout_logs_ms;
traces;
metrics;
logs;
http_concurrency_level;
retry_max_attempts;
retry_initial_delay_ms;
retry_max_delay_ms;
retry_backoff_multiplier;
_rest = _;
} =
self
in
Format.fprintf out
"{@[ debug=%B;@ log_level=%a;@ sdk_disabled=%B;@ self_trace=%B;@ \
self_metrics=%B;@ url_traces=%S;@ url_metrics=%S;@ url_logs=%S;@ \
@[<2>headers=@,\
%a@];@ @[<2>headers_traces=@,\
%a@];@ @[<2>headers_metrics=@,\
%a@];@ @[<2>headers_logs=@,\
%a@];@ protocol=%a;@ timeout_ms=%d;@ timeout_traces_ms=%d;@ \
timeout_metrics_ms=%d;@ timeout_logs_ms=%d;@ traces=%a;@ metrics=%a;@ \
logs=%a;@ http_concurrency_level=%a;@ retry_max_attempts=%d;@ \
retry_initial_delay_ms=%.0f;@ retry_max_delay_ms=%.0f;@ \
retry_backoff_multiplier=%.1f @]}"
debug pp_log_level log_level sdk_disabled self_trace self_metrics url_traces
url_metrics url_logs ppheaders headers ppheaders headers_traces ppheaders
headers_metrics ppheaders headers_logs pp_protocol protocol timeout_ms
timeout_traces_ms timeout_metrics_ms timeout_logs_ms pp_provider_config
traces pp_provider_config metrics pp_provider_config logs ppiopt
http_concurrency_level retry_max_attempts retry_initial_delay_ms
retry_max_delay_ms retry_backoff_multiplier
let default_url = "http://localhost:4318"
type 'k make =
?debug:bool ->
?log_level:log_level ->
?sdk_disabled:bool ->
?url:string ->
?url_traces:string ->
?url_metrics:string ->
?url_logs:string ->
?batch_traces:int ->
?batch_metrics:int ->
?batch_logs:int ->
?batch_timeout_ms:int ->
?traces:Opentelemetry.Provider_config.t ->
?metrics:Opentelemetry.Provider_config.t ->
?logs:Opentelemetry.Provider_config.t ->
?headers:(string * string) list ->
?headers_traces:(string * string) list ->
?headers_metrics:(string * string) list ->
?headers_logs:(string * string) list ->
?protocol:protocol ->
?timeout_ms:int ->
?timeout_traces_ms:int ->
?timeout_metrics_ms:int ->
?timeout_logs_ms:int ->
?self_trace:bool ->
?self_metrics:bool ->
?http_concurrency_level:int ->
?retry_max_attempts:int ->
?retry_initial_delay_ms:float ->
?retry_max_delay_ms:float ->
?retry_backoff_multiplier:float ->
'k
module type ENV = sig
val make : (t -> 'a) -> 'a make
end
open struct
let get_debug_from_env () =
match Sys.getenv_opt "OTEL_OCAML_DEBUG" with
| Some ("1" | "true") -> true
| _ -> false
let get_log_level_from_env () : log_level =
match Sys.getenv_opt "OTEL_LOG_LEVEL" with
| Some "none" -> None
| Some "error" -> Some Error
| Some "warn" -> Some Warning
| Some "info" -> Some Info
| Some "debug" -> Some Debug
| Some s ->
Opentelemetry.Self_debug.log Warning (fun () ->
Printf.sprintf "unknown log level %S, defaulting to info" s);
Some Info
| None ->
if get_debug_from_env () then
Some Debug
else
Some Info
let get_sdk_disabled_from_env () =
match Sys.getenv_opt "OTEL_SDK_DISABLED" with
| Some ("true" | "1") -> true
| _ -> false
let get_protocol_from_env env_name =
match Sys.getenv_opt env_name with
| Some "http/protobuf" -> Http_protobuf
| Some "http/json" -> Http_json
| _ -> Http_protobuf
(* Read an integer from environment variable [env_name]; return [default]
   when the variable is unset or does not parse as an integer. *)
let get_timeout_from_env env_name default =
  match Sys.getenv_opt env_name with
  | None -> default
  | Some raw -> Option.value (int_of_string_opt raw) ~default
(* [make_get_from_env env_name] returns a thunk that looks up [env_name]
   in the environment once and caches the result — including a negative
   result. (The previous version never cached absence, so an unset
   variable was re-queried from the environment on every call, defeating
   the memoization.)
   Returns: [Some value] if the variable was set at first lookup,
   [None] otherwise. *)
let make_get_from_env env_name =
  (* [None] = not looked up yet; [Some r] = cached lookup result [r] *)
  let cache = ref None in
  fun () ->
    match !cache with
    | Some r -> r
    | None ->
      let r = Sys.getenv_opt env_name in
      cache := Some r;
      r
let get_url_from_env = make_get_from_env "OTEL_EXPORTER_OTLP_ENDPOINT"
let get_url_traces_from_env =
make_get_from_env "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"
let get_url_metrics_from_env =
make_get_from_env "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"
let get_url_logs_from_env =
make_get_from_env "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"
(* Drop a single trailing '/' from [url], if present. *)
let remove_trailing_slash url =
  let n = String.length url in
  if n > 0 && url.[n - 1] = '/' then
    String.sub url 0 (n - 1)
  else
    url
(* Parse a comma-separated list of [key=value] pairs (the
   OTEL_EXPORTER_OTLP_HEADERS wire format) into an association list.
   @raise Failure if an entry is not exactly one [key=value]. *)
let parse_headers s =
  s
  |> String.split_on_char ','
  |> List.map (fun entry ->
         match String.split_on_char '=' entry with
         | [ k; v ] -> k, v
         | _ -> failwith "Unexpected format for header")
let get_headers_from_env env_name =
try parse_headers (Sys.getenv env_name) with _ -> []
let get_general_headers_from_env () =
try parse_headers (Sys.getenv "OTEL_EXPORTER_OTLP_HEADERS") with _ -> []
end
module Env () : ENV = struct
(* Merge two header association lists, with signal-specific headers
   taking precedence over the generic [base] ones. The result is
   [specific] (in reverse order) followed by the [base] entries whose
   key does not occur in [specific]. *)
let merge_headers base specific =
  let overridden (k, _) =
    List.exists (fun (k', _) -> String.equal k k') specific
  in
  let kept_base = List.filter (fun h -> not (overridden h)) base in
  List.rev_append specific kept_base
let make k ?(debug = get_debug_from_env ())
?(log_level = get_log_level_from_env ())
?(sdk_disabled = get_sdk_disabled_from_env ()) ?url ?url_traces
?url_metrics ?url_logs ?batch_traces ?batch_metrics ?batch_logs
?(batch_timeout_ms = 2_000) ?traces ?metrics ?logs
?(headers = get_general_headers_from_env ()) ?headers_traces
?headers_metrics ?headers_logs
?(protocol = get_protocol_from_env "OTEL_EXPORTER_OTLP_PROTOCOL")
?(timeout_ms = get_timeout_from_env "OTEL_EXPORTER_OTLP_TIMEOUT" 10_000)
?timeout_traces_ms ?timeout_metrics_ms ?timeout_logs_ms
?(self_trace = false) ?(self_metrics = false) ?http_concurrency_level
?(retry_max_attempts = 3) ?(retry_initial_delay_ms = 100.)
?(retry_max_delay_ms = 5000.) ?(retry_backoff_multiplier = 2.0) =
let batch_timeout_ = Mtime.Span.(batch_timeout_ms * ms) in
let traces =
match traces with
| Some t -> t
| None ->
let batch =
match batch_traces with
| Some b -> b
| None -> get_timeout_from_env "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" 400
in
Opentelemetry.Provider_config.make ~batch ~timeout:batch_timeout_ ()
in
let metrics =
match metrics with
| Some m -> m
| None ->
let batch =
match batch_metrics with
| Some b -> b
| None -> get_timeout_from_env "OTEL_METRIC_EXPORT_INTERVAL" 200
in
Opentelemetry.Provider_config.make ~batch ~timeout:batch_timeout_ ()
in
let logs =
match logs with
| Some l -> l
| None ->
let batch = Option.value batch_logs ~default:400 in
Opentelemetry.Provider_config.make ~batch ~timeout:batch_timeout_ ()
in
let url_traces, url_metrics, url_logs =
let base_url =
let base_url =
match get_url_from_env () with
| None -> Option.value url ~default:default_url
| Some url -> remove_trailing_slash url
in
remove_trailing_slash base_url
in
let url_traces =
match get_url_traces_from_env () with
| None -> Option.value url_traces ~default:(base_url ^ "/v1/traces")
| Some url -> url
in
let url_metrics =
match get_url_metrics_from_env () with
| None -> Option.value url_metrics ~default:(base_url ^ "/v1/metrics")
| Some url -> url
in
let url_logs =
match get_url_logs_from_env () with
| None -> Option.value url_logs ~default:(base_url ^ "/v1/logs")
| Some url -> url
in
url_traces, url_metrics, url_logs
in
(* Get per-signal headers from env vars *)
let env_headers_traces =
get_headers_from_env "OTEL_EXPORTER_OTLP_TRACES_HEADERS"
in
let env_headers_metrics =
get_headers_from_env "OTEL_EXPORTER_OTLP_METRICS_HEADERS"
in
let env_headers_logs =
get_headers_from_env "OTEL_EXPORTER_OTLP_LOGS_HEADERS"
in
(* Merge with provided headers, env-specific takes precedence *)
let headers_traces =
match headers_traces with
| Some h -> h
| None -> merge_headers headers env_headers_traces
in
let headers_metrics =
match headers_metrics with
| Some h -> h
| None -> merge_headers headers env_headers_metrics
in
let headers_logs =
match headers_logs with
| Some h -> h
| None -> merge_headers headers env_headers_logs
in
(* Get per-signal timeouts from env vars with fallback to general timeout *)
let timeout_traces_ms =
match timeout_traces_ms with
| Some t -> t
| None ->
get_timeout_from_env "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT" timeout_ms
in
let timeout_metrics_ms =
match timeout_metrics_ms with
| Some t -> t
| None ->
get_timeout_from_env "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT" timeout_ms
in
let timeout_logs_ms =
match timeout_logs_ms with
| Some t -> t
| None ->
get_timeout_from_env "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT" timeout_ms
in
k
{
debug;
log_level;
sdk_disabled;
url_traces;
url_metrics;
url_logs;
headers;
headers_traces;
headers_metrics;
headers_logs;
protocol;
timeout_ms;
timeout_traces_ms;
timeout_metrics_ms;
timeout_logs_ms;
traces;
metrics;
logs;
self_trace;
self_metrics;
http_concurrency_level;
retry_max_attempts;
retry_initial_delay_ms;
retry_max_delay_ms;
retry_backoff_multiplier;
_rest = ();
}
end

View file

@ -1,200 +0,0 @@
(** Constructing and managing the configuration common to many (most?)
HTTP-based clients.
This is extended and reused by concrete client implementations that exports
signals over HTTP, depending on their needs. *)
type protocol =
| Http_protobuf
| Http_json
type log_level = Opentelemetry.Self_debug.level option
(** [None] disables internal diagnostic logging; [Some level] enables it at that
level and above. Maps to [OTEL_LOG_LEVEL] env var. *)
type rest
(** opaque type to force using {!make} while allowing record updates *)
type t = {
debug: bool; [@alert deprecated "Use log_level instead"]
(** @deprecated Use {!log_level} instead. Debug the client itself? *)
log_level: log_level;
(** Log level for internal diagnostics. Read from OTEL_LOG_LEVEL or falls
back to OTEL_OCAML_DEBUG for compatibility. *)
sdk_disabled: bool;
(** If true, the SDK is completely disabled and no-ops. Read from
OTEL_SDK_DISABLED. Default false. *)
url_traces: string; (** Url to send traces/spans *)
url_metrics: string; (** Url to send metrics*)
url_logs: string; (** Url to send logs *)
headers: (string * string) list;
(** Global API headers sent to all endpoints. Default is none or
"OTEL_EXPORTER_OTLP_HEADERS" if set. Signal-specific headers can
override these. *)
headers_traces: (string * string) list;
(** Headers for traces endpoint. Merges OTEL_EXPORTER_OTLP_HEADERS with
OTEL_EXPORTER_OTLP_TRACES_HEADERS (signal-specific takes precedence).
*)
headers_metrics: (string * string) list;
(** Headers for metrics endpoint. Merges OTEL_EXPORTER_OTLP_HEADERS with
OTEL_EXPORTER_OTLP_METRICS_HEADERS (signal-specific takes precedence).
*)
headers_logs: (string * string) list;
(** Headers for logs endpoint. Merges OTEL_EXPORTER_OTLP_HEADERS with
OTEL_EXPORTER_OTLP_LOGS_HEADERS (signal-specific takes precedence). *)
protocol: protocol;
(** Wire protocol to use. Read from OTEL_EXPORTER_OTLP_PROTOCOL. Default
Http_protobuf. *)
timeout_ms: int;
(** General timeout in milliseconds for exporter operations. Read from
OTEL_EXPORTER_OTLP_TIMEOUT. Default 10_000. *)
timeout_traces_ms: int;
(** Timeout for trace exports. Read from
OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, falls back to timeout_ms. *)
timeout_metrics_ms: int;
(** Timeout for metric exports. Read from
OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, falls back to timeout_ms. *)
timeout_logs_ms: int;
(** Timeout for log exports. Read from OTEL_EXPORTER_OTLP_LOGS_TIMEOUT,
falls back to timeout_ms. *)
traces: Opentelemetry.Provider_config.t;
(** Per-provider batching config for traces. Default: batch=400,
timeout=2s. The batch size is read from OTEL_BSP_MAX_EXPORT_BATCH_SIZE
if set. *)
metrics: Opentelemetry.Provider_config.t;
(** Per-provider batching config for metrics. Default: batch=200,
timeout=2s. The batch size is read from OTEL_METRIC_EXPORT_INTERVAL if
set. *)
logs: Opentelemetry.Provider_config.t;
(** Per-provider batching config for logs. Default: batch=400, timeout=2s.
*)
self_trace: bool;
(** If true, the OTEL library will perform some self-instrumentation.
Default [false].
@since 0.7 *)
self_metrics: bool;
(** If true, the OTEL library will regularly emit metrics about itself.
Default [false].
@since NEXT_RELEASE *)
http_concurrency_level: int option;
(** How many HTTP requests can be done simultaneously (at most)? This can
be used to represent the size of a pool of workers where each worker
gets a batch to send, send it, and repeats.
@since NEXT_RELEASE *)
retry_max_attempts: int;
(** Maximum number of retry attempts for failed exports. 0 means no retry,
1 means one retry after initial failure. Default 3. *)
retry_initial_delay_ms: float;
(** Initial delay in milliseconds before first retry. Default 100ms. *)
retry_max_delay_ms: float;
(** Maximum delay in milliseconds between retries. Default 5000ms. *)
retry_backoff_multiplier: float;
(** Multiplier for exponential backoff. Default 2.0. *)
_rest: rest;
}
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
the future. *)
val default_url : string
(** The default base URL for the config. *)
val pp : Format.formatter -> t -> unit
type 'k make =
?debug:bool ->
?log_level:log_level ->
?sdk_disabled:bool ->
?url:string ->
?url_traces:string ->
?url_metrics:string ->
?url_logs:string ->
?batch_traces:int ->
?batch_metrics:int ->
?batch_logs:int ->
?batch_timeout_ms:int ->
?traces:Opentelemetry.Provider_config.t ->
?metrics:Opentelemetry.Provider_config.t ->
?logs:Opentelemetry.Provider_config.t ->
?headers:(string * string) list ->
?headers_traces:(string * string) list ->
?headers_metrics:(string * string) list ->
?headers_logs:(string * string) list ->
?protocol:protocol ->
?timeout_ms:int ->
?timeout_traces_ms:int ->
?timeout_metrics_ms:int ->
?timeout_logs_ms:int ->
?self_trace:bool ->
?self_metrics:bool ->
?http_concurrency_level:int ->
?retry_max_attempts:int ->
?retry_initial_delay_ms:float ->
?retry_max_delay_ms:float ->
?retry_backoff_multiplier:float ->
'k
(** A function that gathers all the values needed to construct a {!t}, and
produces a ['k]. ['k] is typically a continuation used to construct a
configuration that includes a {!t}.
@param url
base url used to construct per-signal urls. Per-signal url options take
precedence over this base url. If not provided, this defaults to
"OTEL_EXPORTER_OTLP_ENDPOINT" if set, or if not {!default_url}.
Example of constructed per-signal urls with the base url
http://localhost:4318
- Traces: http://localhost:4318/v1/traces
- Metrics: http://localhost:4318/v1/metrics
- Logs: http://localhost:4318/v1/logs
Use per-signal url options if different urls are needed for each signal
type.
@param url_traces
url to send traces, or "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" if set. The
url is used as-is without any modification.
@param url_metrics
url to send metrics, or "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT" if set. The
url is used as-is without any modification.
@param url_logs
url to send logs, or "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT" if set. The url is
used as-is without any modification. *)
(** Construct, inspect, and update {!t} configurations, drawing defaults from
the environment *)
module type ENV = sig
val make : (t -> 'a) -> 'a make
(** [make f] is a {!type:make} function that will give [f] a safely
constructed {!t}.
Typically this is used to extend the constructor for {!t} with new
optional arguments.
E.g., we can construct a configuration that includes a {!t} alongside a
more specific field like so:
{[
type extended_config = {
new_field: string;
common: t;
}
let make : (new_field:string -> unit -> extended_config) make =
Env.make (fun common ~new_field () -> { new_field; common })
let _example : extended_config =
make ~new_field:"foo" ~url_traces:"foo/bar" ~debug:true ()
]}
As a special case, we can get the simple constructor function for {!t}
with [Env.make (fun common () -> common)] *)
end
(** A generative functor that produces a state-space that can read configuration
values from the environment, provide stateful configuration setting and
accessing operations, and a way to make a new {!t} configuration record *)
module Env : functor () -> ENV

View file

@ -1,32 +0,0 @@
(** Basic debug exporter, prints signals on stdout/stderr/...
As the name says, it's not intended for production but as a quick way to
export signals and eyeball them. *)
open Common_
(** [debug ?out ()] is an exporter that pretty-prints signals on [out].
@param out the formatter into which to print, default [stderr]. *)
let debug ?(clock = OTEL.Clock.ptime_clock) ?(out = Format.err_formatter) () :
    OTEL.Exporter.t =
  (* the clock is accepted for interface uniformity but unused here *)
  ignore clock;
  let open Proto in
  let export : OTEL.Any_signal_l.t -> unit = function
    | OTEL.Any_signal_l.Spans spans ->
      List.iter (fun sp -> Format.fprintf out "SPAN: %a@." Trace.pp_span sp) spans
    | OTEL.Any_signal_l.Metrics metrics ->
      List.iter
        (fun m -> Format.fprintf out "METRIC: %a@." Metrics.pp_metric m)
        metrics
    | OTEL.Any_signal_l.Logs logs ->
      List.iter
        (fun l -> Format.fprintf out "LOG: %a@." Proto.Logs.pp_log_record l)
        logs
  in
  let shutdown () = Format.fprintf out "CLEANUP@." in
  {
    OTEL.Exporter.export;
    (* this exporter has no lifecycle of its own *)
    active = (fun () -> Aswitch.dummy);
    shutdown;
    self_metrics = (fun () -> []);
  }

View file

@ -1,60 +0,0 @@
(** Build an exporter from a queue and a consumer.
The exporter will send signals into the queue (possibly dropping them if the
queue is full), and the consumer is responsible for actually exporting the
signals it reads from the other end of the queue.
At shutdown time, the queue is closed for writing, but only once it's empty
will the consumer properly shutdown. *)
open Common_
module BQ = Bounded_queue
(** Pair a queue with a consumer to build an exporter.
The resulting exporter will emit logs, spans, and traces directly into the
bounded queue; while the consumer takes them from the queue to forward them
somewhere else, store them, etc.
@param resource_attributes attributes added to every "resource" batch *)
let create ~clock ~(q : OTEL.Any_signal_l.t Bounded_queue.t)
    ~(consumer : Consumer.any_signal_l_builder) () : OTEL.Exporter.t =
  (* guards the shutdown sequence below so it runs at most once *)
  let shutdown_started = Atomic.make false in
  let active, trigger = Aswitch.create () in
  (* start the consumer on the receiving end of the queue *)
  let consumer = consumer.start_consuming q.recv in
  (* Gauges/counters describing the queue itself, plus whatever the consumer
     reports about itself. *)
  let self_metrics () : _ list =
    let now = OTEL.Clock.now clock in
    let m_size =
      OTEL.Metrics.gauge ~name:"otel.sdk.exporter.queue.size"
        [ OTEL.Metrics.int ~now (Bounded_queue.Recv.size q.recv) ]
    and m_cap =
      OTEL.Metrics.gauge ~name:"otel.sdk.exporter.queue.capacity"
        [ OTEL.Metrics.int ~now (Bounded_queue.Recv.high_watermark q.recv) ]
    and m_discarded =
      OTEL.Metrics.sum ~is_monotonic:true
        ~name:"otel.sdk.exporter_queue.discarded"
        [ OTEL.Metrics.int ~now (Bounded_queue.Recv.num_discarded q.recv) ]
    in
    m_size :: m_cap :: m_discarded :: Consumer.self_metrics consumer ~clock
  in
  (* Exporting only enqueues; signals may be dropped if the queue is full.
     Once the switch is off, pushes are silently discarded. *)
  let export (sig_ : OTEL.Any_signal_l.t) =
    if Aswitch.is_on active then BQ.Send.push q.send [ sig_ ]
  in
  let shutdown () =
    if Aswitch.is_on active && not (Atomic.exchange shutdown_started true) then (
      (* first, prevent further pushes to the queue. Consumer workers
         can still drain it. *)
      Bounded_queue.Send.close q.send;
      (* shutdown consumer; once it's down it'll turn our switch off too *)
      Aswitch.link (Consumer.active consumer) trigger;
      Consumer.shutdown consumer
    )
  in
  (* if consumer shuts down for some reason, we also must *)
  Aswitch.on_turn_off (Consumer.active consumer) shutdown;
  { OTEL.Exporter.export; active = (fun () -> active); self_metrics; shutdown }

View file

@ -1,63 +0,0 @@
(** A simple exporter that prints on stdout. *)
open Common_
open struct
  (* Pretty-printer for one span: ids, name, timestamps and duration (secs). *)
  let pp_span out (sp : OTEL.Span.t) =
    let open OTEL in
    Format.fprintf out
      "@[<2>SPAN {@ trace_id: %a@ span_id: %a@ name: %S@ start: %a@ end: %a@ \
       dur: %.6fs@]}"
      Trace_id.pp
      (Trace_id.of_bytes sp.trace_id)
      Span_id.pp
      (Span_id.of_bytes sp.span_id)
      sp.name Timestamp_ns.pp_debug sp.start_time_unix_nano
      Timestamp_ns.pp_debug sp.end_time_unix_nano
      ((Int64.to_float sp.end_time_unix_nano
       -. Int64.to_float sp.start_time_unix_nano)
      /. 1e9)

  let pp_log out l =
    Format.fprintf out "@[<2>LOG %a@]" Proto.Logs.pp_log_record l

  let pp_metric out m =
    Format.fprintf out "@[<2>METRICS %a@]" Proto.Metrics.pp_metric m

  (* Print the elements of [l] in a vertical box, one per line, while holding
     [mutex] so concurrent printers don't interleave. No-op on empty lists. *)
  let pp_vlist mutex pp out l =
    (* use structural [<>], not physical [!=]: [!=] on lists only happens to
       work for [] because it is an immediate value, and is fragile/unidiomatic *)
    if l <> [] then (
      let@ () = Util_mutex.protect mutex in
      Format.fprintf out "@[<v>";
      List.iteri
        (fun i x ->
          if i > 0 then Format.fprintf out "@,";
          pp out x)
        l;
      Format.fprintf out "@]@."
    )
end
(** Exporter that pretty-prints every signal to standard output. Printing is
    serialized through a mutex so concurrent exports don't interleave. *)
let stdout ?(clock = OTEL.Clock.ptime_clock) () : OTEL.Exporter.t =
  let open Opentelemetry_util in
  ignore clock;
  let mutex = Mutex.create () in
  let out = Format.std_formatter in
  let export : OTEL.Any_signal_l.t -> unit = function
    | OTEL.Any_signal_l.Spans spans -> pp_vlist mutex pp_span out spans
    | OTEL.Any_signal_l.Logs logs -> pp_vlist mutex pp_log out logs
    | OTEL.Any_signal_l.Metrics metrics -> pp_vlist mutex pp_metric out metrics
  in
  let shutdown () =
    (* flush pending output under the same lock used for printing *)
    let@ () = Util_mutex.protect mutex in
    Format.pp_print_flush out ()
  in
  {
    OTEL.Exporter.export;
    active = (fun () -> Aswitch.dummy);
    shutdown;
    self_metrics = (fun () -> []);
  }

View file

@ -1,254 +0,0 @@
(** A consumer: pulls signals from a queue, sends them somewhere else *)
open Common_
type error = Export_error.t
(** Number of errors met during export *)
let n_errors = Atomic.make 0
module type IO = Generic_io.S_WITH_CONCURRENCY
(** Generic sender: where to send signals *)
module type SENDER = sig
module IO : IO
type t
(** Sender state *)
type config
val create : config:config -> unit -> t
val cleanup : t -> unit
(** Cleanup resources once we are done. The sender cannot be used anymore
after this is called on it *)
val send : t -> OTEL.Any_signal_l.t -> (unit, error) result IO.t
end
module Make
    (IO : IO)
    (Notifier : Generic_notifier.S with type 'a IO.t = 'a IO.t)
    (Sender : SENDER with type 'a IO.t = 'a IO.t) : sig
  val consumer :
    sender_config:Sender.config ->
    n_workers:int ->
    ticker_task:float option ->
    ?on_tick:(unit -> unit) ->
    unit ->
    Consumer.any_signal_l_builder
  (** Make a consumer builder, ie. a builder function that will take a bounded
      queue of signals, and start a consumer to process these signals and send
      them somewhere using HTTP. *)
end = struct
  open IO

  type config = {
    n_workers: int;
    ticker_task: float option;
    on_tick: unit -> unit;
  }

  (* Lifecycle of the consumer as a whole: [Active] -> [Shutting_down]
     (queue closed, workers draining) -> [Stopped] (last worker exited). *)
  type status =
    | Active
    | Shutting_down
    | Stopped

  type state = {
    active: Aswitch.t;  (** Public facing switch *)
    q: OTEL.Any_signal_l.t Bounded_queue.Recv.t;
    status: status Atomic.t;
        (** Internal status, including the shutting down process *)
    notify: Notifier.t;
    n_workers: int Atomic.t;  (** Current number of workers *)
    active_trigger: Aswitch.trigger;
    config: config;
    sender_config: Sender.config;
    m_spans: int Atomic.t;  (* number of spans handed to the sender *)
    m_logs: int Atomic.t;  (* number of log records handed to the sender *)
  }

  (* Request shutdown: CAS the status to [Shutting_down] exactly once, then
     wake up all workers so they can observe it and drain the queue. *)
  let shutdown self : unit =
    let old_status =
      Util_atomic.update_cas self.status @@ fun status ->
      match status with
      | Stopped -> status, status
      | Shutting_down -> status, status
      | Active -> status, Shutting_down
    in
    match old_status with
    | Stopped -> ()
    | Shutting_down ->
      (* last worker to stop will call [shutdown_worker] and finish the job *)
      ()
    | Active ->
      (* notify potentially asleep workers *)
      Notifier.trigger self.notify;
      Notifier.delete self.notify

  (* Wake the workers so they enforce timeouts/flushes, if still running. *)
  let tick (self : state) =
    if Aswitch.is_on self.active then Notifier.trigger self.notify

  (** Shutdown one worker, when the queue is closed *)
  let shutdown_worker (self : state) : unit =
    (* fetch_and_add returns the previous count: 1 means we were last *)
    if Atomic.fetch_and_add self.n_workers (-1) = 1 then (
      (* we were the last worker, we can shut down the whole consumer *)
      Atomic.set self.status Stopped;
      Aswitch.turn_off self.active_trigger;
      (* sanity check about the queue, which should be drained *)
      let size_q = Bounded_queue.Recv.size self.q in
      if size_q > 0 then
        OTEL.Self_debug.log OTEL.Self_debug.Warning (fun () ->
            Printf.sprintf
              "otel: warning: workers exited but work queue still contains %d \
               elements"
              size_q)
    )

  (* Send one batch via the sender; record span/log counts, and on failure
     count the error and back off (with jitter) to avoid error loops. *)
  let send_signals (self : state) (sender : Sender.t) ~backoff
      (sigs : OTEL.Any_signal_l.t) : unit IO.t =
    (match sigs with
    | Spans l ->
      ignore (Atomic.fetch_and_add self.m_spans (List.length l) : int)
    | Logs l -> ignore (Atomic.fetch_and_add self.m_logs (List.length l) : int)
    | Metrics _l -> ());
    let* r = Sender.send sender sigs in
    match r with
    | Ok () ->
      Util_net_backoff.on_success backoff;
      IO.return ()
    | Error `Sysbreak ->
      OTEL.Self_debug.log OTEL.Self_debug.Info (fun () ->
          "ctrl-c captured, stopping");
      shutdown self;
      IO.return ()
    | Error err ->
      Atomic.incr n_errors;
      Export_error.report_err ~level:`Auto err;
      (* avoid crazy error loop *)
      let dur_s = Util_net_backoff.on_error backoff in
      IO.sleep_s (dur_s +. Random.float (dur_s /. 10.))

  (* Spawn one worker loop: pop from the queue and send; sleep on the
     notifier when the queue is empty; exit when the queue is closed or the
     consumer is shutting down. The sender is cleaned up on exit. *)
  let start_worker (self : state) : unit =
    let sender = Sender.create ~config:self.sender_config () in
    let backoff = Util_net_backoff.create () in
    OTEL.Self_debug.log OTEL.Self_debug.Debug (fun () -> "otel worker started");
    (* loop on [q] *)
    let rec loop () : unit IO.t =
      (* first look at the queue, to drain it *)
      match Bounded_queue.Recv.try_pop self.q with
      | `Closed ->
        (* this worker shuts down, others might still be busy *)
        shutdown_worker self;
        IO.return ()
      | `Item sigs ->
        let* () = send_signals ~backoff self sender sigs in
        loop ()
      | `Empty ->
        (* Printf.eprintf "worker %d: empty queue\n%!" tid; *)
        (match Atomic.get self.status with
        | Stopped ->
          assert false
        (* shouldn't happen without us going through [Shutting_down] *)
        | Shutting_down ->
          shutdown_worker self;
          IO.return ()
        | Active ->
          (* sleep until new items arrive or shutdown begins *)
          let* () =
            Notifier.wait self.notify ~should_keep_waiting:(fun () ->
                Bounded_queue.Recv.size self.q = 0
                && Atomic.get self.status = Active)
          in
          loop ())
    in
    IO.spawn (fun () ->
        IO.protect loop ~finally:(fun () ->
            Sender.cleanup sender;
            IO.return ()))

  (* Periodic task that pokes the workers every [interval_s] while active. *)
  let start_ticker (self : state) ~(interval_s : float) : unit =
    let rec loop () : unit IO.t =
      match Atomic.get self.status with
      | Stopped | Shutting_down -> IO.return ()
      | Active ->
        let* () = IO.sleep_s interval_s in
        if Aswitch.is_on self.active then (
          tick self;
          self.config.on_tick ()
        );
        loop ()
    in
    IO.spawn loop

  (* Allocate state, start the (clamped) pool of workers, hook the notifier
     to the queue, and optionally start the ticker. *)
  let create_state ~sender_config ~n_workers ~ticker_task ~on_tick ~q () : state
      =
    let active, active_trigger = Aswitch.create () in
    let config = { n_workers; ticker_task; on_tick } in
    let self =
      {
        active;
        active_trigger;
        status = Atomic.make Active;
        n_workers = Atomic.make 0;
        q;
        notify = Notifier.create ();
        config;
        sender_config;
        m_spans = Atomic.make 0;
        m_logs = Atomic.make 0;
      }
    in
    (* start workers *)
    let n_workers = max 2 (min 500 self.config.n_workers) in
    ignore (Atomic.fetch_and_add self.n_workers n_workers : int);
    for _i = 1 to n_workers do
      start_worker self
    done;
    Notifier.register_bounded_queue self.notify q;
    (* start ticker *)
    (match self.config.ticker_task with
    | None -> ()
    | Some interval_s -> start_ticker self ~interval_s);
    self

  (* Counters about this consumer itself (errors, exported spans/logs). *)
  let self_metrics ~clock (self : state) : OTEL.Metrics.t list =
    let open OTEL.Metrics in
    let now = OTEL.Clock.now clock in
    let attrs = [ "otel.component.name", `String "otel_ocaml" ] in
    [
      sum ~name:"otel.sdk.exporter.errors" ~is_monotonic:true
        [ int ~now (Atomic.get n_errors) ~attrs ];
      sum ~name:"otel.sdk.exporter.span.exported" ~is_monotonic:true
        [ int ~now (Atomic.get self.m_spans) ~attrs ];
      sum ~name:"otel.sdk.exporter.log.exported" ~is_monotonic:true
        [ int ~now (Atomic.get self.m_logs) ~attrs ];
    ]

  (* Wrap [state] into the public, first-class [Consumer.t] record. *)
  let to_consumer (self : state) : Consumer.t =
    let shutdown () = shutdown self in
    let tick () = tick self in
    let self_metrics ~clock () = self_metrics self ~clock in
    { active = (fun () -> self.active); tick; shutdown; self_metrics }

  let consumer ~sender_config ~n_workers ~ticker_task ?(on_tick = ignore) () :
      Consumer.any_signal_l_builder =
    {
      start_consuming =
        (fun q ->
          let st =
            create_state ~sender_config ~n_workers ~ticker_task ~on_tick ~q ()
          in
          to_consumer st);
    }
end

View file

@ -1,131 +0,0 @@
(** A consumer that just calls another exporter.
This is useful to introduce queueing behavior using {!Exporter_queued}, but
simply forwarding to another (presumably non-queue) exporter.
It is generic because we need some sort of threading/concurrency to run the
consumer. *)
open Common_
module type IO = Generic_io.S_WITH_CONCURRENCY
module Make
    (IO : IO)
    (Notifier : Generic_notifier.S with type 'a IO.t = 'a IO.t) : sig
  val consumer : OTEL.Exporter.t -> OTEL.Any_signal_l.t Consumer.Builder.t
end = struct
  open IO

  (* Lifecycle: [Active] -> [Shutting_down] (draining) -> [Stopped]. *)
  type status =
    | Active
    | Shutting_down
    | Stopped

  type state = {
    active: Aswitch.t;  (** Public facing switch *)
    active_trigger: Aswitch.trigger;
    status: status Atomic.t;  (** Internal state, including shutdown *)
    q: OTEL.Any_signal_l.t Bounded_queue.Recv.t;
    notify: Notifier.t;
    exp: OTEL.Exporter.t;  (* the wrapped exporter everything is forwarded to *)
  }

  (* Request shutdown: CAS to [Shutting_down] once, then wake the worker so
     it can drain the queue and finish. *)
  let shutdown self : unit =
    let old_status =
      Util_atomic.update_cas self.status @@ fun status ->
      match status with
      | Stopped -> status, status
      | Shutting_down -> status, status
      | Active -> status, Shutting_down
    in
    match old_status with
    | Stopped -> ()
    | Shutting_down ->
      (* the worker will complete the shutdown when it stops *)
      ()
    | Active ->
      (* notify potentially asleep workers *)
      Notifier.trigger self.notify;
      Notifier.delete self.notify

  let tick (self : state) = Notifier.trigger self.notify

  (** Shutdown worker *)
  let shutdown_worker (self : state) : unit =
    (* only one worker, so, turn off exporter *)
    OTEL.Exporter.shutdown self.exp;
    (* and we are shut down! *)
    Atomic.set self.status Stopped;
    Aswitch.turn_off self.active_trigger

  (* Single worker loop: forward each queued batch to the wrapped exporter;
     sleep on the notifier when empty; exit once closed or shutting down. *)
  let start_worker (self : state) : unit =
    (* loop on [q] *)
    let rec loop () : unit IO.t =
      match Bounded_queue.Recv.try_pop self.q with
      | `Closed ->
        shutdown_worker self;
        IO.return ()
      | `Item sig_ ->
        self.exp.OTEL.Exporter.export sig_;
        loop ()
      | `Empty ->
        (match Atomic.get self.status with
        | Stopped ->
          assert false
        (* shouldn't happen without us going through [Shutting_down] *)
        | Shutting_down ->
          shutdown_worker self;
          IO.return ()
        | Active ->
          let* () =
            Notifier.wait self.notify ~should_keep_waiting:(fun () ->
                Bounded_queue.Recv.size self.q = 0
                && Atomic.get self.status = Active)
          in
          loop ())
    in
    IO.spawn loop

  let create_state ~q ~exporter () : state =
    let active, active_trigger = Aswitch.create () in
    let self =
      {
        active;
        active_trigger;
        status = Atomic.make Active;
        q;
        exp = exporter;
        notify = Notifier.create ();
      }
    in
    start_worker self;
    self

  (* Report how many batches the bounded queue dropped on the floor. *)
  let self_metrics (self : state) ~clock : OTEL.Metrics.t list =
    let open OTEL.Metrics in
    let now = OTEL.Clock.now clock in
    [
      sum ~name:"otel_ocaml.export.batches_discarded_by_bounded_queue"
        ~is_monotonic:true
        [ int ~now (Bounded_queue.Recv.num_discarded self.q) ];
    ]

  (* Wrap [state] into the public, first-class [Consumer.t] record. *)
  let to_consumer (self : state) : Consumer.t =
    let shutdown () = shutdown self in
    let tick () = tick self in
    let self_metrics ~clock () = self_metrics self ~clock in
    { active = (fun () -> self.active); tick; shutdown; self_metrics }

  let consumer exporter : _ Consumer.Builder.t =
    {
      start_consuming =
        (fun q ->
          let st = create_state ~q ~exporter () in
          to_consumer st);
    }
end

View file

@ -1,152 +0,0 @@
open Common_
type error = Export_error.t
module type IO = Generic_io.S_WITH_CONCURRENCY
module type HTTPC = sig
module IO : IO
type t
val create : unit -> t
val cleanup : t -> unit
val send :
t ->
attempt_descr:string ->
url:string ->
headers:(string * string) list ->
decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
string ->
('a, error) result IO.t
(** Send a HTTP request.
@param attempt_descr included in error message if this fails *)
end
module Make
    (IO : IO)
    (Notifier : Generic_notifier.S with type 'a IO.t = 'a IO.t)
    (Httpc : HTTPC with type 'a IO.t = 'a IO.t) : sig
  val consumer :
    ?override_n_workers:int ->
    ticker_task:float option ->
    ?on_tick:(unit -> unit) ->
    config:Http_config.t ->
    unit ->
    Consumer.any_signal_l_builder
  (** Make a consumer builder, ie. a builder function that will take a bounded
      queue of signals, and start a consumer to process these signals and send
      them somewhere using HTTP.
      @param ticker_task
        controls whether we start a task to call [tick] at the given interval in
        seconds, or [None] to not start such a task at all. *)
end = struct
  (* Sender that serializes each batch (protobuf or JSON) and POSTs it to the
     per-signal OTLP endpoint, with retries on retryable failures. *)
  module Sender :
    Generic_consumer.SENDER with module IO = IO and type config = Http_config.t =
  struct
    module IO = IO

    type config = Http_config.t

    type t = {
      config: config;
      encoder: Pbrt.Encoder.t;  (* reused across sends to avoid reallocation *)
      http: Httpc.t;
    }

    let create ~config () : t =
      { config; http = Httpc.create (); encoder = Pbrt.Encoder.create () }

    let cleanup self = Httpc.cleanup self.http

    (** Should we retry, based on the HTTP response code? *)
    let should_retry = function
      | `Failure _ -> true (* Network errors, connection issues *)
      | `Status (code, _, _) ->
        (* Retry on server errors, rate limits, timeouts *)
        code >= 500 || code = 429 || code = 408
      | `Sysbreak -> false (* User interrupt, don't retry *)

    (** Retry loop over [f()] with exponential backoff *)
    (* NOTE(review): [attempt] starts at 0 and retries while
       [attempt < retry_max_attempts], so up to [retry_max_attempts + 1] calls
       are made in total — confirm this matches the documented semantics. *)
    let rec retry_loop_ (self : t) attempt delay_ms
        ~(f : attempt_descr:string -> unit -> _ result IO.t) : _ result IO.t =
      let open IO in
      let attempt_descr =
        spf "try(%d/%d)" attempt self.config.retry_max_attempts
      in
      let* result = f ~attempt_descr () in
      match result with
      | Ok x -> return (Ok x)
      | Error err
        when should_retry err && attempt < self.config.retry_max_attempts ->
        let delay_s = delay_ms /. 1000. in
        Export_error.report_err ~level:`Debug err;
        let* () = sleep_s delay_s in
        (* exponential backoff, capped at retry_max_delay_ms *)
        let next_delay =
          min self.config.retry_max_delay_ms
            (delay_ms *. self.config.retry_backoff_multiplier)
        in
        retry_loop_ self (attempt + 1) next_delay ~f
      | Error _ as err -> return err

    let send (self : t) (sigs : OTEL.Any_signal_l.t) : (unit, error) result IO.t
        =
      let res = Resource_signal.of_signal_l sigs in
      (* pick the endpoint and headers matching the signal kind *)
      let url, signal_headers =
        match res with
        | Logs _ -> self.config.url_logs, self.config.headers_logs
        | Traces _ -> self.config.url_traces, self.config.headers_traces
        | Metrics _ -> self.config.url_metrics, self.config.headers_metrics
      in
      (* Merge general headers with signal-specific ones (signal-specific takes precedence) *)
      let signal_keys = List.map fst signal_headers in
      let filtered_general =
        List.filter
          (fun (k, _) -> not (List.mem k signal_keys))
          self.config.headers
      in
      let content_type =
        match self.config.protocol with
        | Http_protobuf -> "application/x-protobuf"
        | Http_json -> "application/json"
      in
      let headers =
        ("Content-Type", content_type)
        :: ("Accept", content_type)
        :: List.rev_append signal_headers filtered_general
      in
      let data =
        Resource_signal.Encode.any ~encoder:self.encoder
          ~protocol:self.config.protocol res
      in
      let do_once ~attempt_descr () =
        Httpc.send self.http ~attempt_descr ~url ~headers ~decode:(`Ret ()) data
      in
      if self.config.retry_max_attempts > 0 then
        retry_loop_ self 0 self.config.retry_initial_delay_ms ~f:do_once
      else
        do_once ~attempt_descr:"single_attempt" ()
  end

  module C = Generic_consumer.Make (IO) (Notifier) (Sender)

  let default_n_workers = 50

  (* Worker count: explicit override > configured concurrency level >
     default, clamped into [2, 500]. *)
  let consumer ?override_n_workers ~ticker_task ?(on_tick = ignore)
      ~(config : Http_config.t) () : Consumer.any_signal_l_builder =
    let n_workers =
      max 2
        (min 500
           (match override_n_workers, config.http_concurrency_level with
           | Some n, _ -> n
           | None, Some n -> n
           | None, None -> default_n_workers))
    in
    C.consumer ~sender_config:config ~n_workers ~ticker_task ~on_tick ()
end

View file

@ -1,31 +0,0 @@
(** Generic IO monad.
This factors out some logic between various concurrency frameworks. *)
module type S = sig
type 'a t
val return : 'a -> 'a t
val ( let* ) : 'a t -> ('a -> 'b t) -> 'b t
val protect : finally:(unit -> unit t) -> (unit -> 'a t) -> 'a t
end
module type S_WITH_CONCURRENCY = sig
include S
val sleep_s : float -> unit t
val spawn : (unit -> unit t) -> unit
end
(** Trivial instantiation of {!S}: effects run immediately and ['a t] is just
    ['a]; binding is plain application and [protect] is {!Fun.protect}. *)
module Direct_style : S with type 'a t = 'a = struct
  type 'a t = 'a

  let[@inline] return x = x

  let[@inline] ( let* ) x f = f x

  let protect ~finally f = Fun.protect ~finally f
end

View file

@ -1,19 +0,0 @@
(** Generic notifier (used to signal when a bounded queue is empty) *)
module type IO = Generic_io.S
module type S = sig
module IO : IO
type t
val create : unit -> t
val delete : t -> unit
val trigger : t -> unit
val wait : t -> should_keep_waiting:(unit -> bool) -> unit IO.t
val register_bounded_queue : t -> _ Bounded_queue.Recv.t -> unit
end

View file

@ -1,3 +0,0 @@
(** @deprecated Use {!Exporter_config} instead *)
include Exporter_config

View file

@ -1,5 +0,0 @@
(** @deprecated Use {!Exporter_config} instead *)
[@@@deprecated "use Exporter_config instead"]
include module type of Exporter_config

View file

@ -1 +0,0 @@
module OTEL = Opentelemetry

View file

@ -1,27 +0,0 @@
(library
(name opentelemetry_client_lwt)
(public_name opentelemetry-client.lwt)
(flags
:standard
-open
Opentelemetry_util
-open
Opentelemetry_client
-open
Opentelemetry_atomic)
(optional) ; lwt
(libraries
opentelemetry.util
opentelemetry.atomic
opentelemetry.emitter
(re_export opentelemetry.core)
(re_export opentelemetry)
(re_export opentelemetry.ambient-context)
ambient-context-lwt
(re_export opentelemetry-client)
(re_export lwt)
threads
mtime
mtime.clock.os
lwt.unix)
(synopsis "Lwt-specific helpers for opentelemetry-client"))

View file

@ -1,11 +0,0 @@
(* Lwt-backed implementation of the generic IO signature
   (Generic_io.S_WITH_CONCURRENCY): each operation is a direct alias. *)
type 'a t = 'a Lwt.t

let return = Lwt.return

let ( let* ) = Lwt.Syntax.( let* )

let sleep_s = Lwt_unix.sleep

(* fire-and-forget: exceptions go to Lwt's async exception hook *)
let spawn = Lwt.async

let[@inline] protect ~finally f = Lwt.finalize f finally

View file

@ -1 +0,0 @@
include Generic_io.S_WITH_CONCURRENCY with type 'a t = 'a Lwt.t

View file

@ -1,49 +0,0 @@
(** Notification that can be used on the consumer side of a bounded queue *)
module IO = Io_lwt
(* Wakes up Lwt fibers waiting on [cond], possibly from a thread other than
   the one running the Lwt event loop (via [Lwt_unix] notifications). *)
type t = {
  notified: bool Atomic.t;
      (* true while a cross-thread notification is in flight; reset by the
         notification callback. Deduplicates [send_notification] calls. *)
  cond: unit Lwt_condition.t;
  notification: int;  (* id registered with [Lwt_unix.make_notification] *)
  lwt_tid: int;  (** thread ID where lwt runs *)
  deleted: bool Atomic.t;  (* ensures [stop_notification] runs at most once *)
}

(* Must be called from the thread running the Lwt event loop: records that
   thread's id so [trigger] can tell callers apart. *)
let create () : t =
  let notified = Atomic.make false in
  let cond = Lwt_condition.create () in
  let notification =
    Lwt_unix.make_notification (fun () ->
        Atomic.set notified false;
        Lwt_condition.broadcast cond ())
  in
  let lwt_tid = Thread.id @@ Thread.self () in
  { notified; notification; cond; lwt_tid; deleted = Atomic.make false }

let delete self : unit =
  if not (Atomic.exchange self.deleted true) then
    Lwt_unix.stop_notification self.notification

(* Wake all waiters. Cheap broadcast when already on the Lwt thread;
   otherwise go through the (deduplicated) Lwt_unix notification pipe. *)
let trigger (self : t) : unit =
  let tid = Thread.id @@ Thread.self () in
  if tid = self.lwt_tid then
    (* in lwt thread, directly use the condition *)
    Lwt_condition.broadcast self.cond ()
  else if not (Atomic.exchange self.notified true) then
    Lwt_unix.send_notification self.notification

(* Sleep on [cond] until [should_keep_waiting ()] becomes false; rechecked
   after every broadcast to tolerate spurious wakeups. *)
let wait (self : t) ~should_keep_waiting : unit Lwt.t =
  let open Lwt.Syntax in
  let rec loop () =
    if should_keep_waiting () then
      let* () = Lwt_condition.wait self.cond in
      loop ()
    else
      Lwt.return_unit
  in
  loop ()

(* Trigger this notifier whenever the queue transitions to non-empty. *)
let register_bounded_queue (self : t) (q : _ Bounded_queue.Recv.t) : unit =
  Bounded_queue.Recv.on_non_empty q (fun () -> trigger self)

View file

@ -1 +0,0 @@
include Generic_notifier.S with module IO = Io_lwt

View file

@ -1,3 +0,0 @@
(** Setup Lwt as the ambient context *)
(* Install the Lwt-based storage as the ambient-context backend so scope
   propagation follows Lwt promises. Call once at program startup. *)
let setup_ambient_context () =
  Opentelemetry_ambient_context.set_current_storage Ambient_context_lwt.storage

View file

@ -1,17 +0,0 @@
open Lwt.Syntax

(** Lwt task that calls [tick()] regularly, to help enforce timeouts.
    @param frequency_s how often in seconds does the tick tock? *)
let start_ticker_thread ?(finally = ignore) ~(stop : bool Atomic.t)
    ~(frequency_s : float) ~(tick : unit -> unit) () : unit =
  (* clamp the delay so we sleep at least 0.5s between ticks *)
  let delay = max frequency_s 0.5 in
  let rec loop () =
    match Atomic.get stop with
    | true ->
      finally ();
      Lwt.return_unit
    | false ->
      let* () = Lwt_unix.sleep delay in
      tick ();
      loop ()
  in
  Lwt.async loop

View file

@ -1,204 +0,0 @@
open Common_

(* Short aliases for the OTLP service protobuf modules. *)
module Trace_service = Opentelemetry.Proto.Trace_service
module Metrics_service = Opentelemetry.Proto.Metrics_service
module Logs_service = Opentelemetry.Proto.Logs_service
module Span = Opentelemetry.Span
open struct
  (* Wrap a non-empty list with [f]; an empty list yields no batch at all. *)
  let of_x_or_empty ?service_name ?attrs ~f l =
    match l with
    | [] -> []
    | _ :: _ -> [ f ?service_name ?attrs l ]
end
(* A batch of signals at the resource level: spans, metrics, or logs. *)
type t =
  | Traces of Proto.Trace.resource_spans list
  | Metrics of Proto.Metrics.resource_metrics list
  | Logs of Proto.Logs.resource_logs list
(* Wrap raw log records into a single resource-level batch. *)
let of_logs ?service_name ?attrs logs : t =
  let rl = Util_resources.make_resource_logs ?service_name ?attrs logs in
  Logs [ rl ]

let of_logs_or_empty ?service_name ?attrs logs =
  of_x_or_empty ?service_name ?attrs ~f:of_logs logs

(* Wrap spans into a single resource-level batch. *)
let of_spans ?service_name ?attrs spans : t =
  let rs = Util_resources.make_resource_spans ?service_name ?attrs spans in
  Traces [ rs ]

let of_spans_or_empty ?service_name ?attrs spans =
  of_x_or_empty ?service_name ?attrs ~f:of_spans spans

(* Wrap metrics into a single resource-level batch. *)
let of_metrics ?service_name ?attrs m : t =
  let rm = Util_resources.make_resource_metrics ?service_name ?attrs m in
  Metrics [ rm ]

let of_metrics_or_empty ?service_name ?attrs ms =
  of_x_or_empty ?service_name ?attrs ~f:of_metrics ms
(* Projections: recover the payload when the variant matches. The matches
   are written exhaustively so new constructors are flagged by the
   compiler. *)
let to_traces t =
  match t with
  | Traces xs -> Some xs
  | Metrics _ | Logs _ -> None

let to_metrics t =
  match t with
  | Metrics xs -> Some xs
  | Traces _ | Logs _ -> None

let to_logs t =
  match t with
  | Logs xs -> Some xs
  | Traces _ | Metrics _ -> None

(* Predicates on the kind of signal. *)
let is_traces = function
  | Traces _ -> true
  | Metrics _ | Logs _ -> false

let is_metrics = function
  | Metrics _ -> true
  | Traces _ | Logs _ -> false

let is_logs = function
  | Logs _ -> true
  | Traces _ | Metrics _ -> false
(* Convert a generic signal list into a resource-level batch. *)
let of_signal_l ?service_name ?attrs (s : OTEL.Any_signal_l.t) : t =
  match s with
  | Spans sp -> of_spans ?service_name ?attrs sp
  | Metrics ms -> of_metrics ?service_name ?attrs ms
  | Logs logs -> of_logs ?service_name ?attrs logs
(* Wire protocol used when exporting over HTTP (re-exported from the
   exporter configuration). *)
type protocol = Exporter_config.protocol =
  | Http_protobuf
  | Http_json
(** Encoders from resource batches to wire format (protobuf or JSON). *)
module Encode = struct
  (* Encode [resource] to a protobuf string. [encoder], when provided, is
     reset and reused so its internal buffer is not re-allocated per call. *)
  let resource_to_pb_string ~encoder ~ctor ~enc resource : string =
    let encoder =
      match encoder with
      | Some e ->
        Pbrt.Encoder.reset e;
        e
      | None -> Pbrt.Encoder.create ()
    in
    let x = ctor resource in
    let data =
      (* self-trace the encoding step, recording the payload size *)
      let@ _sc =
        Self_trace.with_ ~kind:Span.Span_kind_internal "encode-proto"
      in
      enc x encoder;
      let data = Pbrt.Encoder.to_string encoder in
      Span.add_attrs _sc [ "size", `Int (String.length data) ];
      (* leave the shared encoder clean for its next use *)
      Pbrt.Encoder.reset encoder;
      data
    in
    data

  (* Encode [resource] to a JSON string. *)
  let resource_to_json_string ~ctor ~enc resource : string =
    let x = ctor resource in
    let data =
      let@ _sc = Self_trace.with_ ~kind:Span.Span_kind_internal "encode-json" in
      let json = enc x in
      let data = Yojson.Basic.to_string json in
      Span.add_attrs _sc [ "size", `Int (String.length data) ];
      data
    in
    data

  let logs_pb ?encoder resource_logs =
    resource_to_pb_string ~encoder resource_logs
      ~ctor:(fun r ->
        Logs_service.make_export_logs_service_request ~resource_logs:r ())
      ~enc:Logs_service.encode_pb_export_logs_service_request

  let logs_json resource_logs =
    resource_to_json_string resource_logs
      ~ctor:(fun r ->
        Logs_service.make_export_logs_service_request ~resource_logs:r ())
      ~enc:Logs_service.encode_json_export_logs_service_request

  let metrics_pb ?encoder resource_metrics =
    resource_to_pb_string ~encoder resource_metrics
      ~ctor:(fun r ->
        Metrics_service.make_export_metrics_service_request ~resource_metrics:r
          ())
      ~enc:Metrics_service.encode_pb_export_metrics_service_request

  let metrics_json resource_metrics =
    resource_to_json_string resource_metrics
      ~ctor:(fun r ->
        Metrics_service.make_export_metrics_service_request ~resource_metrics:r
          ())
      ~enc:Metrics_service.encode_json_export_metrics_service_request

  let traces_pb ?encoder resource_spans =
    resource_to_pb_string ~encoder resource_spans
      ~ctor:(fun r ->
        Trace_service.make_export_trace_service_request ~resource_spans:r ())
      ~enc:Trace_service.encode_pb_export_trace_service_request

  let traces_json resource_spans =
    resource_to_json_string resource_spans
      ~ctor:(fun r ->
        Trace_service.make_export_trace_service_request ~resource_spans:r ())
      ~enc:Trace_service.encode_json_export_trace_service_request

  (* Dispatch on the configured protocol (protobuf by default). *)
  let logs ?encoder ?(protocol = Http_protobuf) resource_logs =
    match protocol with
    | Http_protobuf -> logs_pb ?encoder resource_logs
    | Http_json -> logs_json resource_logs

  let metrics ?encoder ?(protocol = Http_protobuf) resource_metrics =
    match protocol with
    | Http_protobuf -> metrics_pb ?encoder resource_metrics
    | Http_json -> metrics_json resource_metrics

  let traces ?encoder ?(protocol = Http_protobuf) resource_spans =
    match protocol with
    | Http_protobuf -> traces_pb ?encoder resource_spans
    | Http_json -> traces_json resource_spans

  (* Encode any kind of signal batch, dispatching on the variant. *)
  let any ?encoder ?(protocol = Http_protobuf) (r : t) : string =
    match r with
    | Logs l -> logs ?encoder ~protocol l
    | Traces sp -> traces ?encoder ~protocol sp
    | Metrics ms -> metrics ?encoder ~protocol ms
end
(** Decoders from protobuf-encoded strings back to resource batches. *)
module Decode = struct
  (* Run a protobuf decoder [dec] on the raw string [s].
     Raises [Pbrt.Decoder.Failure] on malformed input. *)
  let decode_with ~dec s = dec (Pbrt.Decoder.of_string s)

  let logs data =
    let req =
      decode_with ~dec:Logs_service.decode_pb_export_logs_service_request data
    in
    req.resource_logs

  let metrics data =
    let req =
      decode_with ~dec:Metrics_service.decode_pb_export_metrics_service_request
        data
    in
    req.resource_metrics

  let traces data =
    let req =
      decode_with ~dec:Trace_service.decode_pb_export_trace_service_request
        data
    in
    req.resource_spans
end
(* Pretty-printers for human inspection; not a stable wire format. *)
module Pp = struct
  let pp_sep fmt () = Format.fprintf fmt ",@."

  (* Print a list of resources between brackets, one element per line. *)
  let pp_signal pp fmt t =
    Format.fprintf fmt "[@ @[";
    Format.pp_print_list ~pp_sep pp fmt t;
    Format.fprintf fmt "@ ]@]@."

  let logs = pp_signal Proto.Logs.pp_resource_logs
  let metrics = pp_signal Proto.Metrics.pp_resource_metrics
  let traces = pp_signal Proto.Trace.pp_resource_spans

  (* Dispatch on the batch kind. *)
  let pp fmt = function
    | Logs ls -> logs fmt ls
    | Metrics ms -> metrics fmt ms
    | Traces ts -> traces fmt ts
end

let pp = Pp.pp

View file

@ -1,144 +0,0 @@
(** Constructing and managing OTel
    {{:https://opentelemetry.io/docs/concepts/signals/} signals} at the resource
    (batch) level *)

open Common_

(** The type of signals.
    This is not the primary type of signals from the perspective of what gets
    encoded and sent via protocol buffers, but it is the primary type that
    collector clients need to reason about. *)
type t =
  | Traces of Opentelemetry_proto.Trace.resource_spans list
  | Metrics of Opentelemetry_proto.Metrics.resource_metrics list
  | Logs of Opentelemetry_proto.Logs.resource_logs list

val pp : Format.formatter -> t -> unit
(** Pretty-print a batch for debugging. *)

val of_logs :
  ?service_name:string ->
  ?attrs:OTEL.Key_value.t list ->
  Proto.Logs.log_record list ->
  t
(** Wrap log records into a single resource-level batch. *)

val of_logs_or_empty :
  ?service_name:string ->
  ?attrs:OTEL.Key_value.t list ->
  Proto.Logs.log_record list ->
  t list
(** Like {!of_logs}, but produces no batch at all when the input is empty. *)

val of_spans :
  ?service_name:string -> ?attrs:OTEL.Key_value.t list -> OTEL.Span.t list -> t
(** Wrap spans into a single resource-level batch. *)

val of_spans_or_empty :
  ?service_name:string ->
  ?attrs:OTEL.Key_value.t list ->
  OTEL.Span.t list ->
  t list
(** Like {!of_spans}, but produces no batch at all when the input is empty. *)

val of_metrics :
  ?service_name:string ->
  ?attrs:OTEL.Key_value.t list ->
  Proto.Metrics.metric list ->
  t
(** Wrap metrics into a single resource-level batch. *)

val of_metrics_or_empty :
  ?service_name:string ->
  ?attrs:OTEL.Key_value.t list ->
  Proto.Metrics.metric list ->
  t list
(** Like {!of_metrics}, but produces no batch at all when the input is empty. *)

val of_signal_l :
  ?service_name:string ->
  ?attrs:OTEL.Key_value.t list ->
  OTEL.Any_signal_l.t ->
  t
(** Convert a generic signal list into a resource-level batch. *)

val to_traces : t -> Opentelemetry_proto.Trace.resource_spans list option
(** [to_traces t] is [Some spans] iff [t] is a trace batch. *)

val to_metrics : t -> Opentelemetry_proto.Metrics.resource_metrics list option
(** [to_metrics t] is [Some ms] iff [t] is a metrics batch. *)

val to_logs : t -> Opentelemetry_proto.Logs.resource_logs list option
(** [to_logs t] is [Some logs] iff [t] is a logs batch. *)

val is_traces : t -> bool

val is_metrics : t -> bool

val is_logs : t -> bool
(** Wire protocol used when encoding signals for export. *)
type protocol = Exporter_config.protocol =
  | Http_protobuf
  | Http_json

(** Encode signals to protobuf or JSON encoded strings, ready to be sent over
    the wire *)
module Encode : sig
  val logs :
    ?encoder:Pbrt.Encoder.t ->
    ?protocol:protocol ->
    Opentelemetry_proto.Logs.resource_logs list ->
    string
  (** [logs ls] is an encoded string of the logs [ls].
      @param encoder provide an encoder state to reuse (protobuf only)
      @param protocol encoding protocol to use (default: Http_protobuf) *)

  val metrics :
    ?encoder:Pbrt.Encoder.t ->
    ?protocol:protocol ->
    Opentelemetry_proto.Metrics.resource_metrics list ->
    string
  (** [metrics ms] is an encoded string of the metrics [ms].
      @param encoder provide an encoder state to reuse (protobuf only)
      @param protocol encoding protocol to use (default: Http_protobuf) *)

  val traces :
    ?encoder:Pbrt.Encoder.t ->
    ?protocol:protocol ->
    Opentelemetry_proto.Trace.resource_spans list ->
    string
  (** [traces ts] is an encoded string of the traces [ts].
      @param encoder provide an encoder state to reuse (protobuf only)
      @param protocol encoding protocol to use (default: Http_protobuf) *)

  val any : ?encoder:Pbrt.Encoder.t -> ?protocol:protocol -> t -> string
  (** [any t] encodes a batch of any kind, dispatching on the variant.
      @param encoder provide an encoder state to reuse (protobuf only)
      @param protocol encoding protocol to use (default: Http_protobuf) *)
end

(** Decode signals from protobuf encoded strings, received over the wire *)
module Decode : sig
  val logs : string -> Opentelemetry_proto.Logs.resource_logs list
  (** [logs s] is a list of log resources decoded from the protobuf encoded
      string [s].
      @raise Pbrt.Decoder.Failure if [s] is not a valid protobuf encoding. *)

  val metrics : string -> Opentelemetry_proto.Metrics.resource_metrics list
  (** [metrics s] is a list of metrics resources decoded from the protobuf
      encoded string [s].
      @raise Pbrt.Decoder.Failure if [s] is not a valid protobuf encoding. *)

  val traces : string -> Opentelemetry_proto.Trace.resource_spans list
  (** [traces s] is a list of span resources decoded from the protobuf encoded
      string [s].
      @raise Pbrt.Decoder.Failure if [s] is not a valid protobuf encoding. *)
end

(** Pretty-printers for human inspection. *)
module Pp : sig
  val logs :
    Format.formatter -> Opentelemetry_proto.Logs.resource_logs list -> unit

  val metrics :
    Format.formatter ->
    Opentelemetry_proto.Metrics.resource_metrics list ->
    unit

  val traces :
    Format.formatter -> Opentelemetry_proto.Trace.resource_spans list -> unit

  val pp : Format.formatter -> t -> unit
end

View file

@ -1,39 +0,0 @@
(* Random sampler state. *)
type t = {
  proba_accept: float;  (* probability in [0., 1.] of accepting a signal *)
  rng: Random.State.t;
      (* private PRNG state; not safe to share across domains, see [accept] *)
  n_seen: int Atomic.t;  (* total number of [accept] calls so far *)
  n_accepted: int Atomic.t;  (* number of calls that returned [true] *)
}
(** [create ~proba_accept ()] makes a new sampler that accepts signals with
    probability [proba_accept].
    @raise Invalid_argument
      if [proba_accept] is not in the interval [0., 1.]. NaN is rejected
      too. *)
let create ~proba_accept () : t =
  (* Written as [not (>= && <=)] rather than [< ||  >] so that NaN is
     rejected: [nan < 0.] and [nan > 1.] are both [false], which would
     otherwise let a NaN through and yield a sampler that never accepts. *)
  if not (proba_accept >= 0. && proba_accept <= 1.) then
    invalid_arg "sampler: proba_accept must be in [0., 1.]";
  {
    proba_accept;
    rng = Random.State.make_self_init ();
    n_seen = Atomic.make 0;
    n_accepted = Atomic.make 0;
  }
let[@inline] proba_accept self = self.proba_accept

(* Observed acceptance ratio so far; [1.] before any sample was seen. *)
let actual_rate (self : t) : float =
  let accepted = Atomic.get self.n_accepted in
  let seen = Atomic.get self.n_seen in
  if seen = 0 then
    1.
  else
    float_of_int accepted /. float_of_int seen
(* Decide whether to accept one more sample, updating both counters. *)
let accept (self : t) : bool =
  Atomic.incr self.n_seen;
  (* WARNING: Random.State.float is not safe to call concurrently on the
     same state from multiple domains. If a sampler is shared across domains,
     consider creating one sampler per domain. *)
  let draw = Random.State.float self.rng 1. in
  if draw < self.proba_accept then (
    Atomic.incr self.n_accepted;
    true
  ) else
    false

View file

@ -1,20 +0,0 @@
(** Basic random sampling. *)

type t

val create : proba_accept:float -> unit -> t
(** [create ~proba_accept:n ()] makes a new sampler.
    The sampler will accept signals with probability [n] (must be between 0 and
    1).
    @raise Invalid_argument if [n] is not between 0 and 1. *)

val accept : t -> bool
(** Do we accept a sample? This returns [true] with probability [proba_accept].
*)

val proba_accept : t -> float
(** The configured acceptance probability. *)

val actual_rate : t -> float
(** The ratio of signals we actually accepted so far. This should asymptotically
    be equal to {!proba_accept} if the random generator is good. *)

View file

@ -1,13 +0,0 @@
open Common_

(* Global flag: when [false], [with_] is a no-op. *)
let enabled = Atomic.make false

let[@inline] add_event (scope : OTEL.Span.t) ev = OTEL.Span.add_event scope ev

let set_enabled b = Atomic.set enabled b

(* Instrument [f] with a span when self-tracing is on; otherwise run it
   against a dummy span. *)
let with_ ?kind ?attrs name f =
  match Atomic.get enabled with
  | false -> f OTEL.Span.dummy
  | true ->
    let tracer = OTEL.Trace_provider.get () in
    OTEL.Tracer.with_ ~tracer ?kind ?attrs name f

View file

@ -1,20 +0,0 @@
(** Mini tracing module for OTEL itself.
    When enabled via {!set_enabled}, emits spans via the current
    {!OTEL.Trace_provider}. Disabled by default. *)

open Common_

val add_event : OTEL.Span.t -> OTEL.Event.t -> unit
(** [add_event span ev] attaches the event [ev] to [span]. *)

val with_ :
  ?kind:OTEL.Span_kind.t ->
  ?attrs:(string * OTEL.value) list ->
  string ->
  (OTEL.Span.t -> 'a) ->
  'a
(** Instrument a section of SDK code with a span. No-ops when disabled. *)

val set_enabled : bool -> unit
(** Enable or disable self-tracing. When enabled, uses the current
    {!OTEL.Trace_provider} to emit spans. *)

View file

@ -1,141 +0,0 @@
module BQ = Bounded_queue

(* Result of a (possibly partial) batch push. *)
type push_res =
  | Closed  (* the queue was closed; nothing was pushed *)
  | Pushed of { num_discarded: int }
      (* pushed; [num_discarded] items were dropped for lack of room *)
(* a variant of {!Sync_queue} with more bespoke pushing behavior *)
module Q : sig
  type 'a t

  val create : unit -> 'a t
  val close : _ t -> unit
  val size : _ t -> int
  val closed : _ t -> bool
  val try_pop : 'a t -> 'a BQ.pop_result

  val push_while_not_full : high_watermark:int -> 'a t -> 'a list -> push_res
  (** [push_while_not_full q ~high_watermark xs] tries to push each item of [xs]
      into [q].
      An item is not pushed if the queue is "full" (size >= high_watermark).
      This returns [Closed] if the queue was closed (nothing pushed), or
      [Pushed { num_discarded }] where [num_discarded] is the number of items
      that could not be pushed. *)
end = struct
  module UM = Util_mutex

  (* Mutex-protected FIFO with a close flag. *)
  type 'a t = {
    mutex: Mutex.t;
    q: 'a Queue.t;
    mutable closed: bool;
  }

  let create () : _ t =
    { mutex = Mutex.create (); q = Queue.create (); closed = false }

  (* NOTE: the race condition here is benign, assuming no tearing of
     a value of type [bool] which OCaml's memory model should guarantee. *)
  let[@inline] closed self = self.closed

  let[@inline] size self = UM.protect self.mutex (fun () -> Queue.length self.q)

  let close (self : _ t) =
    UM.protect self.mutex @@ fun () ->
    if not self.closed then self.closed <- true

  let try_pop (self : 'a t) : 'a BQ.pop_result =
    UM.protect self.mutex @@ fun () ->
    (* first, try to pop the queue. We want to drain it even if it's closed. *)
    try `Item (Queue.pop self.q)
    with Queue.Empty ->
      if self.closed then
        `Closed
      else
        `Empty

  let push_while_not_full ~high_watermark (self : 'a t) (xs : 'a list) :
      push_res =
    UM.protect self.mutex @@ fun () ->
    if self.closed then
      Closed
    else (
      (* push items one by one until the queue is full or [xs] is drained *)
      let to_push = ref xs in
      let continue = ref true in
      while !continue && Queue.length self.q < high_watermark do
        match !to_push with
        | [] -> continue := false
        | x :: tl_xs ->
          to_push := tl_xs;
          Queue.push x self.q
      done;
      (* whatever remains in [to_push] did not fit *)
      let num_discarded = List.length !to_push in
      (* Printf.eprintf "bq: pushed %d items (discarded: %d)\n%!" (List.length xs - num_discarded) num_discarded; *)
      Pushed { num_discarded }
    )
end
(* Shared state behind both endpoints of the bounded queue. *)
type 'a state = {
  n_discarded: int Atomic.t;  (* running total of dropped items *)
  high_watermark: int;  (* max queue size; pushes beyond it are dropped *)
  q: 'a Q.t;
  on_non_empty: Cb_set.t;  (* callbacks fired when items may have arrived *)
}
(* Push a batch, dropping whatever does not fit under the high watermark,
   and wake up consumers if anything might have landed. *)
let push (self : _ state) x =
  if x <> [] then (
    match
      Q.push_while_not_full self.q ~high_watermark:self.high_watermark x
    with
    | Closed ->
      (* queue closed: the whole batch is discarded *)
      ignore (Atomic.fetch_and_add self.n_discarded (List.length x) : int)
    | Pushed { num_discarded } ->
      if num_discarded > 0 then (
        ignore (Atomic.fetch_and_add self.n_discarded num_discarded : int);
        Opentelemetry.Self_debug.log Warning (fun () ->
            Printf.sprintf "otel: dropped %d signals (exporter queue full)"
              num_discarded)
      );
      (* wake up potentially asleep consumers *)
      Cb_set.trigger self.on_non_empty
  )

(* Non-blocking pop; delegates to the underlying queue. *)
let[@inline] try_pop (self : _ state) : _ BQ.pop_result = Q.try_pop self.q
(* Package the shared [state] into the two endpoints of a {!BQ.t}. *)
let to_bounded_queue (self : 'a state) : 'a BQ.t =
  (* operations shared by both endpoints *)
  let size () = Q.size self.q in
  let closed () = Q.closed self.q in
  let high_watermark () = self.high_watermark in
  let num_discarded () = Atomic.get self.n_discarded in
  let common = { BQ.Common.closed; num_discarded; size; high_watermark } in
  (* sender side *)
  let send_push x = push self x in
  let send_close () =
    Q.close self.q;
    (* waiters will want to know *)
    Cb_set.trigger self.on_non_empty
  in
  (* receiver side *)
  let recv_try_pop () = try_pop self in
  let recv_on_non_empty = Cb_set.register self.on_non_empty in
  {
    BQ.send = { push = send_push; close = send_close; common };
    recv = { try_pop = recv_try_pop; on_non_empty = recv_on_non_empty; common };
  }
(* Create a bounded queue that drops items beyond [high_watermark]. *)
let create ~high_watermark () : _ BQ.t =
  to_bounded_queue
    {
      q = Q.create ();
      high_watermark;
      n_discarded = Atomic.make 0;
      on_non_empty = Cb_set.create ();
    }

View file

@ -1,7 +0,0 @@
(** Bounded queue based on simple synchronization primitives.
    This is not the fastest queue but it should be versatile. *)

val create : high_watermark:int -> unit -> 'a Bounded_queue.t
(** [create ~high_watermark ()] creates a new bounded queue based on
    {!Sync_queue}. Items pushed while the queue already holds
    [high_watermark] elements are dropped. *)

View file

@ -1 +0,0 @@
(* Short alias so SDK code can write [OTEL.Span], [OTEL.Tracer], etc. *)
module OTEL = Opentelemetry

View file

@ -1,23 +0,0 @@
; Build rules for the synchronous (thread-based) client helpers.
(library
 (name opentelemetry_client_sync)
 (public_name opentelemetry-client.sync)
 (flags
  :standard
  -open
  Opentelemetry_util
  -open
  Opentelemetry_client
  -open
  Opentelemetry_atomic)
 (libraries
  opentelemetry.util
  opentelemetry.atomic
  opentelemetry.emitter
  (re_export opentelemetry.core)
  (re_export opentelemetry)
  (re_export opentelemetry-client)
  (re_export threads)
  mtime
  mtime.clock.os
  unix)
 (synopsis "Synchronous/threading-related helpers for opentelemetry-client"))

View file

@ -1,5 +0,0 @@
(* Direct-style IO: promises are plain values. *)
include Generic_io.Direct_style

(* Blocking sleep, in seconds. *)
let sleep_s = Thread.delay

(* Run [f] in a background thread (via {!Util_thread.start_bg_thread});
   the thread handle is discarded. *)
let[@inline] spawn f = ignore (Util_thread.start_bg_thread f : Thread.t)

View file

@ -1,4 +0,0 @@
(** Synchronous IOs, with threads for concurrency *)

include Generic_io.S_WITH_CONCURRENCY with type 'a t = 'a
(** Generic IO with [spawn] starting a background thread *)

Some files were not shown because too many files have changed in this diff Show more