Duplicate lwt client code

The Eio port will be developed on top of this baseline.
This commit is contained in:
Shon Feder 2025-05-16 11:34:44 -04:00
parent 885d0b6a75
commit 8d1cb1ac9f
No known key found for this signature in database
12 changed files with 1143 additions and 0 deletions

View file

@ -70,6 +70,25 @@
(tags (tags
(instrumentation tracing opentelemetry datadog lwt))) (instrumentation tracing opentelemetry datadog lwt)))
(package
; TODO
(name opentelemetry-eio)
(synopsis "Eio-compatible instrumentation for https://opentelemetry.io")
(depends
(ocaml
(>= "4.08"))
(opentelemetry
(= :version))
(cohttp-lwt-unix :with-test)
(odoc :with-doc)
(lwt
(>= "5.3"))
(lwt_ppx
(>= "2.0"))
(alcotest :with-test))
(tags
(instrumentation tracing opentelemetry datadog lwt)))
(package (package
(name opentelemetry-client-ocurl) (name opentelemetry-client-ocurl)
(depends (depends
@ -105,6 +124,26 @@
(alcotest :with-test)) (alcotest :with-test))
(synopsis "Opentelemetry tracing for Cohttp HTTP servers")) (synopsis "Opentelemetry tracing for Cohttp HTTP servers"))
(package
; TODO: Not sure this is needed.
(name opentelemetry-cohttp-eio)
(depends
(ocaml
(>= "4.08"))
(opentelemetry
(= :version))
(opentelemetry-eio
(= :version))
(odoc :with-doc)
(lwt
(>= "5.3"))
(cohttp-lwt
(and
(>= "4.0.0")
(< "6")))
(alcotest :with-test))
(synopsis "Opentelemetry tracing for Cohttp HTTP servers"))
(package (package
(name opentelemetry-client-cohttp-lwt) (name opentelemetry-client-cohttp-lwt)
(depends (depends
@ -124,3 +163,24 @@
cohttp-lwt-unix cohttp-lwt-unix
(alcotest :with-test)) (alcotest :with-test))
(synopsis "Collector client for opentelemetry, using cohttp + lwt")) (synopsis "Collector client for opentelemetry, using cohttp + lwt"))
(package
; TODO
(name opentelemetry-client-cohttp-eio)
(depends
(ocaml
(>= "4.08"))
(mtime
(>= "1.4"))
; for spans
(opentelemetry
(= :version))
(odoc :with-doc)
(lwt
(>= "5.3"))
(lwt_ppx
(>= "2.0"))
cohttp-lwt
cohttp-lwt-unix
(alcotest :with-test))
(synopsis "Collector client for opentelemetry, using cohttp + eio"))

View file

@ -0,0 +1,42 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.11.2"
synopsis: "Collector client for opentelemetry, using cohttp + eio"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"odoc" {with-doc}
"lwt" {>= "5.3"}
"lwt_ppx" {>= "2.0"}
"cohttp-lwt"
"cohttp-lwt-unix"
"alcotest" {with-test}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -0,0 +1,40 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.11.2"
synopsis: "Opentelemetry tracing for Cohttp HTTP servers"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "4.08"}
"opentelemetry" {= version}
"opentelemetry-eio" {= version}
"odoc" {with-doc}
"lwt" {>= "5.3"}
"cohttp-lwt" {>= "4.0.0" & < "6"}
"alcotest" {with-test}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

41
opentelemetry-eio.opam Normal file
View file

@ -0,0 +1,41 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.11.2"
synopsis: "Eio-compatible instrumentation for https://opentelemetry.io"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
tags: ["instrumentation" "tracing" "opentelemetry" "datadog" "lwt"]
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"ocaml" {>= "4.08"}
"opentelemetry" {= version}
"cohttp-lwt-unix" {with-test}
"odoc" {with-doc}
"lwt" {>= "5.3"}
"lwt_ppx" {>= "2.0"}
"alcotest" {with-test}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -0,0 +1,59 @@
(* Shared helpers for the collector client.
   NOTE(review): copied verbatim from the Lwt client; the Eio port is expected
   to build on top of this. *)

module Atomic = Opentelemetry_atomic.Atomic

(* [let@ f x] is [f x]: binding operator used for scoped wrappers,
   e.g. [let@ () = guard ... in ...]. *)
let[@inline] ( let@ ) f x = f x

let spf = Printf.sprintf

(* Identifier of the current thread (used only for debug output). *)
let tid () = Thread.id @@ Thread.self ()

(* Debug flag; initialized from OTEL_OCAML_DEBUG ("1" or "true"). *)
let debug_ =
  ref
    (match Sys.getenv_opt "OTEL_OCAML_DEBUG" with
    | Some ("1" | "true") -> true
    | _ -> false)

(* Default OTLP/HTTP endpoint. *)
let default_url = "http://localhost:4318"
(* [make_get_from_env name] returns a thunk that looks up [name] in the
   environment. A successful lookup is cached; an unset variable is re-queried
   on every call (only [Some _] results stick). *)
let make_get_from_env env_name =
  let cache = ref None in
  fun () ->
    match !cache with
    | Some _ as hit -> hit
    | None ->
      let looked_up = Sys.getenv_opt env_name in
      cache := looked_up;
      looked_up
(* Per-signal endpoint lookups, following the standard OTLP exporter
   environment-variable names. *)
let get_url_from_env = make_get_from_env "OTEL_EXPORTER_OTLP_ENDPOINT"

let get_url_traces_from_env =
  make_get_from_env "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"

let get_url_metrics_from_env =
  make_get_from_env "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"

let get_url_logs_from_env = make_get_from_env "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"
(* Drop a single trailing '/' from [url], if present. *)
let remove_trailing_slash url =
  let n = String.length url in
  if n > 0 && url.[n - 1] = '/' then
    String.sub url 0 (n - 1)
  else
    url
(* Parse the "k1=v1,k2=v2" format of OTEL_EXPORTER_OTLP_HEADERS.

   Each pair is split on the FIRST '=' only, so header values may themselves
   contain '=' (e.g. base64-padded credentials, which the OTLP exporter spec
   allows). The previous version split on every '=' and raised on such values.

   @raise Failure if a pair contains no '=' at all. *)
let parse_headers s =
  let parse_header s =
    match String.index_opt s '=' with
    | Some i ->
      String.sub s 0 i, String.sub s (i + 1) (String.length s - i - 1)
    | None -> failwith "Unexpected format for header"
  in
  String.split_on_char ',' s |> List.map parse_header
let default_headers = []

(* Headers sent with every export request; seeded from
   OTEL_EXPORTER_OTLP_HEADERS when it is set and parseable, otherwise empty. *)
let headers =
  ref
    (try parse_headers (Sys.getenv "OTEL_EXPORTER_OTLP_HEADERS")
     with _ -> default_headers)

let get_headers () = !headers
let set_headers s = headers := s

View file

@ -0,0 +1,79 @@
open Common_
(* Client configuration. Field semantics are documented in the .mli. *)
type t = {
  debug: bool;
  url_traces: string; (* full endpoint url for traces *)
  url_metrics: string; (* full endpoint url for metrics *)
  url_logs: string; (* full endpoint url for logs *)
  headers: (string * string) list;
  batch_traces: int option; (* max batch size; [None] = no batching *)
  batch_metrics: int option;
  batch_logs: int option;
  batch_timeout_ms: int; (* emit even incomplete batches after this delay *)
}
(* Pretty-print a configuration (used for debug output). *)
let pp out self : unit =
  let ppiopt = Format.pp_print_option Format.pp_print_int in
  let pp_header ppf (a, b) = Format.fprintf ppf "@[%s: @,%s@]@." a b in
  let ppheaders = Format.pp_print_list pp_header in
  (* destructure so the compiler flags any field this printer would miss *)
  let {
    debug;
    url_traces;
    url_metrics;
    url_logs;
    headers;
    batch_traces;
    batch_metrics;
    batch_logs;
    batch_timeout_ms;
  } =
    self
  in
  Format.fprintf out
    "{@[ debug=%B;@ url_traces=%S;@ url_metrics=%S;@ url_logs=%S;@ \
     headers=%a;@ batch_traces=%a;@ batch_metrics=%a;@ batch_logs=%a;@ \
     batch_timeout_ms=%d; @]}"
    debug url_traces url_metrics url_logs ppheaders headers ppiopt batch_traces
    ppiopt batch_metrics ppiopt batch_logs batch_timeout_ms
(* Build a configuration. Precedence for each per-signal url:
   env var > explicit argument > default derived from the base url
   (where the base url itself is OTEL_EXPORTER_OTLP_ENDPOINT > [url] >
   {!default_url}). *)
let make ?(debug = !debug_) ?url ?url_traces ?url_metrics ?url_logs
    ?(headers = get_headers ()) ?(batch_traces = Some 400)
    ?(batch_metrics = Some 20) ?(batch_logs = Some 400)
    ?(batch_timeout_ms = 500) () : t =
  let url_traces, url_metrics, url_logs =
    let base_url =
      let base_url =
        match get_url_from_env () with
        | None -> Option.value url ~default:default_url
        | Some url -> remove_trailing_slash url
      in
      (* second pass also normalizes a user-supplied [url] argument *)
      remove_trailing_slash base_url
    in
    (* per-signal env vars / arguments are used verbatim, no normalization *)
    let url_traces =
      match get_url_traces_from_env () with
      | None -> Option.value url_traces ~default:(base_url ^ "/v1/traces")
      | Some url -> url
    in
    let url_metrics =
      match get_url_metrics_from_env () with
      | None -> Option.value url_metrics ~default:(base_url ^ "/v1/metrics")
      | Some url -> url
    in
    let url_logs =
      match get_url_logs_from_env () with
      | None -> Option.value url_logs ~default:(base_url ^ "/v1/logs")
      | Some url -> url
    in
    url_traces, url_metrics, url_logs
  in
  {
    debug;
    url_traces;
    url_metrics;
    url_logs;
    headers;
    batch_traces;
    batch_metrics;
    batch_timeout_ms;
    batch_logs;
  }

View file

@ -0,0 +1,78 @@
type t = private {
  debug: bool;
  url_traces: string;  (** Url to send traces *)
  url_metrics: string;  (** Url to send metrics *)
  url_logs: string;  (** Url to send logs *)
  headers: (string * string) list;
      (** API headers sent to the endpoint. Default is none or
          "OTEL_EXPORTER_OTLP_HEADERS" if set. *)
  batch_traces: int option;
      (** Batch traces? If [Some i], then this produces batches of (at most) [i]
          items. If [None], there is no batching.

          Note that traces and metrics are batched separately. Default
          [Some 400]. *)
  batch_metrics: int option;
      (** Batch metrics? If [Some i], then this produces batches of (at most)
          [i] items. If [None], there is no batching.

          Note that traces and metrics are batched separately. Default
          [Some 20]. *)
  batch_logs: int option;
      (** Batch logs? See {!batch_metrics} for details. Default [Some 400] *)
  batch_timeout_ms: int;
      (** Number of milliseconds after which we will emit a batch, even
          incomplete. Note that the batch might take longer than that, because
          this is only checked when a new event occurs. Default 500. *)
}
(** Configuration.

    To build one, use {!make} below. This might be extended with more fields in
    the future. *)

val make :
  ?debug:bool ->
  ?url:string ->
  ?url_traces:string ->
  ?url_metrics:string ->
  ?url_logs:string ->
  ?headers:(string * string) list ->
  ?batch_traces:int option ->
  ?batch_metrics:int option ->
  ?batch_logs:int option ->
  ?batch_timeout_ms:int ->
  unit ->
  t
(** Make a configuration.

    @param url
      base url used to construct per-signal urls. Per-signal url options take
      precedence over this base url. Default is "http://localhost:4318", or
      "OTEL_EXPORTER_OTLP_ENDPOINT" if set.

      Example of constructed per-signal urls with the base url
      http://localhost:4318

      - Traces: http://localhost:4318/v1/traces
      - Metrics: http://localhost:4318/v1/metrics
      - Logs: http://localhost:4318/v1/logs

      Use per-signal url options if different urls are needed for each signal
      type.
    @param url_traces
      url to send traces, or "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" if set. The
      url is used as-is without any modification.
    @param url_metrics
      url to send metrics, or "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT" if set. The
      url is used as-is without any modification.
    @param url_logs
      url to send logs, or "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT" if set. The url is
      used as-is without any modification. *)

val pp : Format.formatter -> t -> unit
(** Pretty-print a configuration. *)

View file

@ -0,0 +1,14 @@
(library
 (name opentelemetry_client_cohttp_eio)
 (public_name opentelemetry-client-cohttp-eio)
 ; NOTE(review): currently a verbatim copy of the Lwt client; the lwt /
 ; cohttp-lwt dependencies and the lwt_ppx preprocessing below are expected
 ; to be replaced as the Eio port lands.
 (synopsis "Opentelemetry collector using cohttp+eio+unix")
 (preprocess
  (pps lwt_ppx))
 (libraries
  opentelemetry
  lwt
  cohttp-lwt
  cohttp-lwt-unix
  pbrt
  mtime
  mtime.clock.os))

View file

@ -0,0 +1,602 @@
(*
https://github.com/open-telemetry/oteps/blob/main/text/0035-opentelemetry-protocol.md
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
open Opentelemetry
include Common_
external reraise : exn -> 'a = "%reraise"
(** This is equivalent to [Lwt.reraise]. We inline it here so we don't force to
    use Lwt's latest version *)

(* Set to true (see [signal_emit_gc_metrics]) to request a GC sample on the
   next tick/push. *)
let needs_gc_metrics = Atomic.make false

(* Time of the last GC metrics sample. *)
let last_gc_metrics = Atomic.make (Mtime_clock.now ())

(* Sample GC metrics at least this often, even without an explicit request. *)
let timeout_gc_metrics = Mtime.Span.(20 * s)

let gc_metrics = ref []
(* side channel for GC metrics; drained into the next metrics batch
   (see [emit_metrics_maybe]) *)
(* Capture current GC metrics if {!needs_gc_metrics} is true, or if it has been
   longer than {!timeout_gc_metrics} since the last collection, and push them
   into {!gc_metrics} for later emission. *)
let sample_gc_metrics_if_needed () =
  let now = Mtime_clock.now () in
  (* atomically consume the request flag, if it was set *)
  let alarm = Atomic.compare_and_set needs_gc_metrics true false in
  let timeout () =
    let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
    Mtime.Span.compare elapsed timeout_gc_metrics > 0
  in
  if alarm || timeout () then (
    Atomic.set last_gc_metrics now;
    let l =
      OT.Metrics.make_resource_metrics
        ~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
      @@ Opentelemetry.GC_metrics.get_metrics ()
    in
    gc_metrics := l :: !gc_metrics
  )
(* Errors reported by the HTTP export path. *)
type error =
  [ `Status of int * Opentelemetry.Proto.Status.status
    (* non-2xx HTTP code + decoded protobuf status *)
  | `Failure of string
  | `Sysbreak (* ctrl-c *)
  ]

(* Exporter self-metric counters, reported via [additional_metrics]. *)
let n_errors = Atomic.make 0
let n_dropped = Atomic.make 0
(* Print an export error on stderr (we cannot report export errors via otel
   itself). *)
let report_err_ = function
  | `Sysbreak -> Printf.eprintf "opentelemetry: ctrl-c captured, stopping\n%!"
  | `Failure msg ->
    Format.eprintf "@[<2>opentelemetry: export failed: %s@]@." msg
  | `Status (code, { Opentelemetry.Proto.Status.code = scode; message; details })
    ->
    let pp_details out l =
      List.iter
        (fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
        l
    in
    Format.eprintf
      "@[<2>opentelemetry: export failed with@ http code=%d@ status \
       {@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]@."
      code scode
      (Bytes.unsafe_to_string message)
      pp_details details
(* Thin wrapper over cohttp-lwt-unix for POSTing protobuf payloads. *)
module Httpc : sig
  type t

  val create : unit -> t

  val send :
    t ->
    url:string ->
    decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
    string ->
    ('a, error) result Lwt.t
  (** [send t ~url ~decode body] POSTs the protobuf-encoded [body] to [url].
      On a 2xx response, the response body is decoded with [`Dec f] (or
      ignored with [`Ret x]); on error, the body is decoded as a protobuf
      [Status]. *)

  val cleanup : t -> unit
end = struct
  open Opentelemetry.Proto
  open Lwt.Syntax
  module Httpc = Cohttp_lwt_unix.Client

  (* cohttp keeps no persistent connection state for us *)
  type t = unit

  let create () : t = ()

  let cleanup _self = ()

  (* send the content to the remote endpoint/path *)
  let send (_self : t) ~url ~decode (bod : string) : ('a, error) result Lwt.t =
    let uri = Uri.of_string url in

    let open Cohttp in
    let headers = Header.(add_list (init ()) !headers) in
    let headers =
      Header.(add headers "Content-Type" "application/x-protobuf")
    in

    let body = Cohttp_lwt.Body.of_string bod in

    let* r =
      (* capture any exception raised by the POST as an [Error] *)
      try%lwt
        let+ r = Httpc.post ~headers ~body uri in
        Ok r
      with e -> Lwt.return @@ Error e
    in
    match r with
    | Error e ->
      let err =
        `Failure
          (spf "sending signals via http POST to %S\nfailed with:\n%s" url
             (Printexc.to_string e))
      in
      Lwt.return @@ Error err
    | Ok (resp, body) ->
      let* body = Cohttp_lwt.Body.to_string body in
      let code = Response.status resp |> Code.code_of_status in
      if not (Code.is_error code) then (
        match decode with
        | `Ret x -> Lwt.return @@ Ok x
        | `Dec f ->
          let dec = Pbrt.Decoder.of_string body in
          let r =
            try Ok (f dec)
            with e ->
              let bt = Printexc.get_backtrace () in
              Error
                (`Failure
                  (spf "decoding failed with:\n%s\n%s" (Printexc.to_string e)
                     bt))
          in
          Lwt.return r
      ) else (
        (* error response: try to decode the protobuf [Status] payload *)
        let dec = Pbrt.Decoder.of_string body in
        let r =
          try
            let status = Status.decode_pb_status dec in
            Error (`Status (code, status))
          with e ->
            let bt = Printexc.get_backtrace () in
            Error
              (`Failure
                (spf
                   "httpc: decoding of status (url=%S, code=%d) failed with:\n\
                    %s\n\
                    status: %S\n\
                    %s"
                   url code (Printexc.to_string e) body bt))
        in
        Lwt.return r
      )
end
(** Batch of resources to be pushed later.
This type is thread-safe. *)
module Batch : sig
  type 'a t

  val push' : 'a t -> 'a -> unit

  val pop_if_ready : ?force:bool -> now:Mtime.t -> 'a t -> 'a list option
  (** Is the batch ready to be emitted? If batching is disabled, this is true as
      soon as {!is_empty} is false. If a timeout is provided for this batch,
      then it will be ready if an element has been in it for at least the
      timeout.
      @param now passed to implement timeout *)

  val make : ?batch:int -> ?timeout:Mtime.span -> unit -> 'a t
  (** Create a new batch *)
end = struct
  type 'a t = {
    mutable size: int;
    mutable q: 'a list; (* most recent first *)
    batch: int option; (* target batch size; [None] = ready on every push *)
    high_watermark: int; (* drop new items once [size] reaches this *)
    timeout: Mtime.span option;
    mutable start: Mtime.t; (* when the current batch started *)
  }

  let make ?batch ?timeout () : _ t =
    Option.iter (fun b -> assert (b > 0)) batch;
    (* drop items once the queue holds 10x the batch size (100 if unbatched) *)
    let high_watermark = Option.fold ~none:100 ~some:(fun x -> x * 10) batch in
    {
      size = 0;
      start = Mtime_clock.now ();
      q = [];
      batch;
      timeout;
      high_watermark;
    }

  let timeout_expired_ ~now self : bool =
    match self.timeout with
    | Some t ->
      let elapsed = Mtime.span now self.start in
      Mtime.Span.compare elapsed t >= 0
    | None -> false

  let is_full_ self : bool =
    match self.batch with
    | None -> self.size > 0
    | Some b -> self.size >= b

  let pop_if_ready ?(force = false) ~now (self : _ t) : _ list option =
    if self.size > 0 && (force || is_full_ self || timeout_expired_ ~now self)
    then (
      let l = self.q in
      self.q <- [];
      self.size <- 0;
      assert (l <> []);
      Some l
    ) else
      None

  (* Returns [true] when the batch is ready to be emitted. *)
  let push (self : _ t) x : bool =
    if self.size >= self.high_watermark then (
      (* drop this to prevent queue from growing too fast *)
      Atomic.incr n_dropped;
      true
    ) else (
      if self.size = 0 && Option.is_some self.timeout then
        (* current batch starts now *)
        self.start <- Mtime_clock.now ();

      (* add to queue *)
      self.size <- 1 + self.size;
      self.q <- x :: self.q;
      let ready = is_full_ self in
      ready
    )

  let push' self x = ignore (push self x : bool)
end
(** An emitter. This is used by {!Backend} below to forward traces/metrics/…
from the program to whatever collector client we have. *)
module type EMITTER = sig
  open Opentelemetry.Proto

  val push_trace : Trace.resource_spans list -> unit
  val push_metrics : Metrics.resource_metrics list -> unit
  val push_logs : Logs.resource_logs list -> unit
  val set_on_tick_callbacks : (unit -> unit) AList.t -> unit

  val tick : unit -> unit
  (** Emit any batch that is full or whose timeout has expired (async). *)

  val cleanup : on_done:(unit -> unit) -> unit -> unit
  (** Flush all pending batches, then call [on_done]. *)
end
(* make an emitter.
exceptions inside should be caught, see
https://opentelemetry.io/docs/reference/specification/error-handling/ *)
(* Build an {!EMITTER} that batches signals and exports them over HTTP.
   @param stop when it becomes true, the ticker stops and [finally] runs. *)
let mk_emitter ~stop ~(config : Config.t) () : (module EMITTER) =
  let open Proto in
  let open Lwt.Syntax in
  (* local helpers *)
  let open struct
    (* shared batch timeout, if any *)
    let timeout =
      if config.batch_timeout_ms > 0 then
        Some Mtime.Span.(config.batch_timeout_ms * ms)
      else
        None

    let batch_traces : Trace.resource_spans list Batch.t =
      Batch.make ?batch:config.batch_traces ?timeout ()

    let batch_metrics : Metrics.resource_metrics list Batch.t =
      Batch.make ?batch:config.batch_metrics ?timeout ()

    let batch_logs : Logs.resource_logs list Batch.t =
      Batch.make ?batch:config.batch_logs ?timeout ()

    let on_tick_cbs_ = Atomic.make (AList.make ())
    let set_on_tick_callbacks = Atomic.set on_tick_cbs_

    (* encode [x] with [encode] and POST it to [url]; on failure, count and
       report the error, then back off for 3s to avoid a tight error loop *)
    let send_http_ (httpc : Httpc.t) encoder ~url ~encode x : unit Lwt.t =
      Pbrt.Encoder.reset encoder;
      encode x encoder;
      let data = Pbrt.Encoder.to_string encoder in
      let* r = Httpc.send httpc ~url ~decode:(`Ret ()) data in
      match r with
      | Ok () -> Lwt.return ()
      | Error `Sysbreak ->
        Printf.eprintf "ctrl-c captured, stopping\n%!";
        Atomic.set stop true;
        Lwt.return ()
      | Error err ->
        (* TODO: log error _via_ otel? *)
        Atomic.incr n_errors;
        report_err_ err;
        (* avoid crazy error loop *)
        Lwt_unix.sleep 3.

    let send_metrics_http curl encoder (l : Metrics.resource_metrics list list)
        =
      let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
      let x =
        Metrics_service.default_export_metrics_service_request
          ~resource_metrics:l ()
      in
      let url = config.Config.url_metrics in
      send_http_ curl encoder ~url
        ~encode:Metrics_service.encode_pb_export_metrics_service_request x

    let send_traces_http curl encoder (l : Trace.resource_spans list list) =
      let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
      let x =
        Trace_service.default_export_trace_service_request ~resource_spans:l ()
      in
      let url = config.Config.url_traces in
      send_http_ curl encoder ~url
        ~encode:Trace_service.encode_pb_export_trace_service_request x

    let send_logs_http curl encoder (l : Logs.resource_logs list list) =
      let l = List.fold_left (fun acc l -> List.rev_append l acc) [] l in
      let x =
        Logs_service.default_export_logs_service_request ~resource_logs:l ()
      in
      let url = config.Config.url_logs in
      send_http_ curl encoder ~url
        ~encode:Logs_service.encode_pb_export_logs_service_request x

    (* emit metrics, if the batch is full or timeout lapsed; pending GC
       metrics (if any) are drained into the same request *)
    let emit_metrics_maybe ~now ?force httpc encoder : bool Lwt.t =
      match Batch.pop_if_ready ?force ~now batch_metrics with
      | None -> Lwt.return false
      | Some l ->
        let batch = !gc_metrics :: l in
        gc_metrics := [];
        let+ () = send_metrics_http httpc encoder batch in
        true

    let emit_traces_maybe ~now ?force httpc encoder : bool Lwt.t =
      match Batch.pop_if_ready ?force ~now batch_traces with
      | None -> Lwt.return false
      | Some l ->
        let+ () = send_traces_http httpc encoder l in
        true

    let emit_logs_maybe ~now ?force httpc encoder : bool Lwt.t =
      match Batch.pop_if_ready ?force ~now batch_logs with
      | None -> Lwt.return false
      | Some l ->
        let+ () = send_logs_http httpc encoder l in
        true

    (* NOTE(review): the message still says "opentelemetry-curl" — copied from
       the ocurl client; the string should probably be updated. *)
    let[@inline] guard_exn_ where f =
      try f ()
      with e ->
        let bt = Printexc.get_backtrace () in
        Printf.eprintf
          "opentelemetry-curl: uncaught exception in %s: %s\n%s\n%!" where
          (Printexc.to_string e) bt

    (* flush all three batches, regardless of size/timeout *)
    let emit_all_force (httpc : Httpc.t) encoder : unit Lwt.t =
      let now = Mtime_clock.now () in
      let+ (_ : bool) = emit_traces_maybe ~now ~force:true httpc encoder
      and+ (_ : bool) = emit_logs_maybe ~now ~force:true httpc encoder
      and+ (_ : bool) = emit_metrics_maybe ~now ~force:true httpc encoder in
      ()

    (* sample GC metrics if needed, then run user tick callbacks *)
    let tick_common_ () =
      if !debug_ then Printf.eprintf "tick (from %d)\n%!" (tid ());
      sample_gc_metrics_if_needed ();
      List.iter
        (fun f ->
          try f ()
          with e ->
            Printf.eprintf "on tick callback raised: %s\n"
              (Printexc.to_string e))
        (AList.get @@ Atomic.get on_tick_cbs_);
      ()

    (* thread that calls [tick()] regularly, to help enforce timeouts *)
    let setup_ticker_thread ~tick ~finally () =
      let rec tick_thread () =
        if Atomic.get stop then (
          finally ();
          Lwt.return ()
        ) else
          let* () = Lwt_unix.sleep 0.5 in
          let* () = tick () in
          tick_thread ()
      in
      Lwt.async tick_thread
  end in
  let httpc = Httpc.create () in
  let encoder = Pbrt.Encoder.create () in
  let module M = struct
    (* we make sure that this is thread-safe, even though we don't have a
       background thread. There can still be a ticker thread, and there
       can also be several user threads that produce spans and call
       the emit functions. *)

    let push_trace e =
      let@ () = guard_exn_ "push trace" in
      Batch.push' batch_traces e;
      let now = Mtime_clock.now () in
      Lwt.async (fun () ->
          let+ (_ : bool) = emit_traces_maybe ~now httpc encoder in
          ())

    let push_metrics e =
      let@ () = guard_exn_ "push metrics" in
      sample_gc_metrics_if_needed ();
      Batch.push' batch_metrics e;
      let now = Mtime_clock.now () in
      Lwt.async (fun () ->
          let+ (_ : bool) = emit_metrics_maybe ~now httpc encoder in
          ())

    let push_logs e =
      let@ () = guard_exn_ "push logs" in
      Batch.push' batch_logs e;
      let now = Mtime_clock.now () in
      Lwt.async (fun () ->
          let+ (_ : bool) = emit_logs_maybe ~now httpc encoder in
          ())

    let set_on_tick_callbacks = set_on_tick_callbacks

    let tick_ () =
      tick_common_ ();
      (* NOTE(review): [tick_common_] already samples GC metrics; this second
         call looks redundant (harmless, since sampling is guarded). *)
      sample_gc_metrics_if_needed ();
      let now = Mtime_clock.now () in
      let+ (_ : bool) = emit_traces_maybe ~now httpc encoder
      and+ (_ : bool) = emit_logs_maybe ~now httpc encoder
      and+ (_ : bool) = emit_metrics_maybe ~now httpc encoder in
      ()

    let () = setup_ticker_thread ~tick:tick_ ~finally:ignore ()

    (* if called in a blocking context: work in the background *)
    let tick () = Lwt.async tick_

    let cleanup ~on_done () =
      if !debug_ then Printf.eprintf "opentelemetry: exiting…\n%!";
      Lwt.async (fun () ->
          let* () = emit_all_force httpc encoder in
          Httpc.cleanup httpc;
          on_done ();
          Lwt.return ())
  end in
  (module M)
(* Collector backend: plugs the emitter into the opentelemetry API and adds
   exporter self-metrics. *)
module Backend
    (Arg : sig
      val stop : bool Atomic.t
      val config : Config.t
    end)
    () : Opentelemetry.Collector.BACKEND = struct
  include (val mk_emitter ~stop:Arg.stop ~config:Arg.config ())

  open Opentelemetry.Proto
  open Opentelemetry.Collector

  let send_trace : Trace.resource_spans list sender =
    {
      send =
        (fun l ~ret ->
          (if !debug_ then
             let@ () = Lock.with_lock in
             Format.eprintf "send spans %a@."
               (Format.pp_print_list Trace.pp_resource_spans)
               l);
          push_trace l;
          ret ());
    }

  let last_sent_metrics = Atomic.make (Mtime_clock.now ())

  (* emit exporter self-metrics at most every 5s *)
  let timeout_sent_metrics = Mtime.Span.(5 * s)

  (* send metrics from time to time *)
  let signal_emit_gc_metrics () =
    if !debug_ then
      Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
    Atomic.set needs_gc_metrics true

  (* exporter self-metrics (dropped events, export errors), throttled by
     [timeout_sent_metrics] *)
  let additional_metrics () : Metrics.resource_metrics list =
    (* add exporter metrics to the lot? *)
    let last_emit = Atomic.get last_sent_metrics in
    let now = Mtime_clock.now () in
    let add_own_metrics =
      let elapsed = Mtime.span last_emit now in
      Mtime.Span.compare elapsed timeout_sent_metrics > 0
    in

    (* there is a possible race condition here, as several threads might update
       metrics at the same time. But that's harmless. *)
    if add_own_metrics then (
      Atomic.set last_sent_metrics now;
      let open OT.Metrics in
      [
        make_resource_metrics
          [
            sum ~name:"otel.export.dropped" ~is_monotonic:true
              [
                int
                  ~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
                  ~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
              ];
            sum ~name:"otel.export.errors" ~is_monotonic:true
              [
                int
                  ~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
                  ~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
              ];
          ];
      ]
    ) else
      []

  let send_metrics : Metrics.resource_metrics list sender =
    {
      send =
        (fun m ~ret ->
          (if !debug_ then
             let@ () = Lock.with_lock in
             Format.eprintf "send metrics %a@."
               (Format.pp_print_list Metrics.pp_resource_metrics)
               m);

          let m = List.rev_append (additional_metrics ()) m in
          push_metrics m;
          ret ());
    }

  let send_logs : Logs.resource_logs list sender =
    {
      send =
        (fun m ~ret ->
          (if !debug_ then
             let@ () = Lock.with_lock in
             Format.eprintf "send logs %a@."
               (Format.pp_print_list Logs.pp_resource_logs)
               m);
          push_logs m;
          ret ());
    }
end
(* Create a collector backend from a config. Also installs [config.debug]
   as the global debug flag.
   @param stop set to true to stop the background ticker *)
let create_backend ?(stop = Atomic.make false) ?(config = Config.make ()) () =
  debug_ := config.debug;
  let module B =
    Backend
      (struct
        let stop = stop
        let config = config
      end)
      ()
  in
  (module B : OT.Collector.BACKEND)
let setup_ ?stop ?config () : unit =
  let backend = create_backend ?stop ?config () in
  OT.Collector.set_backend backend;
  ()

(* Install this client as the global otel collector backend (if [enable]). *)
let setup ?stop ?config ?(enable = true) () =
  if enable then setup_ ?stop ?config ()
(* Remove the installed backend; the returned promise resolves once the
   backend has flushed and cleaned up. *)
let remove_backend () : unit Lwt.t =
  let done_fut, done_u = Lwt.wait () in
  OT.Collector.remove_backend ~on_done:(fun () -> Lwt.wakeup_later done_u ()) ();
  done_fut
(* [with_setup () f] installs the backend, runs [f], and removes the backend
   whether [f] succeeds or raises. *)
let with_setup ?stop ?(config = Config.make ()) ?(enable = true) () f : _ Lwt.t
    =
  if enable then (
    let open Lwt.Syntax in
    setup_ ?stop ~config ();
    Lwt.catch
      (fun () ->
        let* res = f () in
        let+ () = remove_backend () in
        res)
      (fun exn ->
        let* () = remove_backend () in
        reraise exn)
  ) else
    f ()

View file

@ -0,0 +1,47 @@
(*
TODO: more options from
https://opentelemetry.io/docs/reference/specification/protocol/exporter/
*)
open Common_
val get_headers : unit -> (string * string) list
(** Current http headers sent on every query to the collector. *)

val set_headers : (string * string) list -> unit
(** Set http headers that are sent on every http query to the collector. *)

module Config = Config

val create_backend :
  ?stop:bool Atomic.t ->
  ?config:Config.t ->
  unit ->
  (module Opentelemetry.Collector.BACKEND)
(** Create a new backend using lwt and cohttp

    NOTE [after_cleanup] optional parameter removed since NEXT_RELEASE *)

val setup :
  ?stop:bool Atomic.t -> ?config:Config.t -> ?enable:bool -> unit -> unit
(** Setup endpoint. This modifies {!Opentelemetry.Collector.backend}.
    @param enable
      actually setup the backend (default true). This can be used to
      enable/disable the setup depending on CLI arguments or environment.
    @param config configuration to use
    @param stop
      an atomic boolean. When it becomes true, background threads will all stop
      after a little while. *)

val remove_backend : unit -> unit Lwt.t
(** Shutdown current backend
    @since NEXT_RELEASE *)

val with_setup :
  ?stop:bool Atomic.t ->
  ?config:Config.t ->
  ?enable:bool ->
  unit ->
  (unit -> 'a Lwt.t) ->
  'a Lwt.t
(** [with_setup () f] is like [setup(); f()] but takes care of cleaning up after
    [f()] returns. See {!setup} for more details. *)

View file

@ -0,0 +1,73 @@
module T = Opentelemetry
module Otel_lwt = Opentelemetry_lwt

let spf = Printf.sprintf
let ( let@ ) f x = f x

(* Default sleeps, overridable from the CLI. NOTE(review): [sleep_inner] is
   parsed below but never read by [run] — presumably a leftover from an
   example with an inner loop; confirm before removing. *)
let sleep_inner = ref 0.1
let sleep_outer = ref 2.0

(* Cohttp client instrumented with the given otel [scope]. *)
let mk_client ~scope =
  Opentelemetry_cohttp_lwt.client ~scope (module Cohttp_lwt_unix.Client)
(* Loop forever: open a producer span, sleep, perform one instrumented GET,
   drain the response body, repeat. *)
let run () =
  let open Lwt.Syntax in
  let rec go () =
    let@ scope =
      Otel_lwt.Trace.with_ ~kind:T.Span.Span_kind_producer "loop.outer"
    in
    let* () = Lwt_unix.sleep !sleep_outer in
    let module C = (val mk_client ~scope) in
    let* _res, body =
      C.get (Uri.of_string "https://enec1hql02hz.x.pipedream.net")
    in
    let* () = Cohttp_lwt.Body.drain_body body in
    go ()
  in
  go ()
(* Entry point: parse CLI flags, build a config, and run the request loop
   under this (currently lwt-based) collector backend. *)
let () =
  Sys.catch_break true;
  T.Globals.service_name := "ocaml-otel-cohttp-client";
  T.Globals.service_namespace := Some "ocaml-otel.test";
  let debug = ref false in
  let batch_traces = ref 400 in
  let batch_metrics = ref 3 in
  let opts =
    [
      "--debug", Arg.Bool (( := ) debug), " enable debug output";
      "--batch-traces", Arg.Int (( := ) batch_traces), " size of traces batch";
      ( "--batch-metrics",
        Arg.Int (( := ) batch_metrics),
        " size of metrics batch" );
      "--sleep-inner", Arg.Set_float sleep_inner, " sleep (in s) in inner loop";
      "--sleep-outer", Arg.Set_float sleep_outer, " sleep (in s) in outer loop";
    ]
    |> Arg.align
  in
  Arg.parse opts (fun _ -> ()) "emit1 [opt]*";

  (* a batch size of 0 disables batching for that signal *)
  let some_if_nzero r =
    if !r > 0 then
      Some !r
    else
      None
  in
  let config =
    Opentelemetry_client_cohttp_eio.Config.make ~debug:!debug
      ~batch_traces:(some_if_nzero batch_traces)
      ~batch_metrics:(some_if_nzero batch_metrics)
      ()
  in
  Format.printf "@[<2>sleep outer: %.3fs,@ sleep inner: %.3fs,@ config: %a@]@."
    !sleep_outer !sleep_inner Opentelemetry_client_cohttp_eio.Config.pp config;
  Format.printf
    "Check HTTP requests at \
     https://requestbin.com/r/enec1hql02hz/26qShWryt5vJc1JfrOwalhr5vQt@.";
  Opentelemetry_client_cohttp_eio.with_setup ~config () run |> Lwt_main.run

View file

@ -23,3 +23,11 @@
opentelemetry opentelemetry
opentelemetry-client-cohttp-lwt opentelemetry-client-cohttp-lwt
opentelemetry-cohttp-lwt)) opentelemetry-cohttp-lwt))
(executable
(name cohttp_client_eio)
(modules cohttp_client_eio)
(libraries
opentelemetry
opentelemetry-client-cohttp-eio
opentelemetry-cohttp-lwt))