This commit is contained in:
Simon Cruanes 2025-12-08 16:16:48 +00:00 committed by GitHub
commit 8da8ba9f47
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
180 changed files with 4756 additions and 4622 deletions

1
.gitignore vendored
View file

@ -6,3 +6,4 @@ _opam
.merlin
*.install
*.exe
*.tmp

View file

@ -13,6 +13,9 @@ clean:
protoc-gen:
FORCE_GENPROTO=true dune build @lint
update-submodules:
git submodule update --init
format:
@dune build @fmt --auto-promote

View file

@ -28,11 +28,6 @@
(>= "4.08"))
ptime
hmap
atomic
(thread-local-storage
(and
(>= 0.2)
(< 0.3)))
(odoc :with-doc)
(alcotest :with-test)
(pbrt
@ -46,14 +41,33 @@
(>= 0.27)
(< 0.28)))
(mtime
(>= "1.4")))
(depopts trace lwt eio)
(>= "1.4")))
(depopts atomic trace thread-local-storage lwt eio)
(conflicts
(trace
(< 0.10)))
(tags
(instrumentation tracing opentelemetry datadog jaeger)))
(package
(name opentelemetry-client)
(synopsis "Client SDK for https://opentelemetry.io")
(depends
(opentelemetry
(= :version))
(odoc :with-doc)
(alcotest :with-test)
(saturn
(and
(>= 1.0)
(< 2.0)))
(thread-local-storage
(and
(>= 0.2)
(< 0.3))))
(tags
(tracing opentelemetry sdk)))
(package
(name opentelemetry-lwt)
(synopsis "Lwt-compatible instrumentation for https://opentelemetry.io")
@ -83,6 +97,8 @@
; atomic ; vendored
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(ezcurl
(>= 0.2.3))
@ -99,6 +115,8 @@
(>= "1.4"))
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(ezcurl-lwt
(>= 0.2.3))
@ -124,10 +142,14 @@
(containers :with-test)
(cohttp-lwt-unix :with-test)
(opentelemetry-client-cohttp-lwt
(and :with-test (= :version)))
(and
:with-test
(= :version)))
(opentelemetry-cohttp-lwt
(and :with-test (= :version))))
(synopsis "Opentelemetry tracing for Cohttp HTTP servers"))
(and
:with-test
(= :version))))
(synopsis "Opentelemetry-based reporter for Logs"))
(package
(name opentelemetry-cohttp-lwt)
@ -156,6 +178,8 @@
; for spans
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(lwt
(>= "5.3"))
@ -165,7 +189,10 @@
cohttp-lwt-unix
(alcotest :with-test)
(containers :with-test)
(opentelemetry-lwt (and :with-test (= :version))))
(opentelemetry-lwt
(and
:with-test
(= :version))))
(synopsis "Collector client for opentelemetry, using cohttp + lwt"))
(package
@ -179,6 +206,8 @@
mirage-crypto-rng-eio
(opentelemetry
(= :version))
(opentelemetry-client
(= :version))
(odoc :with-doc)
(cohttp-eio
(>= 6.1.0))

View file

@ -18,6 +18,7 @@ depends: [
"ca-certs"
"mirage-crypto-rng-eio"
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"cohttp-eio" {>= "6.1.0"}
"eio_main" {with-test}

View file

@ -16,6 +16,7 @@ depends: [
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"lwt" {>= "5.3"}
"lwt_ppx" {>= "2.0"}

View file

@ -16,6 +16,7 @@ depends: [
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"ezcurl-lwt" {>= "0.2.3"}
"ocurl"

View file

@ -16,6 +16,7 @@ depends: [
"ocaml" {>= "4.08"}
"mtime" {>= "1.4"}
"opentelemetry" {= version}
"opentelemetry-client" {= version}
"odoc" {with-doc}
"ezcurl" {>= "0.2.3"}
"ocurl"

39
opentelemetry-client.opam Normal file
View file

@ -0,0 +1,39 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis: "Client SDK for https://opentelemetry.io"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"
"ELLIOTTCABLE <opam@ell.io>"
]
authors: ["the Imandra team and contributors"]
license: "MIT"
tags: ["tracing" "opentelemetry" "sdk"]
homepage: "https://github.com/imandra-ai/ocaml-opentelemetry"
bug-reports: "https://github.com/imandra-ai/ocaml-opentelemetry/issues"
depends: [
"dune" {>= "2.9"}
"opentelemetry" {= version}
"odoc" {with-doc}
"alcotest" {with-test}
"saturn" {>= "1.0" & < "2.0"}
"thread-local-storage" {>= "0.2" & < "0.3"}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"--promote-install-files=false"
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
["dune" "install" "-p" name "--create-install-files" name]
]
dev-repo: "git+https://github.com/imandra-ai/ocaml-opentelemetry.git"

View file

@ -1,7 +1,7 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.12"
synopsis: "Opentelemetry tracing for Cohttp HTTP servers"
synopsis: "Opentelemetry-based reporter for Logs"
maintainer: [
"Simon Cruanes <simon.cruanes.2007@m4x.org>"
"Matt Bray <mattjbray@gmail.com>"

View file

@ -17,8 +17,6 @@ depends: [
"ocaml" {>= "4.08"}
"ptime"
"hmap"
"atomic"
"thread-local-storage" {>= "0.2" & < "0.3"}
"odoc" {with-doc}
"alcotest" {with-test}
"pbrt" {>= "4.0" & < "5.0"}
@ -26,7 +24,7 @@ depends: [
"ocamlformat" {with-dev-setup & >= "0.27" & < "0.28"}
"mtime" {>= "1.4"}
]
depopts: ["trace" "lwt" "eio"]
depopts: ["atomic" "trace" "thread-local-storage" "lwt" "eio"]
conflicts: [
"trace" {< "0.10"}
]

View file

@ -0,0 +1,55 @@
(** Extremely basic storage using a map from thread id to context *)
open Opentelemetry_ambient_context_core
open struct
module Atomic = Opentelemetry_atomic.Atomic
module Int_map = Map.Make (struct
type t = int
let compare : t -> t -> int = Stdlib.compare
end)
type st = { m: Context.t ref Int_map.t Atomic.t } [@@unboxed]
let get (self : st) : Context.t =
let tid = Thread.id @@ Thread.self () in
match Int_map.find tid (Atomic.get self.m) with
| exception Not_found -> Context.empty
| ctx_ref -> !ctx_ref
let with_context (self : st) ctx f =
let tid = Thread.id @@ Thread.self () in
let ctx_ref =
try Int_map.find tid (Atomic.get self.m)
with Not_found ->
let r = ref Context.empty in
while
let m = Atomic.get self.m in
let m' = Int_map.add tid r m in
not (Atomic.compare_and_set self.m m m')
do
()
done;
r
in
let old_ctx = !ctx_ref in
ctx_ref := ctx;
let finally () = ctx_ref := old_ctx in
Fun.protect ~finally f
end
let create_storage () : Storage.t =
let st = { m = Atomic.make Int_map.empty } in
{
name = "basic-map";
get_context = (fun () -> get st);
with_context = (fun ctx f -> with_context st ctx f);
}
(** Default storage *)
let storage : Storage.t = create_storage ()

View file

@ -0,0 +1,7 @@
type t = Hmap.t
type 'a key = 'a Hmap.key
let empty : t = Hmap.empty
let[@inline] new_key () : _ key = Hmap.Key.create ()

View file

@ -0,0 +1,5 @@
(library
(name opentelemetry_ambient_context_core)
(public_name opentelemetry.ambient-context.core)
(synopsis "Core definitions for ambient-context")
(libraries hmap))

View file

@ -0,0 +1,44 @@
(** Storage implementation.
There is a singleton storage for a given program, responsible for providing
ambient context to the rest of the program. *)
type t = {
name: string;
get_context: unit -> Context.t;
with_context: 'a. Context.t -> (unit -> 'a) -> 'a;
}
(** Storage type *)
(** Name of the storage implementation. *)
let[@inline] name self = self.name
(** Get the context from the current storage, or [Hmap.empty] if there is no
ambient context. *)
let[@inline] get_context self = self.get_context ()
(** [with_context storage ctx f] calls [f()] in an ambient context in which
[get_context()] will return [ctx]. Once [f()] returns, the storage is reset
to its previous value. *)
let[@inline] with_context self ctx f = self.with_context ctx f
(** Get the ambient context and then look up [k] in it *)
let[@inline] get self (k : 'a Context.key) : 'a option =
Hmap.find k (get_context self)
(** [with_key_bound_to storage k v f] calls [f()] in a context updated to have
[k] map to [v]. *)
let with_key_bound_to self k v f =
let ctx = get_context self in
let new_ctx = Hmap.add k v ctx in
self.with_context new_ctx f
(** [with_key_unbound storage k f] calls [f()] in a context updated to have [k]
bound to no value. *)
let with_key_unbound self k f =
let ctx = get_context self in
if Hmap.mem k ctx then (
let new_ctx = Hmap.rem k ctx in
self.with_context new_ctx f
) else
f ()

View file

@ -0,0 +1 @@
let storage = Basic_map.storage

View file

@ -0,0 +1,2 @@
val storage : Storage.t
(** Default storage. *)

View file

@ -0,0 +1 @@
let storage = Opentelemetry_ambient_context_tls.storage

View file

@ -0,0 +1,46 @@
(* TODO: conditional compilation, and use Basic_map in each DLS *)
(** Storage using DLS. *)
open Opentelemetry_ambient_context_core
open struct
module DLS = Domain.DLS
module Int_map = Map.Make (struct
type t = int
let compare : t -> t -> int = Stdlib.compare
end)
(* key used to access the context *)
let dls_k_context : Context.t ref Int_map.t DLS.key =
DLS.new_key
~split_from_parent:(fun _ -> Int_map.empty)
(fun _ -> Int_map.empty)
let dls_get () =
let tid = Thread.id @@ Thread.self () in
let map_ref = DLS.get dls_k_context in
try !(Int_map.find tid map_ref) with Not_found -> Hmap.empty
let dls_with ctx f =
let tid = Thread.id @@ Thread.self () in
let map = DLS.get dls_k_context in
let ctx_ref =
try Int_map.find tid map
with Not_found ->
let r = ref Context.empty in
DLS.set dls_k_context (Int_map.add tid r map);
r
in
let old_ctx = !ctx_ref in
ctx_ref := ctx;
let finally () = ctx_ref := old_ctx in
Fun.protect ~finally f
end
let storage : Storage.t =
{ name = "dls-int-map"; get_context = dls_get; with_context = dls_with }

View file

@ -3,13 +3,18 @@
(public_name opentelemetry.ambient-context)
(synopsis
"Abstraction over thread-local storage and fiber-local storage mechanisms")
(private_modules hmap_key_)
(flags
:standard
-open
Opentelemetry_ambient_context_core
-open
Opentelemetry_atomic)
(libraries
thread-local-storage
threads
atomic
opentelemetry.ambient-context.types
hmap
opentelemetry.ambient-context.core
opentelemetry.atomic
(select
hmap_key_.ml
default_.ml
from
(-> hmap_key_.new.ml))))
(opentelemetry.ambient-context.tls -> default_.tls.ml)
(-> default_.map.ml))))

View file

@ -4,4 +4,4 @@
(synopsis
"Storage backend for ambient-context using Eio's fibre-local storage")
(optional) ; eio
(libraries eio hmap opentelemetry.ambient-context thread-local-storage))
(libraries eio hmap opentelemetry.ambient-context.core))

View file

@ -1,39 +1,15 @@
open Opentelemetry_ambient_context_core
module Fiber = Eio.Fiber
open struct
let _internal_key : Hmap.t Fiber.key = Fiber.create_key ()
let ( let* ) = Option.bind
let fiber_context_key : Context.t Fiber.key = Fiber.create_key ()
end
module M = struct
let name = "Storage_eio"
let[@inline] get_map () = Fiber.get _internal_key
let[@inline] with_map m cb = Fiber.with_binding _internal_key m cb
let create_key = Hmap.Key.create
let get k =
let* context = get_map () in
Hmap.find k context
let with_binding k v cb =
let new_context =
match get_map () with
| None -> Hmap.singleton k v
| Some old_context -> Hmap.add k v old_context
in
with_map new_context cb
let without_binding k cb =
let new_context =
match get_map () with
| None -> Hmap.empty
| Some old_context -> Hmap.rem k old_context
in
with_map new_context cb
end
let storage () : Opentelemetry_ambient_context.storage = (module M)
let storage : Storage.t =
{
name = "eio";
get_context =
(fun () ->
Fiber.get fiber_context_key |> Option.value ~default:Hmap.empty);
with_context = (fun ctx f -> Fiber.with_binding fiber_context_key ctx f);
}

View file

@ -1,2 +0,0 @@
val storage : unit -> Opentelemetry_ambient_context.storage
(** Storage using Eio's fibers local storage *)

View file

@ -1 +0,0 @@
let key : Hmap.t Thread_local_storage.t = Thread_local_storage.create ()

View file

@ -4,4 +4,4 @@
(optional) ; lwt
(synopsis
"Storage backend for ambient-context using Lwt's sequence-associated storage")
(libraries lwt opentelemetry.ambient-context thread-local-storage))
(libraries lwt opentelemetry.ambient-context.core))

View file

@ -1,37 +1,15 @@
(** Storage using Lwt keys *)
open Opentelemetry_ambient_context_core
open struct
let _internal_key : Hmap.t Lwt.key = Lwt.new_key ()
let ( let* ) = Option.bind
let lwt_context_key : Context.t Lwt.key = Lwt.new_key ()
end
module M = struct
let name = "Storage_lwt"
let[@inline] get_map () = Lwt.get _internal_key
let[@inline] with_map m cb = Lwt.with_value _internal_key (Some m) cb
let create_key = Hmap.Key.create
let get k =
let* context = get_map () in
Hmap.find k context
let with_binding k v cb =
let new_context =
match get_map () with
| None -> Hmap.singleton k v
| Some old_context -> Hmap.add k v old_context
in
with_map new_context cb
let without_binding k cb =
let new_context =
match get_map () with
| None -> Hmap.empty
| Some old_context -> Hmap.rem k old_context
in
with_map new_context cb
end
let storage () : Opentelemetry_ambient_context.storage = (module M)
let storage : Storage.t =
{
name = "lwt";
get_context =
(fun () -> Lwt.get lwt_context_key |> Option.value ~default:Hmap.empty);
with_context = (fun ctx f -> Lwt.with_value lwt_context_key (Some ctx) f);
}

View file

@ -1,2 +0,0 @@
val storage : unit -> Opentelemetry_ambient_context.storage
(** Storage using Lwt keys *)

View file

@ -1,124 +1,49 @@
module TLS = Thread_local_storage
include Opentelemetry_ambient_context_types
include Opentelemetry_ambient_context_core
type 'a key = int * 'a Hmap.key
let default_storage = Default_.storage
let debug =
match Sys.getenv_opt "OCAML_AMBIENT_CONTEXT_DEBUG" with
| Some ("1" | "true") -> true
| _ -> false
let _debug_id_ = Atomic.make 0
let[@inline] generate_debug_id () = Atomic.fetch_and_add _debug_id_ 1
let compare_key : int -> int -> int = Stdlib.compare
module Storage_tls_hmap = struct
let[@inline] ( let* ) o f =
match o with
| None -> None
| Some x -> f x
let key : Hmap.t TLS.t = Hmap_key_.key
let name = "Storage_tls"
let[@inline] get_map () = TLS.get_opt key
let[@inline] with_map m cb =
let old = TLS.get_opt key |> Option.value ~default:Hmap.empty in
TLS.set key m;
Fun.protect ~finally:(fun () -> TLS.set key old) cb
let create_key = Hmap.Key.create
let get k =
let* context = get_map () in
Hmap.find k context
let with_binding k v cb =
let new_context =
match get_map () with
| None -> Hmap.singleton k v
| Some old_context -> Hmap.add k v old_context
in
with_map new_context @@ fun _context -> cb ()
let without_binding k cb =
match get_map () with
| None -> cb ()
| Some old_context ->
let new_context = Hmap.rem k old_context in
with_map new_context @@ fun _context -> cb ()
open struct
(** The current ambient-context storage. *)
let cur_storage : Storage.t Atomic.t = Atomic.make Default_.storage
end
let default_storage : storage = (module Storage_tls_hmap)
let[@inline] get_current_storage () = Atomic.get cur_storage
let k_current_storage : storage TLS.t = TLS.create ()
(* NOTE: we can't really "map" each local context from the old
to the new. Maybe the old storage is TLS based and the new one
is per-lwt-task. *)
let set_current_storage (storage : Storage.t) = Atomic.set cur_storage storage
let get_current_storage () =
match TLS.get_exn k_current_storage with
| v -> v
| exception TLS.Not_set ->
let v = default_storage in
TLS.set k_current_storage v;
v
(** {2 Functions operating with the current storage} *)
let create_key () =
let (module Store : STORAGE) = get_current_storage () in
if not debug then
0, Store.create_key ()
else (
let id = generate_debug_id () in
Printf.printf "%s: create_key %i\n%!" Store.name id;
id, Store.create_key ()
)
(** Get the context from the current storage, or [Hmap.empty] if there is no
ambient context. *)
let[@inline] get_context () = Storage.get_context (Atomic.get cur_storage)
let get (id, k) =
let (module Store : STORAGE) = get_current_storage () in
if not debug then
Store.get k
else (
let rv = Store.get k in
(match rv with
| Some _ -> Printf.printf "%s: get %i -> Some\n%!" Store.name id
| None -> Printf.printf "%s: get %i -> None\n%!" Store.name id);
rv
)
(** [with_context ctx f] calls [f()] in an ambient context in which
[get_context()] will return [ctx]. Once [f()] returns, the storage is reset
to its previous value. *)
let[@inline] with_context ctx f =
Storage.with_context (Atomic.get cur_storage) ctx f
let with_binding : 'a key -> 'a -> (unit -> 'r) -> 'r =
fun (id, k) v cb ->
let (module Store : STORAGE) = get_current_storage () in
if not debug then
Store.with_binding k v cb
else (
Printf.printf "%s: with_binding %i enter\n%!" Store.name id;
let rv = Store.with_binding k v cb in
Printf.printf "%s: with_binding %i exit\n%!" Store.name id;
rv
)
(** Get the ambient context and then look up [k] in it *)
let[@inline] get (k : 'a Context.key) : 'a option = Hmap.find k (get_context ())
let without_binding (id, k) cb =
let (module Store : STORAGE) = get_current_storage () in
if not debug then
Store.without_binding k cb
else (
Printf.printf "%s: without_binding %i enter\n%!" Store.name id;
let rv = Store.without_binding k cb in
Printf.printf "%s: without_binding %i exit\n%!" Store.name id;
rv
)
(** [with_key_bound_to storage k v f] calls [f()] in a context updated to have
[k] map to [v]. *)
let with_key_bound_to k v f =
let storage = get_current_storage () in
let ctx = Storage.get_context storage in
let new_ctx = Hmap.add k v ctx in
Storage.with_context storage new_ctx f
let set_storage_provider store_new =
let store_before = get_current_storage () in
if store_new == store_before then
()
else
TLS.set k_current_storage store_new;
if debug then (
let (module Store_before : STORAGE) = store_before in
let (module Store_new : STORAGE) = store_new in
Printf.printf "set_storage_provider %s (previously %s)\n%!" Store_new.name
Store_before.name
)
(** [with_key_unbound k f] calls [f()] in a context updated to have [k] bound to
no value. *)
let with_key_unbound k f =
let storage = Atomic.get cur_storage in
let ctx = Storage.get_context storage in
if Hmap.mem k ctx then (
let new_ctx = Hmap.rem k ctx in
Storage.with_context storage new_ctx f
) else
f ()

View file

@ -1,55 +0,0 @@
(** Ambient context.
The ambient context, like the Matrix, is everywhere around you.
It is responsible for keeping track of that context in a manner that's
consistent with the program's choice of control flow paradigm:
- for synchronous/threaded/direct style code, {b TLS} ("thread local
storage") keeps track of a global variable per thread. Each thread has its
own copy of the variable and updates it independently of other threads.
- for Lwt, any ['a Lwt.t] created inside the [with_binding k v (fun _ -> )]
will inherit the [k := v] assignment.
- for Eio, fibers created inside [with_binding k v (fun () -> )] will
inherit the [k := v] assignment. This is consistent with the structured
concurrency approach of Eio.
The only data stored by this storage is a {!Hmap.t}, ie a heterogeneous map.
Various users (libraries, user code, etc.) can create their own {!key} to
store what they are interested in, without affecting other parts of the
storage. *)
module Types := Opentelemetry_ambient_context_types
module type STORAGE = Types.STORAGE
type storage = (module STORAGE)
val default_storage : storage
val get_current_storage : unit -> storage
val set_storage_provider : storage -> unit
type 'a key
(** A key that can be mapped to values of type ['a] in the ambient context. *)
val compare_key : int -> int -> int
(** Total order on keys *)
val create_key : unit -> 'a key
(** Create a new fresh key, distinct from any previously created key. *)
val get : 'a key -> 'a option
(** Get the current value for a given key, or [None] if no value was associated
with the key in the ambient context. *)
val with_binding : 'a key -> 'a -> (unit -> 'r) -> 'r
(** [with_binding k v cb] calls [cb()] in a context in which [k] is bound to
[v]. This does not affect storage outside of [cb()]. *)
val without_binding : 'a key -> (unit -> 'b) -> 'b
(** [without_binding k cb] calls [cb()] in a context where [k] has no binding
(possibly shadowing the current ambient binding of [k] if it exists). *)

View file

@ -0,0 +1,6 @@
(library
(name opentelemetry_ambient_context_tls)
(public_name opentelemetry.ambient-context.tls)
(synopsis "Implementation of ambient-context from thread-local-storage")
(optional) ; TLS
(libraries opentelemetry.ambient-context.core thread-local-storage))

View file

@ -0,0 +1,23 @@
open Opentelemetry_ambient_context_core
open struct
module TLS = Thread_local_storage
(* key used to access the context *)
let tls_k_context : Context.t TLS.t = TLS.create ()
end
let storage : Storage.t =
{
name = "tls";
get_context =
(fun () -> try TLS.get_exn tls_k_context with TLS.Not_set -> Hmap.empty);
with_context =
(fun ctx f ->
let old =
try TLS.get_exn tls_k_context with TLS.Not_set -> Hmap.empty
in
let finally () = TLS.set tls_k_context old in
TLS.set tls_k_context ctx;
Fun.protect ~finally f);
}

View file

@ -1,4 +0,0 @@
(library
(name opentelemetry_ambient_context_types)
(public_name opentelemetry.ambient-context.types)
(libraries hmap thread-local-storage))

View file

@ -1,19 +0,0 @@
type 'a key = 'a Hmap.key
module type STORAGE = sig
val name : string
val get_map : unit -> Hmap.t option
val with_map : Hmap.t -> (unit -> 'b) -> 'b
val create_key : unit -> 'a key
val get : 'a key -> 'a option
val with_binding : 'a key -> 'a -> (unit -> 'b) -> 'b
val without_binding : 'a key -> (unit -> 'b) -> 'b
end
type storage = (module STORAGE)

View file

@ -1,32 +0,0 @@
(** Storage implementation.
There is a singleton storage for a given program, responsible for providing
ambient context to the rest of the program. *)
type 'a key = 'a Hmap.key
module type STORAGE = sig
val name : string
(** Name of the storage implementation. *)
val get_map : unit -> Hmap.t option
(** Get the hmap from the current ambient context, or [None] if there is no
ambient context. *)
val with_map : Hmap.t -> (unit -> 'b) -> 'b
(** [with_hmap h cb] calls [cb()] in an ambient context in which [get_map()]
will return [h]. Once [cb()] returns, the storage is reset to its previous
value. *)
val create_key : unit -> 'a key
(** Create a new storage key, guaranteed to be distinct from any previously
created key. *)
val get : 'a key -> 'a option
val with_binding : 'a key -> 'a -> (unit -> 'b) -> 'b
val without_binding : 'a key -> (unit -> 'b) -> 'b
end
type storage = (module STORAGE)

View file

@ -1,7 +1,7 @@
type t = Opentelemetry_client.Config.t
type t = Opentelemetry_client.Client_config.t
module Env = Opentelemetry_client.Config.Env ()
module Env = Opentelemetry_client.Client_config.Env ()
let pp = Opentelemetry_client.Config.pp
let pp = Opentelemetry_client.Client_config.pp
let make = Env.make (fun common () -> common)

View file

@ -1,4 +1,4 @@
type t = Opentelemetry_client.Config.t
type t = Opentelemetry_client.Client_config.t
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
@ -6,7 +6,7 @@ type t = Opentelemetry_client.Config.t
val pp : Format.formatter -> t -> unit
val make : (unit -> t) Opentelemetry_client.Config.make
val make : (unit -> t) Opentelemetry_client.Client_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Config.ENV
module Env : Opentelemetry_client.Client_config.ENV

View file

@ -6,7 +6,7 @@
(>= %{ocaml_version} 5.0))
(libraries
opentelemetry
opentelemetry.client
opentelemetry-client
eio
eio.unix
cohttp

View file

@ -5,13 +5,9 @@ open Eio.Std
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
module Signal = Opentelemetry_client.Signal
module Batch = Opentelemetry_client.Batch
open Opentelemetry
let ( let@ ) = ( @@ )
open Opentelemetry_client
let spf = Printf.sprintf
@ -19,459 +15,180 @@ let set_headers = Config.Env.set_headers
let get_headers = Config.Env.get_headers
let needs_gc_metrics = Atomic.make false
module Make (CTX : sig
val sw : Eio.Switch.t
let last_gc_metrics = Atomic.make (Mtime_clock.now ())
val env : Eio_unix.Stdenv.base
end) =
struct
module IO : Generic_io.S_WITH_CONCURRENCY with type 'a t = 'a = struct
include Generic_io.Direct_style
let timeout_gc_metrics = Mtime.Span.(20 * s)
(* NOTE: This is only used in the main consumer thread, even though producers
might be in other domains *)
(* Cross-domain, thread-safe storage for GC metrics gathered from different fibres. *)
module GC_metrics : sig
val add : Proto.Metrics.resource_metrics -> unit
let sleep_s n = Eio.Time.sleep CTX.env#clock n
val drain : unit -> Proto.Metrics.resource_metrics list
end = struct
(* Used to prevent data races across domains *)
let mutex = Eio.Mutex.create ()
let spawn f = Eio.Fiber.fork ~sw:CTX.sw f
end
let gc_metrics = ref []
module Notifier : Generic_notifier.S with module IO = IO = struct
module IO = IO
let add m =
Eio.Mutex.use_rw ~protect:true mutex (fun () ->
gc_metrics := m :: !gc_metrics)
type t = {
mutex: Eio.Mutex.t;
cond: Eio.Condition.t;
}
let drain () =
Eio.Mutex.use_rw ~protect:true mutex (fun () ->
let metrics = !gc_metrics in
gc_metrics := [];
metrics)
end
let create () : t =
{ mutex = Eio.Mutex.create (); cond = Eio.Condition.create () }
(* capture current GC metrics if {!needs_gc_metrics} is true,
or it has been a long time since the last GC metrics collection,
and push them into {!gc_metrics} for later collection *)
let sample_gc_metrics_if_needed () =
let now = Mtime_clock.now () in
let alarm = Atomic.compare_and_set needs_gc_metrics true false in
let timeout () =
let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
Mtime.Span.compare elapsed timeout_gc_metrics > 0
in
if alarm || timeout () then (
Atomic.set last_gc_metrics now;
let l =
OT.Metrics.make_resource_metrics
~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
@@ Opentelemetry.GC_metrics.get_metrics ()
in
GC_metrics.add l
)
let trigger self =
(* FIXME: this might be triggered from other threads!! how do we
ensure it runs in the Eio thread? *)
Eio.Condition.broadcast self.cond
type error =
[ `Status of int * Opentelemetry.Proto.Status.status
| `Failure of string
| `Sysbreak
]
let delete = ignore
let n_errors = Atomic.make 0
let wait self =
Eio.Mutex.lock self.mutex;
Eio.Condition.await self.cond self.mutex;
Eio.Mutex.unlock self.mutex
let n_dropped = Atomic.make 0
(** Ensure we get signalled when the queue goes from empty to non-empty *)
let register_bounded_queue (self : t) (bq : _ Bounded_queue.t) : unit =
Bounded_queue.on_non_empty bq (fun () -> trigger self)
end
let report_err_ = function
| `Sysbreak -> Printf.eprintf "opentelemetry: ctrl-c captured, stopping\n%!"
| `Failure msg ->
Format.eprintf "@[<2>opentelemetry: export failed: %s@]@." msg
| `Status
( code,
{
Opentelemetry.Proto.Status.code = scode;
message;
details;
_presence = _;
} ) ->
let pp_details out l =
List.iter
(fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
l
in
Format.eprintf
"@[<2>opentelemetry: export failed with@ http code=%d@ status \
{@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]@."
code scode
(Bytes.unsafe_to_string message)
pp_details details
module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
open Opentelemetry.Proto
module Httpc = Cohttp_eio.Client
module Httpc : sig
type t
type t = Httpc.t
val create : _ Eio.Net.t -> t
let authenticator =
match Ca_certs.authenticator () with
| Ok x -> x
| Error (`Msg m) ->
Fmt.failwith "Failed to create system store X509 authenticator: %s" m
val send :
t ->
url:string ->
decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
string ->
('a, error) result
end = struct
open Opentelemetry.Proto
module Httpc = Cohttp_eio.Client
type t = Httpc.t
let authenticator =
match Ca_certs.authenticator () with
| Ok x -> x
| Error (`Msg m) ->
Fmt.failwith "Failed to create system store X509 authenticator: %s" m
let https ~authenticator =
let tls_config =
match Tls.Config.client ~authenticator () with
| Error (`Msg msg) -> failwith ("tls configuration problem: " ^ msg)
| Ok tls_config -> tls_config
in
fun uri raw ->
let host =
Uri.host uri
|> Option.map (fun x -> Domain_name.(host_exn (of_string_exn x)))
let https ~authenticator =
let tls_config =
match Tls.Config.client ~authenticator () with
| Error (`Msg msg) -> failwith ("tls configuration problem: " ^ msg)
| Ok tls_config -> tls_config
in
Tls_eio.client_of_flow ?host tls_config raw
fun uri raw ->
let host =
Uri.host uri
|> Option.map (fun x -> Domain_name.(host_exn (of_string_exn x)))
in
Tls_eio.client_of_flow ?host tls_config raw
let create net = Httpc.make ~https:(Some (https ~authenticator)) net
let create () = Httpc.make ~https:(Some (https ~authenticator)) CTX.env#net
(* send the content to the remote endpoint/path *)
let send (client : t) ~url ~decode (body : string) : ('a, error) result =
Switch.run @@ fun sw ->
let uri = Uri.of_string url in
let cleanup = ignore
let open Cohttp in
let headers = Header.(add_list (init ()) (Config.Env.get_headers ())) in
let headers =
Header.(add headers "Content-Type" "application/x-protobuf")
in
(* send the content to the remote endpoint/path *)
let send (client : t) ~url ~decode (body : string) :
('a, Export_error.t) result =
Switch.run @@ fun sw ->
let uri = Uri.of_string url in
let body = Cohttp_eio.Body.of_string body in
let r =
try
let r = Httpc.post client ~sw ~headers ~body uri in
Ok r
with e -> Error e
in
match r with
| Error e ->
let err =
`Failure
(spf "sending signals via http POST to %S\nfailed with:\n%s" url
(Printexc.to_string e))
let open Cohttp in
let headers = Header.(add_list (init ()) (Config.Env.get_headers ())) in
let headers =
Header.(add headers "Content-Type" "application/x-protobuf")
in
Error err
| Ok (resp, body) ->
let body = Eio.Buf_read.(parse_exn take_all) body ~max_size:max_int in
let code = Response.status resp |> Code.code_of_status in
if not (Code.is_error code) then (
match decode with
| `Ret x -> Ok x
| `Dec f ->
let body = Cohttp_eio.Body.of_string body in
let r =
try
let r = Httpc.post client ~sw ~headers ~body uri in
Ok r
with e -> Error e
in
match r with
| Error e ->
let err =
`Failure
(spf "sending signals via http POST to %S\nfailed with:\n%s" url
(Printexc.to_string e))
in
Error err
| Ok (resp, body) ->
let body = Eio.Buf_read.(parse_exn take_all) body ~max_size:max_int in
let code = Response.status resp |> Code.code_of_status in
if not (Code.is_error code) then (
match decode with
| `Ret x -> Ok x
| `Dec f ->
let dec = Pbrt.Decoder.of_string body in
let r =
try Ok (f dec)
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e)
bt))
in
r
) else (
let dec = Pbrt.Decoder.of_string body in
let r =
try Ok (f dec)
try
let status = Status.decode_pb_status dec in
Error (`Status (code, status))
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e)
bt))
(spf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
status: %S\n\
%s"
url code (Printexc.to_string e) body bt))
in
r
) else (
let dec = Pbrt.Decoder.of_string body in
let r =
try
let status = Status.decode_pb_status dec in
Error (`Status (code, status))
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
status: %S\n\
%s"
url code (Printexc.to_string e) body bt))
in
r
)
)
end
end
(** An emitter. This is used by {!Backend} below to forward traces/metrics/…
from the program to whatever collector client we have. *)
module type EMITTER = sig
open Opentelemetry.Proto
let create_consumer ?(stop = Atomic.make false) ?(config = Config.make ()) ~sw
~env () : Consumer.any_resource_builder =
let module M = Make (struct
let sw = sw
val push_trace : Trace.resource_spans list -> unit
let env = env
end) in
let module C = Generic_http_consumer.Make (M.IO) (M.Notifier) (M.Httpc) in
C.consumer ~ticker_task:(Some 0.5) ~stop ~config ()
val push_metrics : Metrics.resource_metrics list -> unit
let create_exporter ?stop ?(config = Config.make ()) ~sw ~env () =
let consumer = create_consumer ?stop ~config ~sw ~env () in
let bq =
Bounded_queue_sync.create
~high_watermark:Bounded_queue.Defaults.high_watermark ()
in
Exporter_queued.create ~q:bq ~consumer ()
|> Exporter_add_batching.add_batching ~config
val push_logs : Logs.resource_logs list -> unit
val set_on_tick_callbacks : (unit -> unit) AList.t -> unit
val tick : unit -> unit
val cleanup : on_done:(unit -> unit) -> unit -> unit
end
(* make an emitter.
exceptions inside should be caught, see
https://opentelemetry.io/docs/reference/specification/error-handling/ *)
let mk_emitter ~stop ~net (config : Config.t) : (module EMITTER) =
(* local helpers *)
let open struct
let client =
(* Prime RNG state for TLS *)
Mirage_crypto_rng_unix.use_default ();
Httpc.create net
let send_http ~url data : unit =
let r = Httpc.send client ~url ~decode:(`Ret ()) data in
match r with
| Ok () -> ()
| Error `Sysbreak ->
Printf.eprintf "ctrl-c captured, stopping\n%!";
Atomic.set stop true
| Error err ->
(* TODO: log error _via_ otel? *)
Atomic.incr n_errors;
report_err_ err;
(* avoid crazy error loop *)
Eio_unix.sleep 3.
let timeout =
if config.batch_timeout_ms > 0 then
Some Mtime.Span.(config.batch_timeout_ms * ms)
else
None
let batch_traces : Proto.Trace.resource_spans Batch.t =
Batch.make ?batch:config.batch_traces ?timeout ()
let batch_metrics : Proto.Metrics.resource_metrics Batch.t =
Batch.make ?batch:config.batch_metrics ?timeout ()
let batch_logs : Proto.Logs.resource_logs Batch.t =
Batch.make ?batch:config.batch_logs ?timeout ()
let push_to_batch b e =
match Batch.push b e with
| `Ok -> ()
| `Dropped -> Atomic.incr n_errors
let[@inline] guard_exn_ where f =
try f ()
with e ->
let bt = Printexc.get_backtrace () in
Printf.eprintf "opentelemetry-eio: uncaught exception in %s: %s\n%s\n%!"
where (Printexc.to_string e) bt
let push_traces x =
let@ () = guard_exn_ "push trace" in
push_to_batch batch_traces x
let push_metrics x =
let@ () = guard_exn_ "push metrics" in
sample_gc_metrics_if_needed ();
push_to_batch batch_metrics x
let push_logs x =
let@ () = guard_exn_ "push logs" in
push_to_batch batch_logs x
let maybe_emit (batch : 'a Batch.t) url (f : 'a list -> string) ~now ~force
() : unit =
Batch.pop_if_ready ~force ~now batch
|> Option.iter (fun signals -> f signals |> send_http ~url)
let emit_traces_maybe =
maybe_emit batch_traces config.url_traces Signal.Encode.traces
let emit_metrics_maybe =
maybe_emit batch_metrics config.url_metrics (fun collected_metrics ->
let gc_metrics = GC_metrics.drain () in
gc_metrics @ collected_metrics |> Signal.Encode.metrics)
let emit_logs_maybe =
maybe_emit batch_logs config.url_logs Signal.Encode.logs
let emit_all ~force : unit =
Switch.run @@ fun sw ->
let now = Mtime_clock.now () in
Fiber.fork ~sw @@ emit_logs_maybe ~now ~force;
Fiber.fork ~sw @@ emit_metrics_maybe ~now ~force;
Fiber.fork ~sw @@ emit_traces_maybe ~now ~force
let on_tick_cbs_ = Atomic.make (AList.make ())
let run_tick_callbacks () =
List.iter
(fun f ->
try f ()
with e ->
Printf.eprintf "on tick callback raised: %s\n"
(Printexc.to_string e))
(AList.get @@ Atomic.get on_tick_cbs_)
end in
let module M = struct
let set_on_tick_callbacks = Atomic.set on_tick_cbs_
let push_trace e = push_traces e
let push_metrics e = push_metrics e
let push_logs e = push_logs e
let tick () =
if Config.Env.get_debug () then
Printf.eprintf "tick (from domain %d)\n%!" (Domain.self () :> int);
run_tick_callbacks ();
sample_gc_metrics_if_needed ();
emit_all ~force:false
let cleanup ~on_done () =
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: exiting…\n%!";
Atomic.set stop true;
run_tick_callbacks ();
sample_gc_metrics_if_needed ();
emit_all ~force:true;
on_done ()
end in
(module M : EMITTER)
module Backend (Emitter : EMITTER) : Opentelemetry.Collector.BACKEND = struct
open Opentelemetry.Proto
open Opentelemetry.Collector
open Emitter
let send_trace : Trace.resource_spans list sender =
{
send =
(fun l ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send spans %a@."
(Format.pp_print_list Trace.pp_resource_spans)
l);
push_trace l;
ret ());
}
let last_sent_metrics = Atomic.make (Mtime_clock.now ())
let timeout_sent_metrics = Mtime.Span.(5 * s)
(* send metrics from time to time *)
let signal_emit_gc_metrics () =
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
Atomic.set needs_gc_metrics true
let additional_metrics () : Metrics.resource_metrics list =
(* add exporter metrics to the lot? *)
let last_emit = Atomic.get last_sent_metrics in
let now = Mtime_clock.now () in
let add_own_metrics =
let elapsed = Mtime.span last_emit now in
Mtime.Span.compare elapsed timeout_sent_metrics > 0
in
(* there is a possible race condition here, as several threads might update
metrics at the same time. But that's harmless. *)
if add_own_metrics then (
Atomic.set last_sent_metrics now;
let open OT.Metrics in
[
make_resource_metrics
[
sum ~name:"otel.export.dropped" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
];
sum ~name:"otel.export.errors" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
];
];
]
) else
[]
let send_metrics : Metrics.resource_metrics list sender =
{
send =
(fun m ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send metrics %a@."
(Format.pp_print_list Metrics.pp_resource_metrics)
m);
let m = List.rev_append (additional_metrics ()) m in
push_metrics m;
ret ());
}
let send_logs : Logs.resource_logs list sender =
{
send =
(fun m ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send logs %a@."
(Format.pp_print_list Logs.pp_resource_logs)
m);
push_logs m;
ret ());
}
let tick = Emitter.tick
let cleanup = Emitter.cleanup
let set_on_tick_callbacks = Emitter.set_on_tick_callbacks
end
let create_backend ~sw ?(stop = Atomic.make false) ?(config = Config.make ())
env : (module OT.Collector.BACKEND) =
let module E = (val mk_emitter ~stop ~net:env#net config) in
let module B = Backend (E) in
(* Run a background fiber to keep the backend ticking regularly.
NOTE: This cannot be located inside the [Backend], because switches
are not thread safe, and cannot be used accross domains, but the
backend is accessed across domains. *)
Eio.Fiber.fork ~sw (fun () ->
while not @@ Atomic.get stop do
Eio.Time.sleep env#clock 0.5;
B.tick ()
done);
(module B)
let create_backend = create_exporter
let setup_ ~sw ?stop ?config env : unit =
let backend = create_backend ?stop ?config ~sw env in
OT.Collector.set_backend backend
let backend = create_backend ?stop ?config ~sw ~env () in
Main_exporter.set backend
let setup ?stop ?config ?(enable = true) ~sw env =
if enable then setup_ ~sw ?stop ?config env
let remove_backend () = OT.Collector.remove_backend ~on_done:ignore ()
let remove_exporter () = Main_exporter.remove ~on_done:ignore ()
let remove_backend = remove_exporter
let with_setup ?stop ?config ?(enable = true) f env =
if enable then

View file

@ -10,15 +10,32 @@ val set_headers : (string * string) list -> unit
module Config = Config
val create_backend :
sw:Eio.Switch.t ->
val create_consumer :
?stop:bool Atomic.t ->
?config:Config.t ->
Eio_unix.Stdenv.base ->
(module Opentelemetry.Collector.BACKEND)
(** Create a new backend using Cohttp_eio
sw:Eio.Switch.t ->
env:Eio_unix.Stdenv.base ->
unit ->
Opentelemetry_client.Consumer.any_resource_builder
(** Consumer that pulls from a queue *)
NOTE [after_cleanup] optional parameter removed @since 0.12 *)
val create_exporter :
?stop:bool Atomic.t ->
?config:Config.t ->
sw:Eio.Switch.t ->
env:Eio_unix.Stdenv.base ->
unit ->
Opentelemetry.Exporter.t
(** NOTE [after_cleanup] optional parameter removed @since 0.12 *)
val create_backend :
?stop:bool Atomic.t ->
?config:Config.t ->
sw:Eio.Switch.t ->
env:Eio_unix.Stdenv.base ->
unit ->
Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup :
?stop:bool Atomic.t ->

View file

@ -1,7 +1,7 @@
type t = Opentelemetry_client.Config.t
type t = Opentelemetry_client.Client_config.t
module Env = Opentelemetry_client.Config.Env ()
module Env = Opentelemetry_client.Client_config.Env ()
let pp = Opentelemetry_client.Config.pp
let pp = Opentelemetry_client.Client_config.pp
let make = Env.make (fun common () -> common)

View file

@ -1,4 +1,4 @@
type t = Opentelemetry_client.Config.t
type t = Opentelemetry_client.Client_config.t
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
@ -6,7 +6,7 @@ type t = Opentelemetry_client.Config.t
val pp : Format.formatter -> t -> unit
val make : (unit -> t) Opentelemetry_client.Config.make
val make : (unit -> t) Opentelemetry_client.Client_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Config.ENV
module Env : Opentelemetry_client.Client_config.ENV

View file

@ -6,7 +6,8 @@
(pps lwt_ppx))
(libraries
opentelemetry
opentelemetry.client
opentelemetry-client
opentelemetry-client.lwt
lwt
cohttp-lwt
cohttp-lwt-unix

View file

@ -3,10 +3,8 @@
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
module Signal = Opentelemetry_client.Signal
module Batch = Opentelemetry_client.Batch
open Opentelemetry_client
open Opentelemetry
open Common_
@ -14,87 +12,14 @@ let set_headers = Config.Env.set_headers
let get_headers = Config.Env.get_headers
external reraise : exn -> 'a = "%reraise"
(** This is equivalent to [Lwt.reraise]. We inline it here so we don't force to
use Lwt's latest version *)
type error = Export_error.t
let needs_gc_metrics = Atomic.make false
open struct
module IO = Opentelemetry_client_lwt.Io_lwt
end
let last_gc_metrics = Atomic.make (Mtime_clock.now ())
let timeout_gc_metrics = Mtime.Span.(20 * s)
let gc_metrics = ref []
(* side channel for GC, appended to {!E_metrics}'s data *)
(* capture current GC metrics if {!needs_gc_metrics} is true,
or it has been a long time since the last GC metrics collection,
and push them into {!gc_metrics} for later collection *)
let sample_gc_metrics_if_needed () =
let now = Mtime_clock.now () in
let alarm = Atomic.compare_and_set needs_gc_metrics true false in
let timeout () =
let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
Mtime.Span.compare elapsed timeout_gc_metrics > 0
in
if alarm || timeout () then (
Atomic.set last_gc_metrics now;
let l =
OT.Metrics.make_resource_metrics
~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
@@ Opentelemetry.GC_metrics.get_metrics ()
in
gc_metrics := l :: !gc_metrics
)
type error =
[ `Status of int * Opentelemetry.Proto.Status.status
| `Failure of string
| `Sysbreak
]
let n_errors = Atomic.make 0
let n_dropped = Atomic.make 0
let report_err_ = function
| `Sysbreak -> Printf.eprintf "opentelemetry: ctrl-c captured, stopping\n%!"
| `Failure msg ->
Format.eprintf "@[<2>opentelemetry: export failed: %s@]@." msg
| `Status
( code,
{
Opentelemetry.Proto.Status.code = scode;
message;
details;
_presence = _;
} ) ->
let pp_details out l =
List.iter
(fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
l
in
Format.eprintf
"@[<2>opentelemetry: export failed with@ http code=%d@ status \
{@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]@."
code scode
(Bytes.unsafe_to_string message)
pp_details details
module Httpc : sig
type t
val create : unit -> t
val send :
t ->
url:string ->
decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
string ->
('a, error) result Lwt.t
val cleanup : t -> unit
end = struct
module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
open Opentelemetry.Proto
open Lwt.Syntax
module Httpc = Cohttp_lwt_unix.Client
@ -176,325 +101,39 @@ end = struct
)
end
(** An emitter. This is used by {!Backend} below to forward traces/metrics/…
from the program to whatever collector client we have. *)
module type EMITTER = sig
open Opentelemetry.Proto
module Consumer_impl =
Generic_http_consumer.Make (IO) (Opentelemetry_client_lwt.Notifier_lwt)
(Httpc)
val push_trace : Trace.resource_spans list -> unit
let create_consumer ?(stop = Atomic.make false) ?(config = Config.make ()) () =
Consumer_impl.consumer ~ticker_task:(Some 0.5) ~stop ~config ()
val push_metrics : Metrics.resource_metrics list -> unit
val push_logs : Logs.resource_logs list -> unit
val set_on_tick_callbacks : (unit -> unit) AList.t -> unit
val tick : unit -> unit
val cleanup : on_done:(unit -> unit) -> unit -> unit
end
(* make an emitter.
exceptions inside should be caught, see
https://opentelemetry.io/docs/reference/specification/error-handling/ *)
let mk_emitter ~stop ~(config : Config.t) () : (module EMITTER) =
let open Proto in
let open Lwt.Syntax in
(* local helpers *)
let open struct
let timeout =
if config.batch_timeout_ms > 0 then
Some Mtime.Span.(config.batch_timeout_ms * ms)
else
None
let batch_traces : Trace.resource_spans Batch.t =
Batch.make ?batch:config.batch_traces ?timeout ()
let batch_metrics : Metrics.resource_metrics Batch.t =
Batch.make ?batch:config.batch_metrics ?timeout ()
let batch_logs : Logs.resource_logs Batch.t =
Batch.make ?batch:config.batch_logs ?timeout ()
let on_tick_cbs_ = Atomic.make (AList.make ())
let set_on_tick_callbacks = Atomic.set on_tick_cbs_
let send_http_ (httpc : Httpc.t) ~url data : unit Lwt.t =
let* r = Httpc.send httpc ~url ~decode:(`Ret ()) data in
match r with
| Ok () -> Lwt.return ()
| Error `Sysbreak ->
Printf.eprintf "ctrl-c captured, stopping\n%!";
Atomic.set stop true;
Lwt.return ()
| Error err ->
(* TODO: log error _via_ otel? *)
Atomic.incr n_errors;
report_err_ err;
(* avoid crazy error loop *)
Lwt_unix.sleep 3.
let send_metrics_http client (l : Metrics.resource_metrics list) =
Signal.Encode.metrics l |> send_http_ client ~url:config.url_metrics
let send_traces_http client (l : Trace.resource_spans list) =
Signal.Encode.traces l |> send_http_ client ~url:config.url_traces
let send_logs_http client (l : Logs.resource_logs list) =
Signal.Encode.logs l |> send_http_ client ~url:config.url_logs
(* emit metrics, if the batch is full or timeout lapsed *)
let emit_metrics_maybe ~now ?force httpc : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_metrics with
| None -> Lwt.return false
| Some l ->
let batch = !gc_metrics @ l in
gc_metrics := [];
let+ () = send_metrics_http httpc batch in
true
let emit_traces_maybe ~now ?force httpc : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_traces with
| None -> Lwt.return false
| Some l ->
let+ () = send_traces_http httpc l in
true
let emit_logs_maybe ~now ?force httpc : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_logs with
| None -> Lwt.return false
| Some l ->
let+ () = send_logs_http httpc l in
true
let[@inline] guard_exn_ where f =
try f ()
with e ->
let bt = Printexc.get_backtrace () in
Printf.eprintf
"opentelemetry-cohttp-lwt: uncaught exception in %s: %s\n%s\n%!" where
(Printexc.to_string e) bt
let emit_all_force (httpc : Httpc.t) : unit Lwt.t =
let now = Mtime_clock.now () in
let+ (_ : bool) = emit_traces_maybe ~now ~force:true httpc
and+ (_ : bool) = emit_logs_maybe ~now ~force:true httpc
and+ (_ : bool) = emit_metrics_maybe ~now ~force:true httpc in
()
(* thread that calls [tick()] regularly, to help enforce timeouts *)
let setup_ticker_thread ~tick ~finally () =
let rec tick_thread () =
if Atomic.get stop then (
finally ();
Lwt.return ()
) else
let* () = Lwt_unix.sleep 0.5 in
let* () = tick () in
tick_thread ()
in
Lwt.async tick_thread
end in
let httpc = Httpc.create () in
let module M = struct
(* we make sure that this is thread-safe, even though we don't have a
background thread. There can still be a ticker thread, and there
can also be several user threads that produce spans and call
the emit functions. *)
let push_to_batch b e =
match Batch.push b e with
| `Ok -> ()
| `Dropped -> Atomic.incr n_errors
let push_trace e =
let@ () = guard_exn_ "push trace" in
push_to_batch batch_traces e;
let now = Mtime_clock.now () in
Lwt.async (fun () ->
let+ (_ : bool) = emit_traces_maybe ~now httpc in
())
let push_metrics e =
let@ () = guard_exn_ "push metrics" in
sample_gc_metrics_if_needed ();
push_to_batch batch_metrics e;
let now = Mtime_clock.now () in
Lwt.async (fun () ->
let+ (_ : bool) = emit_metrics_maybe ~now httpc in
())
let push_logs e =
let@ () = guard_exn_ "push logs" in
push_to_batch batch_logs e;
let now = Mtime_clock.now () in
Lwt.async (fun () ->
let+ (_ : bool) = emit_logs_maybe ~now httpc in
())
let set_on_tick_callbacks = set_on_tick_callbacks
let tick_ () =
if Config.Env.get_debug () then
Printf.eprintf "tick (from %d)\n%!" (tid ());
sample_gc_metrics_if_needed ();
List.iter
(fun f ->
try f ()
with e ->
Printf.eprintf "on tick callback raised: %s\n"
(Printexc.to_string e))
(AList.get @@ Atomic.get on_tick_cbs_);
let now = Mtime_clock.now () in
let+ (_ : bool) = emit_traces_maybe ~now httpc
and+ (_ : bool) = emit_logs_maybe ~now httpc
and+ (_ : bool) = emit_metrics_maybe ~now httpc in
()
let () = setup_ticker_thread ~tick:tick_ ~finally:ignore ()
(* if called in a blocking context: work in the background *)
let tick () = Lwt.async tick_
let cleanup ~on_done () =
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: exiting…\n%!";
Lwt.async (fun () ->
let* () = emit_all_force httpc in
Httpc.cleanup httpc;
on_done ();
Lwt.return ())
end in
(module M)
module Backend
(Arg : sig
val stop : bool Atomic.t
val config : Config.t
end)
() : Opentelemetry.Collector.BACKEND = struct
include (val mk_emitter ~stop:Arg.stop ~config:Arg.config ())
open Opentelemetry.Proto
open Opentelemetry.Collector
let send_trace : Trace.resource_spans list sender =
{
send =
(fun l ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send spans %a@."
(Format.pp_print_list Trace.pp_resource_spans)
l);
push_trace l;
ret ());
}
let last_sent_metrics = Atomic.make (Mtime_clock.now ())
let timeout_sent_metrics = Mtime.Span.(5 * s)
(* send metrics from time to time *)
let signal_emit_gc_metrics () =
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
Atomic.set needs_gc_metrics true
let additional_metrics () : Metrics.resource_metrics list =
(* add exporter metrics to the lot? *)
let last_emit = Atomic.get last_sent_metrics in
let now = Mtime_clock.now () in
let add_own_metrics =
let elapsed = Mtime.span last_emit now in
Mtime.Span.compare elapsed timeout_sent_metrics > 0
in
(* there is a possible race condition here, as several threads might update
metrics at the same time. But that's harmless. *)
if add_own_metrics then (
Atomic.set last_sent_metrics now;
let open OT.Metrics in
[
make_resource_metrics
[
sum ~name:"otel.export.dropped" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
];
sum ~name:"otel.export.errors" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
];
];
]
) else
[]
let send_metrics : Metrics.resource_metrics list sender =
{
send =
(fun m ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send metrics %a@."
(Format.pp_print_list Metrics.pp_resource_metrics)
m);
let m = List.rev_append (additional_metrics ()) m in
push_metrics m;
ret ());
}
let send_logs : Logs.resource_logs list sender =
{
send =
(fun m ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send logs %a@."
(Format.pp_print_list Logs.pp_resource_logs)
m);
push_logs m;
ret ());
}
end
let create_backend ?(stop = Atomic.make false) ?(config = Config.make ()) () =
let module B =
Backend
(struct
let stop = stop
let config = config
end)
()
let create_exporter ?stop ?(config = Config.make ()) () =
let consumer = create_consumer ?stop ~config () in
let bq =
Bounded_queue_sync.create
~high_watermark:Bounded_queue.Defaults.high_watermark ()
in
(module B : OT.Collector.BACKEND)
Exporter_queued.create ~q:bq ~consumer ()
|> Exporter_add_batching.add_batching ~config
let create_backend = create_exporter
let setup_ ?stop ?config () : unit =
let backend = create_backend ?stop ?config () in
OT.Collector.set_backend backend;
Main_exporter.set backend;
()
let setup ?stop ?config ?(enable = true) () =
if enable then setup_ ?stop ?config ()
let remove_backend () : unit Lwt.t =
let remove_exporter () : unit Lwt.t =
let done_fut, done_u = Lwt.wait () in
OT.Collector.remove_backend ~on_done:(fun () -> Lwt.wakeup_later done_u ()) ();
Main_exporter.remove ~on_done:(fun () -> Lwt.wakeup_later done_u ()) ();
done_fut
let remove_backend = remove_exporter
let with_setup ?stop ?(config = Config.make ()) ?(enable = true) () f : _ Lwt.t
=
if enable then (
@ -504,10 +143,10 @@ let with_setup ?stop ?(config = Config.make ()) ?(enable = true) () f : _ Lwt.t
Lwt.catch
(fun () ->
let* res = f () in
let+ () = remove_backend () in
let+ () = remove_exporter () in
res)
(fun exn ->
let* () = remove_backend () in
reraise exn)
let* () = remove_exporter () in
Lwt.reraise exn)
) else
f ()

View file

@ -12,14 +12,20 @@ val set_headers : (string * string) list -> unit
module Config = Config
val create_backend :
val create_consumer :
?stop:bool Atomic.t ->
?config:Config.t ->
unit ->
(module Opentelemetry.Collector.BACKEND)
(** Create a new backend using lwt and cohttp
Opentelemetry_client.Consumer.any_resource_builder
(** Consumer that pulls from a queue *)
NOTE [after_cleanup] optional parameter removed @since 0.12 *)
val create_exporter :
?stop:bool Atomic.t -> ?config:Config.t -> unit -> Opentelemetry.Exporter.t
(** Create a new backend using lwt and ezcurl-lwt *)
val create_backend :
?stop:bool Atomic.t -> ?config:Config.t -> unit -> Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup :
?stop:bool Atomic.t -> ?config:Config.t -> ?enable:bool -> unit -> unit

View file

@ -1,7 +1,7 @@
type t = Opentelemetry_client.Config.t
type t = Opentelemetry_client.Client_config.t
module Env = Opentelemetry_client.Config.Env ()
module Env = Opentelemetry_client.Client_config.Env ()
let pp = Opentelemetry_client.Config.pp
let pp = Opentelemetry_client.Client_config.pp
let make = Env.make (fun common () -> common)

View file

@ -1,4 +1,4 @@
type t = Opentelemetry_client.Config.t
type t = Opentelemetry_client.Client_config.t
(** Configuration.
To build one, use {!make} below. This might be extended with more fields in
@ -6,7 +6,7 @@ type t = Opentelemetry_client.Config.t
val pp : Format.formatter -> t -> unit
val make : (unit -> t) Opentelemetry_client.Config.make
val make : (unit -> t) Opentelemetry_client.Client_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Config.ENV
module Env : Opentelemetry_client.Client_config.ENV

View file

@ -7,7 +7,8 @@
(libraries
opentelemetry
opentelemetry.atomic
opentelemetry.client
opentelemetry-client
opentelemetry-client.lwt
pbrt
mtime
mtime.clock.os

View file

@ -3,110 +3,42 @@
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
module Signal = Opentelemetry_client.Signal
module Batch = Opentelemetry_client.Batch
open Opentelemetry
open Opentelemetry_client
open Common_
let set_headers = Config.Env.set_headers
let get_headers = Config.Env.get_headers
external reraise : exn -> 'a = "%reraise"
(** This is equivalent to [Lwt.reraise]. We inline it here so we don't force to
use Lwt's latest version *)
type error = Export_error.t
let needs_gc_metrics = Atomic.make false
open struct
module IO = Opentelemetry_client_lwt.Io_lwt
end
let last_gc_metrics = Atomic.make (Mtime_clock.now ())
let timeout_gc_metrics = Mtime.Span.(20 * s)
let gc_metrics = ref []
(* side channel for GC, appended to {!E_metrics}'s data *)
(* capture current GC metrics if {!needs_gc_metrics} is true,
or it has been a long time since the last GC metrics collection,
and push them into {!gc_metrics} for later collection *)
let sample_gc_metrics_if_needed () =
let now = Mtime_clock.now () in
let alarm = Atomic.compare_and_set needs_gc_metrics true false in
let timeout () =
let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
Mtime.Span.compare elapsed timeout_gc_metrics > 0
in
if alarm || timeout () then (
Atomic.set last_gc_metrics now;
let l =
OT.Metrics.make_resource_metrics
~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
@@ Opentelemetry.GC_metrics.get_metrics ()
in
gc_metrics := l :: !gc_metrics
)
type error =
[ `Status of int * Opentelemetry.Proto.Status.status
| `Failure of string
| `Sysbreak
]
let n_errors = Atomic.make 0
let n_dropped = Atomic.make 0
let report_err_ = function
| `Sysbreak -> Printf.eprintf "opentelemetry: ctrl-c captured, stopping\n%!"
| `Failure msg ->
Format.eprintf "@[<2>opentelemetry: export failed: %s@]@." msg
| `Status (code, { Opentelemetry.Proto.Status.code = scode; message; details })
->
let pp_details out l =
List.iter
(fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
l
in
Format.eprintf
"@[<2>opentelemetry: export failed with@ http code=%d@ status \
{@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]@."
code scode
(Bytes.unsafe_to_string message)
pp_details details
module Httpc : sig
type t
val create : unit -> t
val send :
t ->
url:string ->
decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
string ->
('a, error) result Lwt.t
val cleanup : t -> unit
end = struct
open Opentelemetry.Proto
(** HTTP client *)
module Httpc : Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
open Lwt.Syntax
type t = unit
type t = Curl.t
let create () : t = ()
let create () : t = Ezcurl_core.make ()
let cleanup _self = ()
let cleanup self = Ezcurl_core.delete self
(* send the content to the remote endpoint/path *)
let send (_self : t) ~url ~decode (bod : string) : ('a, error) result Lwt.t =
(** send the content to the remote endpoint/path *)
let send (self : t) ~url ~decode (bod : string) : ('a, error) result Lwt.t =
let* r =
let headers =
("Content-Type", "application/x-protobuf")
:: ("Accept", "application/x-protobuf")
:: Config.Env.get_headers ()
in
Ezcurl_lwt.post ~headers ~params:[] ~url ~content:(`String bod) ()
Ezcurl_lwt.post ~client:self ~headers ~params:[] ~url
~content:(`String bod) ()
in
match r with
| Error (code, msg) ->
@ -136,335 +68,31 @@ end = struct
in
Lwt.return r)
| Ok { code; body; _ } ->
let dec = Pbrt.Decoder.of_string body in
let r =
try
let status = Status.decode_pb_status dec in
Error (`Status (code, status))
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
status: %S\n\
%s"
url code (Printexc.to_string e) body bt))
in
Lwt.return r
let err = Export_error.decode_invalid_http_response ~url ~code body in
Lwt.return (Error err)
end
(** An emitter. This is used by {!Backend} below to forward traces/metrics/…
from the program to whatever collector client we have. *)
module type EMITTER = sig
open Opentelemetry.Proto
module Consumer_impl =
Generic_http_consumer.Make (IO) (Opentelemetry_client_lwt.Notifier_lwt)
(Httpc)
val push_trace : Trace.resource_spans list -> unit
let create_consumer ?(stop = Atomic.make false) ?(config = Config.make ()) () =
Consumer_impl.consumer ~ticker_task:(Some 0.5) ~stop ~config ()
val push_metrics : Metrics.resource_metrics list -> unit
val push_logs : Logs.resource_logs list -> unit
val set_on_tick_callbacks : (unit -> unit) AList.t -> unit
val tick : unit -> unit
val cleanup : on_done:(unit -> unit) -> unit -> unit
end
(* make an emitter.
exceptions inside should be caught, see
https://opentelemetry.io/docs/reference/specification/error-handling/ *)
let mk_emitter ~stop ~(config : Config.t) () : (module EMITTER) =
let open Proto in
let open Lwt.Syntax in
(* local helpers *)
let open struct
let timeout =
if config.batch_timeout_ms > 0 then
Some Mtime.Span.(config.batch_timeout_ms * ms)
else
None
let batch_traces : Trace.resource_spans Batch.t =
Batch.make ?batch:config.batch_traces ?timeout ()
let batch_metrics : Metrics.resource_metrics Batch.t =
Batch.make ?batch:config.batch_metrics ?timeout ()
let batch_logs : Logs.resource_logs Batch.t =
Batch.make ?batch:config.batch_logs ?timeout ()
let on_tick_cbs_ = Atomic.make (AList.make ())
let set_on_tick_callbacks = Atomic.set on_tick_cbs_
let send_http_ (httpc : Httpc.t) ~url data : unit Lwt.t =
let* r = Httpc.send httpc ~url ~decode:(`Ret ()) data in
match r with
| Ok () -> Lwt.return ()
| Error `Sysbreak ->
Printf.eprintf "ctrl-c captured, stopping\n%!";
Atomic.set stop true;
Lwt.return ()
| Error err ->
(* TODO: log error _via_ otel? *)
Atomic.incr n_errors;
report_err_ err;
(* avoid crazy error loop *)
Lwt_unix.sleep 3.
let send_metrics_http client (l : Metrics.resource_metrics list) =
Signal.Encode.metrics l |> send_http_ client ~url:config.url_metrics
let send_traces_http client (l : Trace.resource_spans list) =
Signal.Encode.traces l |> send_http_ client ~url:config.url_traces
let send_logs_http client (l : Logs.resource_logs list) =
Signal.Encode.logs l |> send_http_ client ~url:config.url_logs
(* emit metrics, if the batch is full or timeout lapsed *)
let emit_metrics_maybe ~now ?force httpc : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_metrics with
| None -> Lwt.return false
| Some l ->
let batch = !gc_metrics @ l in
gc_metrics := [];
let+ () = send_metrics_http httpc batch in
true
let emit_traces_maybe ~now ?force httpc : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_traces with
| None -> Lwt.return false
| Some l ->
let+ () = send_traces_http httpc l in
true
let emit_logs_maybe ~now ?force httpc : bool Lwt.t =
match Batch.pop_if_ready ?force ~now batch_logs with
| None -> Lwt.return false
| Some l ->
let+ () = send_logs_http httpc l in
true
let[@inline] guard_exn_ where f =
try f ()
with e ->
let bt = Printexc.get_backtrace () in
Printf.eprintf
"opentelemetry-ocurl-lwt: uncaught exception in %s: %s\n%s\n%!" where
(Printexc.to_string e) bt
let emit_all_force (httpc : Httpc.t) : unit Lwt.t =
let now = Mtime_clock.now () in
let+ (_ : bool) = emit_traces_maybe ~now ~force:true httpc
and+ (_ : bool) = emit_logs_maybe ~now ~force:true httpc
and+ (_ : bool) = emit_metrics_maybe ~now ~force:true httpc in
()
(* thread that calls [tick()] regularly, to help enforce timeouts *)
let setup_ticker_thread ~tick ~finally () =
let rec tick_thread () =
if Atomic.get stop then (
finally ();
Lwt.return ()
) else
let* () = Lwt_unix.sleep 0.5 in
let* () = tick () in
tick_thread ()
in
Lwt.async tick_thread
end in
let httpc = Httpc.create () in
let module M = struct
(* we make sure that this is thread-safe, even though we don't have a
background thread. There can still be a ticker thread, and there
can also be several user threads that produce spans and call
the emit functions. *)
let push_to_batch b e =
match Batch.push b e with
| `Ok -> ()
| `Dropped -> Atomic.incr n_dropped
let push_trace e =
let@ () = guard_exn_ "push trace" in
push_to_batch batch_traces e;
let now = Mtime_clock.now () in
Lwt.async (fun () ->
let+ (_ : bool) = emit_traces_maybe ~now httpc in
())
let push_metrics e =
let@ () = guard_exn_ "push metrics" in
sample_gc_metrics_if_needed ();
push_to_batch batch_metrics e;
let now = Mtime_clock.now () in
Lwt.async (fun () ->
let+ (_ : bool) = emit_metrics_maybe ~now httpc in
())
let push_logs e =
let@ () = guard_exn_ "push logs" in
push_to_batch batch_logs e;
let now = Mtime_clock.now () in
Lwt.async (fun () ->
let+ (_ : bool) = emit_logs_maybe ~now httpc in
())
let set_on_tick_callbacks = set_on_tick_callbacks
let tick_ () =
if Config.Env.get_debug () then
Printf.eprintf "tick (from %d)\n%!" (tid ());
sample_gc_metrics_if_needed ();
List.iter
(fun f ->
try f ()
with e ->
Printf.eprintf "on tick callback raised: %s\n"
(Printexc.to_string e))
(AList.get @@ Atomic.get on_tick_cbs_);
let now = Mtime_clock.now () in
let+ (_ : bool) = emit_traces_maybe ~now httpc
and+ (_ : bool) = emit_logs_maybe ~now httpc
and+ (_ : bool) = emit_metrics_maybe ~now httpc in
()
let () = setup_ticker_thread ~tick:tick_ ~finally:ignore ()
(* if called in a blocking context: work in the background *)
let tick () = Lwt.async tick_
let cleanup ~on_done () =
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: exiting…\n%!";
Lwt.async (fun () ->
let* () = emit_all_force httpc in
Httpc.cleanup httpc;
on_done ();
Lwt.return ())
end in
(module M)
module Backend
(Arg : sig
val stop : bool Atomic.t
val config : Config.t
end)
() : Opentelemetry.Collector.BACKEND = struct
include (val mk_emitter ~stop:Arg.stop ~config:Arg.config ())
open Opentelemetry.Proto
open Opentelemetry.Collector
let send_trace : Trace.resource_spans list sender =
{
send =
(fun l ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send spans %a@."
(Format.pp_print_list Trace.pp_resource_spans)
l);
push_trace l;
ret ());
}
let last_sent_metrics = Atomic.make (Mtime_clock.now ())
let timeout_sent_metrics = Mtime.Span.(5 * s)
(* send metrics from time to time *)
let signal_emit_gc_metrics () =
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
Atomic.set needs_gc_metrics true
let additional_metrics () : Metrics.resource_metrics list =
(* add exporter metrics to the lot? *)
let last_emit = Atomic.get last_sent_metrics in
let now = Mtime_clock.now () in
let add_own_metrics =
let elapsed = Mtime.span last_emit now in
Mtime.Span.compare elapsed timeout_sent_metrics > 0
in
(* there is a possible race condition here, as several threads might update
metrics at the same time. But that's harmless. *)
if add_own_metrics then (
Atomic.set last_sent_metrics now;
let open OT.Metrics in
[
make_resource_metrics
[
sum ~name:"otel.export.dropped" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
];
sum ~name:"otel.export.errors" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
];
];
]
) else
[]
let send_metrics : Metrics.resource_metrics list sender =
{
send =
(fun m ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send metrics %a@."
(Format.pp_print_list Metrics.pp_resource_metrics)
m);
let m = List.rev_append (additional_metrics ()) m in
push_metrics m;
ret ());
}
let send_logs : Logs.resource_logs list sender =
{
send =
(fun m ~ret ->
(if Config.Env.get_debug () then
let@ () = Lock.with_lock in
Format.eprintf "send logs %a@."
(Format.pp_print_list Logs.pp_resource_logs)
m);
push_logs m;
ret ());
}
end
let create_backend ?(stop = Atomic.make false) ?(config = Config.make ()) () =
let module B =
Backend
(struct
let stop = stop
let config = config
end)
()
let create_exporter ?stop ?(config = Config.make ()) () =
let consumer = create_consumer ?stop ~config () in
let bq =
Bounded_queue_sync.create
~high_watermark:Bounded_queue.Defaults.high_watermark ()
in
(module B : OT.Collector.BACKEND)
Exporter_queued.create ~q:bq ~consumer ()
|> Exporter_add_batching.add_batching ~config
let create_backend = create_exporter
let setup_ ?stop ?config () : unit =
let backend = create_backend ?stop ?config () in
OT.Collector.set_backend backend;
let exp = create_backend ?stop ?config () in
Main_exporter.set exp;
()
let setup ?stop ?config ?(enable = true) () =
@ -472,7 +100,7 @@ let setup ?stop ?config ?(enable = true) () =
let remove_backend () : unit Lwt.t =
let done_fut, done_u = Lwt.wait () in
OT.Collector.remove_backend ~on_done:(fun () -> Lwt.wakeup_later done_u ()) ();
Main_exporter.remove ~on_done:(fun () -> Lwt.wakeup_later done_u ()) ();
done_fut
let with_setup ?stop ?(config = Config.make ()) ?(enable = true) () f : _ Lwt.t
@ -488,6 +116,6 @@ let with_setup ?stop ?(config = Config.make ()) ?(enable = true) () f : _ Lwt.t
res)
(fun exn ->
let* () = remove_backend () in
reraise exn)
Lwt.reraise exn)
) else
f ()

View file

@ -12,13 +12,21 @@ val set_headers : (string * string) list -> unit
module Config = Config
val create_backend :
val create_consumer :
?stop:bool Atomic.t ->
?config:Config.t ->
unit ->
(module Opentelemetry.Collector.BACKEND)
Opentelemetry_client.Consumer.any_resource_builder
(** Consumer that pulls from a queue *)
val create_exporter :
?stop:bool Atomic.t -> ?config:Config.t -> unit -> Opentelemetry.Exporter.t
(** Create a new backend using lwt and ezcurl-lwt *)
val create_backend :
?stop:bool Atomic.t -> ?config:Config.t -> unit -> Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup :
?stop:bool Atomic.t -> ?config:Config.t -> ?enable:bool -> unit -> unit
(** Setup endpoint. This modifies {!Opentelemetry.Collector.backend}.

View file

@ -1,24 +0,0 @@
type 'a t = {
mutable len: int;
mutable l: 'a list list;
mutable started: Mtime.t;
}
let create () = { len = 0; l = []; started = Mtime_clock.now () }
let push self l =
if l != [] then (
if self.l == [] then self.started <- Mtime_clock.now ();
self.l <- l :: self.l;
self.len <- self.len + List.length l
)
let[@inline] len self = self.len
let[@inline] time_started self = self.started
let pop_all self =
let l = self.l in
self.l <- [];
self.len <- 0;
l

View file

@ -1,14 +0,0 @@
(** List of lists with length *)
type 'a t
val create : unit -> 'a t
val push : 'a t -> 'a list -> unit
val len : _ t -> int
val time_started : _ t -> Mtime.t
(** Time at which the batch most recently became non-empty *)
val pop_all : 'a t -> 'a list list

View file

@ -1,8 +1,8 @@
module Atomic = Opentelemetry_atomic.Atomic
include Opentelemetry.Lock
module Proto = Opentelemetry_proto
let spf = Printf.sprintf
let ( let@ ) = ( @@ )
let tid () = Thread.id @@ Thread.self ()
let[@inline] tid () = Thread.id @@ Thread.self ()

View file

@ -1,3 +1,5 @@
open Opentelemetry_client
type t = {
bg_threads: int;
(** Are there background threads, and how many? Default [4]. This will be
@ -10,7 +12,7 @@ type t = {
[ticker_thread] is [true]. This will be clamped between [2 ms] and
some longer interval (maximum [60s] currently). Default 500.
@since 0.7 *)
common: Opentelemetry_client.Config.t;
common: Client_config.t;
(** Common configuration options
@since 0.12*)
}
@ -20,10 +22,9 @@ let pp out self =
Format.fprintf out
"{@[ bg_threads=%d;@ ticker_thread=%B;@ ticker_interval_ms=%d;@ common=%a \
@]}"
bg_threads ticker_thread ticker_interval_ms Opentelemetry_client.Config.pp
common
bg_threads ticker_thread ticker_interval_ms Client_config.pp common
module Env = Opentelemetry_client.Config.Env ()
module Env = Client_config.Env ()
let make =
Env.make

View file

@ -12,7 +12,7 @@ type t = {
[ticker_thread] is [true]. This will be clamped between [2 ms] and
some longer interval (maximum [60s] currently). Default 500.
@since 0.7 *)
common: Opentelemetry_client.Config.t;
common: Opentelemetry_client.Client_config.t;
(** Common configuration options
@since 0.12*)
}
@ -29,7 +29,7 @@ val make :
?ticker_interval_ms:int ->
unit ->
t)
Opentelemetry_client.Config.make
Opentelemetry_client.Client_config.make
(** Make a configuration {!t}. *)
module Env : Opentelemetry_client.Config.ENV
module Env : Opentelemetry_client.Client_config.ENV

View file

@ -4,7 +4,7 @@
(libraries
opentelemetry
opentelemetry.atomic
opentelemetry.client
opentelemetry-client
curl
pbrt
threads

View file

@ -3,472 +3,126 @@
https://github.com/open-telemetry/oteps/blob/main/text/0099-otlp-http.md
*)
module OT = Opentelemetry
module Config = Config
module Self_trace = Opentelemetry_client.Self_trace
module Signal = Opentelemetry_client.Signal
open Opentelemetry
include Common_
module OTELC = Opentelemetry_client
open Common_
module OTEL = Opentelemetry
let get_headers = Config.Env.get_headers
let set_headers = Config.Env.set_headers
let needs_gc_metrics = Atomic.make false
let n_bytes_sent : int Atomic.t = Atomic.make 0
let last_gc_metrics = Atomic.make (Mtime_clock.now ())
type error = OTELC.Export_error.t
let timeout_gc_metrics = Mtime.Span.(20 * s)
open struct
module Notifier = OTELC.Notifier_sync
(** side channel for GC, appended to metrics batch data *)
let gc_metrics = AList.make ()
module IO : OTELC.Generic_io.S_WITH_CONCURRENCY with type 'a t = 'a = struct
include OTELC.Generic_io.Direct_style
(** capture current GC metrics if {!needs_gc_metrics} is true or it has been a
long time since the last GC metrics collection, and push them into
{!gc_metrics} for later collection *)
let sample_gc_metrics_if_needed () =
let now = Mtime_clock.now () in
let alarm = Atomic.exchange needs_gc_metrics false in
let timeout () =
let elapsed = Mtime.span now (Atomic.get last_gc_metrics) in
Mtime.Span.compare elapsed timeout_gc_metrics > 0
in
if alarm || timeout () then (
Atomic.set last_gc_metrics now;
let l =
OT.Metrics.make_resource_metrics
~attrs:(Opentelemetry.GC_metrics.get_runtime_attributes ())
@@ Opentelemetry.GC_metrics.get_metrics ()
in
AList.add gc_metrics l
)
let sleep_s = Thread.delay
let n_errors = Atomic.make 0
let n_dropped = Atomic.make 0
(** Something sent to the collector *)
module Event = struct
open Opentelemetry.Proto
type t =
| E_metric of Metrics.resource_metrics list
| E_trace of Trace.resource_spans list
| E_logs of Logs.resource_logs list
| E_tick
| E_flush_all (** Flush all batches *)
let[@inline] spawn f =
ignore (OTELC.Util_thread.start_bg_thread f : Thread.t)
end
end
(** Something to be sent via HTTP *)
module To_send = struct
open Opentelemetry.Proto
module Httpc : OTELC.Generic_http_consumer.HTTPC with module IO = IO = struct
module IO = IO
type t =
| Send_metric of Metrics.resource_metrics list list
| Send_trace of Trace.resource_spans list list
| Send_logs of Logs.resource_logs list list
end
type t = Curl.t
(** start a thread in the background, running [f()] *)
let start_bg_thread (f : unit -> unit) : Thread.t =
let unix_run () =
let signals =
[
Sys.sigusr1;
Sys.sigusr2;
Sys.sigterm;
Sys.sigpipe;
Sys.sigalrm;
Sys.sigstop;
]
in
ignore (Thread.sigmask Unix.SIG_BLOCK signals : _ list);
f ()
in
(* no signals on Windows *)
let run () =
if Sys.win32 then
f ()
else
unix_run ()
in
Thread.create run ()
let create () = Ezcurl.make ()
let str_to_hex (s : string) : string =
let i_to_hex (i : int) =
if i < 10 then
Char.chr (i + Char.code '0')
else
Char.chr (i - 10 + Char.code 'a')
in
let cleanup = Ezcurl.delete
let res = Bytes.create (2 * String.length s) in
for i = 0 to String.length s - 1 do
let n = Char.code (String.get s i) in
Bytes.set res (2 * i) (i_to_hex ((n land 0xf0) lsr 4));
Bytes.set res ((2 * i) + 1) (i_to_hex (n land 0x0f))
done;
Bytes.unsafe_to_string res
module Backend_impl : sig
type t
val create : stop:bool Atomic.t -> config:Config.t -> unit -> t
val send_event : t -> Event.t -> unit
val n_bytes_sent : unit -> int
val shutdown : t -> on_done:(unit -> unit) -> unit
end = struct
open Opentelemetry.Proto
type t = {
stop: bool Atomic.t;
cleaned: bool Atomic.t; (** True when we cleaned up after closing *)
config: Config.t;
q: Event.t B_queue.t; (** Queue to receive data from the user's code *)
mutable main_th: Thread.t option; (** Thread that listens on [q] *)
send_q: To_send.t B_queue.t; (** Queue for the send worker threads *)
mutable send_threads: Thread.t array; (** Threads that send data via http *)
}
let send_http_ ~stop ~(config : Config.t) (client : Curl.t) ~url data : unit =
let@ _sc =
Self_trace.with_ ~kind:Span.Span_kind_producer "otel-ocurl.send-http"
in
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: send http POST to %s (%dB)\n%!" url
(String.length data);
let headers =
("Content-Type", "application/x-protobuf") :: config.common.headers
in
match
let@ _sc =
Self_trace.with_ ~kind:Span.Span_kind_internal "curl.post"
~attrs:[ "sz", `Int (String.length data); "url", `String url ]
let send (self : t) ~url ~decode (bod : string) : ('a, error) result =
let r =
let headers =
("Content-Type", "application/x-protobuf")
:: ("Accept", "application/x-protobuf")
:: Config.Env.get_headers ()
in
Ezcurl.post ~headers ~client ~params:[] ~url ~content:(`String data) ()
with
| Ok { code; _ } when code >= 200 && code < 300 ->
if Config.Env.get_debug () then
Printf.eprintf "opentelemetry: got response code=%d\n%!" code
| Ok { code; body; headers = _; info = _ } ->
Atomic.incr n_errors;
Self_trace.add_event _sc
@@ Opentelemetry.Event.make "error" ~attrs:[ "code", `Int code ];
if Config.Env.get_debug () then (
let dec = Pbrt.Decoder.of_string body in
let body =
try
let status = Status.decode_pb_status dec in
Format.asprintf "%a" Status.pp_status status
with _ ->
spf "(could not decode status)\nraw bytes: %s" (str_to_hex body)
in
Printf.eprintf
"opentelemetry: error while sending data to %s:\n code=%d\n %s\n%!"
url code body
);
()
| exception Sys.Break ->
Printf.eprintf "ctrl-c captured, stopping\n%!";
Atomic.set stop true
Ezcurl.post ~client:self ~headers ~params:[] ~url ~content:(`String bod)
()
in
match r with
| Error (code, msg) ->
(* TODO: log error _via_ otel? *)
Atomic.incr n_errors;
Printf.eprintf
"opentelemetry: export failed:\n %s\n curl code: %s\n url: %s\n%!"
msg (Curl.strerror code) url;
(* avoid crazy error loop *)
Thread.delay 3.
let[@inline] send_event (self : t) ev : unit = B_queue.push self.q ev
let n_bytes_sent_ = Atomic.make 0
let[@inline] n_bytes_sent () = Atomic.get n_bytes_sent_
(** Thread that, in a loop, reads from [q] to get the next message to send via
http *)
let bg_thread_loop (self : t) : unit =
Ezcurl.with_client ?set_opts:None @@ fun client ->
let config = self.config in
let stop = self.stop in
let send ~name ~url ~conv signals =
let l = List.fold_left (fun acc l -> List.rev_append l acc) [] signals in
let@ _sp =
Self_trace.with_ ~kind:Span_kind_producer name
~attrs:[ "n", `Int (List.length l) ]
let err =
`Failure
(spf
"sending signals via http POST failed:\n\
\ %s\n\
\ curl code: %s\n\
\ url: %s\n\
%!"
msg (Curl.strerror code) url)
in
let msg = conv l in
ignore (Atomic.fetch_and_add n_bytes_sent_ (String.length msg) : int);
send_http_ ~stop ~config ~url client msg
in
try
while not (Atomic.get stop) do
let msg = B_queue.pop self.send_q in
match msg with
| To_send.Send_trace tr ->
send ~name:"send-traces" ~conv:Signal.Encode.traces
~url:config.common.url_traces tr
| To_send.Send_metric ms ->
send ~name:"send-metrics" ~conv:Signal.Encode.metrics
~url:config.common.url_metrics ms
| To_send.Send_logs logs ->
send ~name:"send-logs" ~conv:Signal.Encode.logs
~url:config.common.url_logs logs
done
with B_queue.Closed -> ()
type batches = {
traces: Proto.Trace.resource_spans Batch.t;
logs: Proto.Logs.resource_logs Batch.t;
metrics: Proto.Metrics.resource_metrics Batch.t;
}
let batch_max_size_ = 200
let should_send_batch_ ?(side = []) ~config ~now (b : _ Batch.t) : bool =
(Batch.len b > 0 || side != [])
&& (Batch.len b >= batch_max_size_
||
let timeout = Mtime.Span.(config.Config.common.batch_timeout_ms * ms) in
let elapsed = Mtime.span now (Batch.time_started b) in
Mtime.Span.compare elapsed timeout >= 0)
let main_thread_loop (self : t) : unit =
let local_q = Queue.create () in
let config = self.config in
(* keep track of batches *)
let batches =
{
traces = Batch.create ();
logs = Batch.create ();
metrics = Batch.create ();
}
in
let send_metrics () =
let metrics = AList.pop_all gc_metrics :: Batch.pop_all batches.metrics in
B_queue.push self.send_q (To_send.Send_metric metrics)
in
let send_logs () =
B_queue.push self.send_q (To_send.Send_logs (Batch.pop_all batches.logs))
in
let send_traces () =
B_queue.push self.send_q
(To_send.Send_trace (Batch.pop_all batches.traces))
in
try
while not (Atomic.get self.stop) do
(* read multiple events at once *)
B_queue.pop_all self.q local_q;
(* are we asked to flush all events? *)
let must_flush_all = ref false in
(* how to process a single event *)
let process_ev (ev : Event.t) : unit =
match ev with
| Event.E_metric m -> Batch.push batches.metrics m
| Event.E_trace tr -> Batch.push batches.traces tr
| Event.E_logs logs -> Batch.push batches.logs logs
| Event.E_tick ->
(* the only impact of "tick" is that it wakes us up regularly *)
()
| Event.E_flush_all -> must_flush_all := true
in
Queue.iter process_ev local_q;
Queue.clear local_q;
if !must_flush_all then (
if Batch.len batches.metrics > 0 || not (AList.is_empty gc_metrics)
then
send_metrics ();
if Batch.len batches.logs > 0 then send_logs ();
if Batch.len batches.traces > 0 then send_traces ()
) else (
let now = Mtime_clock.now () in
if
should_send_batch_ ~config ~now batches.metrics
~side:(AList.get gc_metrics)
then
send_metrics ();
if should_send_batch_ ~config ~now batches.traces then send_traces ();
if should_send_batch_ ~config ~now batches.logs then send_logs ()
)
done
with B_queue.Closed -> ()
let create ~stop ~config () : t =
let n_send_threads = max 2 config.Config.bg_threads in
let self =
{
stop;
config;
q = B_queue.create ();
send_threads = [||];
send_q = B_queue.create ();
cleaned = Atomic.make false;
main_th = None;
}
in
let main_th = start_bg_thread (fun () -> main_thread_loop self) in
self.main_th <- Some main_th;
self.send_threads <-
Array.init n_send_threads (fun _i ->
start_bg_thread (fun () -> bg_thread_loop self));
self
let shutdown self ~on_done : unit =
Atomic.set self.stop true;
if not (Atomic.exchange self.cleaned true) then (
(* empty batches *)
send_event self Event.E_flush_all;
(* close the incoming queue, wait for the thread to finish
before we start cutting off the background threads, so that they
have time to receive the final batches *)
B_queue.close self.q;
Option.iter Thread.join self.main_th;
(* close send queues, then wait for all threads *)
B_queue.close self.send_q;
Array.iter Thread.join self.send_threads
);
on_done ()
Error err
| Ok { code; body; _ } when code >= 200 && code < 300 ->
(match decode with
| `Ret x -> Ok x
| `Dec f ->
let dec = Pbrt.Decoder.of_string body in
(try Ok (f dec)
with e ->
let bt = Printexc.get_backtrace () in
Error
(`Failure
(spf "decoding failed with:\n%s\n%s" (Printexc.to_string e) bt))))
| Ok { code; body; _ } ->
let err =
OTELC.Export_error.decode_invalid_http_response ~url ~code body
in
Error err
end
let create_backend ?(stop = Atomic.make false)
?(config : Config.t = Config.make ()) () : (module Collector.BACKEND) =
let module M = struct
open Opentelemetry.Proto
open Opentelemetry.Collector
module Consumer_impl = OTELC.Generic_http_consumer.Make (IO) (Notifier) (Httpc)
let backend = Backend_impl.create ~stop ~config ()
let send_trace : Trace.resource_spans list sender =
{
send =
(fun l ~ret ->
Backend_impl.send_event backend (Event.E_trace l);
ret ());
}
let last_sent_metrics = Atomic.make (Mtime_clock.now ())
(* send metrics from time to time *)
let timeout_sent_metrics = Mtime.Span.(5 * s)
let signal_emit_gc_metrics () =
if config.common.debug then
Printf.eprintf "opentelemetry: emit GC metrics requested\n%!";
Atomic.set needs_gc_metrics true
let additional_metrics () : Metrics.resource_metrics list =
(* add exporter metrics to the lot? *)
let last_emit = Atomic.get last_sent_metrics in
let now = Mtime_clock.now () in
let add_own_metrics =
let elapsed = Mtime.span last_emit now in
Mtime.Span.compare elapsed timeout_sent_metrics > 0
in
(* there is a possible race condition here, as several threads might update
metrics at the same time. But that's harmless. *)
if add_own_metrics then (
Atomic.set last_sent_metrics now;
let open OT.Metrics in
let now_unix = OT.Timestamp_ns.now_unix_ns () in
[
make_resource_metrics
[
sum ~name:"otel.export.dropped" ~is_monotonic:true
[
int ~start_time_unix_nano:now_unix ~now:now_unix
(Atomic.get n_dropped);
];
sum ~name:"otel.export.errors" ~is_monotonic:true
[
int ~start_time_unix_nano:now_unix ~now:now_unix
(Atomic.get n_errors);
];
];
]
) else
[]
let send_metrics : Metrics.resource_metrics list sender =
{
send =
(fun m ~ret ->
let m = List.rev_append (additional_metrics ()) m in
Backend_impl.send_event backend (Event.E_metric m);
ret ());
}
let send_logs : Logs.resource_logs list sender =
{
send =
(fun m ~ret ->
Backend_impl.send_event backend (Event.E_logs m);
ret ());
}
let on_tick_cbs_ = Atomic.make (AList.make ())
let set_on_tick_callbacks = Atomic.set on_tick_cbs_
let tick () =
sample_gc_metrics_if_needed ();
Backend_impl.send_event backend Event.E_tick;
List.iter (fun f -> f ()) (AList.get @@ Atomic.get on_tick_cbs_)
let cleanup ~on_done () = Backend_impl.shutdown backend ~on_done
end in
(module M)
(** thread that calls [tick()] regularly, to help enforce timeouts *)
let setup_ticker_thread ~stop ~sleep_ms (module B : Collector.BACKEND) () =
let sleep_s = float sleep_ms /. 1000. in
let tick_loop () =
try
while not @@ Atomic.get stop do
Thread.delay sleep_s;
B.tick ()
done
with B_queue.Closed -> ()
let consumer ?(stop = Atomic.make false) ?(config = Config.make ()) () :
Opentelemetry_client.Consumer.any_resource_builder =
let n_workers = max 2 (min 32 config.bg_threads) in
let ticker_task =
if config.ticker_thread then
Some (float config.ticker_interval_ms /. 1000.)
else
None
in
start_bg_thread tick_loop
Consumer_impl.consumer ~override_n_workers:n_workers ~ticker_task ~stop
~config:config.common ()
let create_exporter ?stop ?(config = Config.make ()) () : OTEL.Exporter.t =
let consumer = consumer ?stop ~config () in
let bq =
OTELC.Bounded_queue_sync.create
~high_watermark:OTELC.Bounded_queue.Defaults.high_watermark ()
in
OTELC.Exporter_queued.create ~q:bq ~consumer ()
|> OTELC.Exporter_add_batching.add_batching ~config:config.common
let create_backend = create_exporter
let setup_ ?(stop = Atomic.make false) ?(config : Config.t = Config.make ()) ()
: unit =
let backend = create_backend ~stop ~config () in
Opentelemetry.Collector.set_backend backend;
let exporter = create_exporter ~stop ~config () in
OTEL.Main_exporter.set exporter;
Self_trace.set_enabled config.common.self_trace;
OTELC.Self_trace.set_enabled config.common.self_trace;
if config.ticker_thread then (
(* at most a minute *)
let sleep_ms = min 60_000 (max 2 config.ticker_interval_ms) in
ignore (setup_ticker_thread ~stop ~sleep_ms backend () : Thread.t)
ignore
(OTELC.Util_thread.setup_ticker_thread ~stop ~sleep_ms exporter ()
: Thread.t)
)
let remove_backend () : unit =
(* we don't need the callback, this runs in the same thread *)
OT.Collector.remove_backend () ~on_done:ignore
OTEL.Main_exporter.remove () ~on_done:ignore
let setup ?stop ?config ?(enable = true) () =
if enable then setup_ ?stop ?config ()
@ -480,4 +134,4 @@ let with_setup ?stop ?config ?(enable = true) () f =
) else
f ()
let n_bytes_sent = Backend_impl.n_bytes_sent
let[@inline] n_bytes_sent () = Atomic.get n_bytes_sent

View file

@ -3,22 +3,31 @@
https://opentelemetry.io/docs/reference/specification/protocol/exporter/
*)
open Opentelemetry_atomic
val get_headers : unit -> (string * string) list
val set_headers : (string * string) list -> unit
(** Set http headers that are sent on every http query to the collector. *)
module Atomic = Opentelemetry_atomic.Atomic
module Config = Config
val n_bytes_sent : unit -> int
(** Global counter of bytes sent (or attempted to be sent) *)
val create_backend :
val consumer :
?stop:bool Atomic.t ->
?config:Config.t ->
unit ->
(module Opentelemetry.Collector.BACKEND)
Opentelemetry_client.Consumer.any_resource_builder
(** Consumer that pulls from a queue *)
val create_exporter :
?stop:bool Atomic.t -> ?config:Config.t -> unit -> Opentelemetry.Exporter.t
val create_backend :
?stop:bool Atomic.t -> ?config:Config.t -> unit -> Opentelemetry.Exporter.t
[@@deprecated "use create_exporter"]
val setup :
?stop:bool Atomic.t -> ?config:Config.t -> ?enable:bool -> unit -> unit

View file

@ -0,0 +1,33 @@
open Opentelemetry.Proto
(** A resource *)
type t =
| R_metrics of Metrics.resource_metrics list
| R_spans of Trace.resource_spans list
| R_logs of Logs.resource_logs list
open struct
let of_x_or_empty ?service_name ?attrs ~f l =
if l = [] then
[]
else
[ f ?service_name ?attrs l ]
end
let of_logs ?service_name ?attrs logs : t =
R_logs [ Util_resources.make_resource_logs ?service_name ?attrs logs ]
let of_logs_or_empty ?service_name ?attrs logs =
of_x_or_empty ?service_name ?attrs ~f:of_logs logs
let of_spans ?service_name ?attrs spans : t =
R_spans [ Util_resources.make_resource_spans ?service_name ?attrs spans ]
let of_spans_or_empty ?service_name ?attrs spans =
of_x_or_empty ?service_name ?attrs ~f:of_spans spans
let of_metrics ?service_name ?attrs m : t =
R_metrics [ Util_resources.make_resource_metrics ?service_name ?attrs m ]
let of_metrics_or_empty ?service_name ?attrs ms =
of_x_or_empty ?service_name ?attrs ~f:of_metrics ms

View file

@ -1,14 +1,19 @@
open Opentelemetry_util
module Otel = Opentelemetry
module A = Opentelemetry_atomic.Atomic
module Domain = Opentelemetry_domain
type 'a state = {
start: Mtime.t;
size: int;
q: 'a list; (** The queue is a FIFO represented as a list in reverse order *)
}
type 'a t = {
mutable size: int;
mutable q: 'a list;
(** The queue is a FIFO represented as a list in reverse order *)
st: 'a state A.t;
batch: int; (** Minimum size to batch before popping *)
high_watermark: int; (** Size above which we start dropping signals *)
timeout: Mtime.span option;
mutable start: Mtime.t;
mutex: Mutex.t;
}
let default_high_watermark batch_size =
@ -17,6 +22,10 @@ let default_high_watermark batch_size =
else
batch_size * 10
let _dummy_start = Mtime.min_stamp
let _empty_state : _ state = { q = []; size = 0; start = _dummy_start }
let make ?(batch = 1) ?high_watermark ?now ?timeout () : _ t =
let high_watermark =
match high_watermark with
@ -26,36 +35,58 @@ let make ?(batch = 1) ?high_watermark ?now ?timeout () : _ t =
let start =
match now with
| Some x -> x
| None -> Mtime_clock.now ()
| None -> _dummy_start
in
let mutex = Mutex.create () in
assert (batch > 0);
{ size = 0; q = []; start; batch; timeout; high_watermark; mutex }
{ st = A.make { size = 0; q = []; start }; batch; timeout; high_watermark }
let timeout_expired_ ~now self : bool =
match self.timeout with
let timeout_expired_ ~now ~timeout (self : _ state) : bool =
match timeout with
| Some t ->
let elapsed = Mtime.span now self.start in
Mtime.Span.compare elapsed t >= 0
| None -> false
(* Big enough to send a batch *)
let is_full_ self : bool = self.size >= self.batch
let[@inline] is_full_ ~batch (self : _ state) : bool = self.size >= batch
let ready_to_pop ~force ~now self =
self.size > 0 && (force || is_full_ self || timeout_expired_ ~now self)
let[@inline] atomic_update_loop_ (type res) (self : _ t)
(f : 'a state -> 'a state * res) : res =
let exception Return of res in
try
let backoff = ref 1 in
while true do
let st = A.get self.st in
let new_st, res = f st in
if A.compare_and_set self.st st new_st then raise_notrace (Return res);
(* poor man's backoff strategy *)
Domain.relax_loop !backoff;
backoff := min 128 (2 * !backoff)
done
with Return res -> res
let pop_if_ready ?(force = false) ~now (self : _ t) : _ list option =
let rev_batch_opt =
Otel.Util_mutex.protect self.mutex @@ fun () ->
if ready_to_pop ~force ~now self then (
assert (self.q <> []);
let batch = self.q in
self.q <- [];
self.size <- 0;
Some batch
(* update state. When uncontended this runs only once. *)
atomic_update_loop_ self @@ fun state ->
(* *)
(* check if the batch is ready *)
let ready_to_pop =
state.size > 0
&& (force
|| is_full_ ~batch:self.batch state
|| timeout_expired_ ~now ~timeout:self.timeout state)
in
if ready_to_pop then (
assert (state.q <> []);
let batch = state.q in
let new_st = _empty_state in
new_st, Some batch
) else
None
state, None
in
match rev_batch_opt with
| None -> None
@ -63,25 +94,78 @@ let pop_if_ready ?(force = false) ~now (self : _ t) : _ list option =
(* Reverse the list to retrieve the FIFO order. *)
Some (List.rev batch)
let rec push_unprotected (self : _ t) ~(elems : _ list) : unit =
match elems with
| [] -> ()
| x :: xs ->
self.q <- x :: self.q;
self.size <- 1 + self.size;
push_unprotected self ~elems:xs
let push (self : _ t) elems : [ `Dropped | `Ok ] =
Otel.Util_mutex.protect self.mutex @@ fun () ->
if self.size >= self.high_watermark then
(* drop this to prevent queue from growing too fast *)
`Dropped
if elems = [] then
`Ok `Ok
else (
if self.size = 0 && Option.is_some self.timeout then
(* current batch starts now *)
self.start <- Mtime_clock.now ();
let now = lazy (Mtime_clock.now ()) in
atomic_update_loop_ self @@ fun state ->
if state.size >= self.high_watermark then
(* drop this to prevent queue from growing too fast *)
state, `Dropped
else (
let start =
if state.size = 0 && Option.is_some self.timeout then
Lazy.force now
else
state.start
in
(* add to queue *)
push_unprotected self ~elems;
`Ok
(* add to queue *)
let state =
{
size = state.size + List.length elems;
q = List.rev_append elems state.q;
start;
}
in
state, `Ok
)
)
let[@inline] push' self elems = ignore (push self elems : [ `Dropped | `Ok ])
open Opentelemetry_emitter
let wrap_emitter (self : _ t) (e : _ Emitter.t) : _ Emitter.t =
let enabled () = e.enabled () in
let closed () = e.closed () in
let flush_and_close () =
(* FIXME: we need to close the batch first, to prevent
further pushes; then write the content to [e]; then
flusn and close [e]. In this order. *)
(match pop_if_ready self ~force:true ~now:Mtime.max_stamp with
| None -> ()
| Some l -> Emitter.emit e l);
Emitter.flush_and_close e
in
let maybe_emit ~now =
match pop_if_ready self ~force:false ~now with
| None -> ()
| Some l -> Emitter.emit e l
in
let tick ~now =
(* first, check if batch has timed out *)
maybe_emit ~now;
(* only then, tick the underlying emitter *)
Emitter.tick e ~now
in
let emit l =
if l <> [] && e.enabled () then (
push' self l;
(* TODO: it'd be nice if we checked only for size here, not
for timeout. The [tick] function is enough for timeouts,
whereas [emit] is in the hot path of every single span/metric/log *)
let now = Mtime_clock.now () in
maybe_emit ~now
)
in
{ Emitter.closed; enabled; flush_and_close; tick; emit }

View file

@ -50,3 +50,12 @@ val push : 'a t -> 'a list -> [ `Dropped | `Ok ]
(** [push b xs] is [`Ok] if it succeeds in pushing the values in [xs] into the
batch [b], or [`Dropped] if the current size of the batch has exceeded the
high water mark determined by the [batch] argument to [{!make}]. ) *)
val push' : 'a t -> 'a list -> unit
(** Like {!push} but ignores the result *)
open Opentelemetry_emitter
val wrap_emitter : 'a t -> 'a Emitter.t -> 'a Emitter.t
(** [wrap_emitter batch e] is an emitter that uses batch [batch] to gather
signals into larger lists before passing them to [e]. *)

View file

@ -0,0 +1,67 @@
(** Interface for a thread-safe, bounded queue.
After the high watermark is reached, pushing items into the queue will
instead discard them. *)
exception Closed
(** Raised when pushing into a closed queue *)
type 'a pop_result =
[ `Empty
| `Closed
| `Item of 'a
]
type 'a t = {
push: 'a list -> unit;
(** Push items. This might discard some of them.
@raise Closed if the queue is closed. *)
num_discarded: unit -> int; (** How many items were discarded? *)
on_non_empty: (unit -> unit) -> unit;
(** [on_non_empty f] registers [f] to be called whenever the queue
transitions from empty to non-empty. *)
try_pop: unit -> 'a pop_result; (** Try to pop an item right now. *)
close: unit -> unit;
(** Close the queue. Items currently in the queue will still be accessible
to consumers until the queue is emptied out. Idempotent. *)
closed: unit -> bool;
(** Is the queue closed {b for writing}. Consumers should only use
[try_pop] because a queue that's closed-for-writing might still
contain straggler items that need to be consumed.
This should be as fast and cheap as possible. *)
}
(** A bounded queue, with multiple producers and potentially multiple consumers.
All functions must be thread-safe except for [try_pop] which might not have
to be depending on the context (e.g. a Lwt-specific queue implementation
will consume only from the Lwt thread). *)
let[@inline] push (self : _ t) x : unit = self.push x
let[@inline] num_discarded self = self.num_discarded ()
let[@inline] try_pop (self : _ t) : _ pop_result = self.try_pop ()
let[@inline] on_non_empty (self : _ t) f = self.on_non_empty f
let[@inline] close (self : _ t) : unit = self.close ()
let[@inline] closed (self : _ t) : bool = self.closed ()
(** Turn the writing end of the queue into an emitter. *)
let to_emitter (self : 'a t) : 'a Opentelemetry_emitter.Emitter.t =
  {
    closed = self.closed;
    enabled = (fun () -> not (self.closed ()));
    emit = (fun l -> if l <> [] then self.push l);
    tick = (fun ~now:_ -> ());
    flush_and_close =
      (* NOTE: we cannot actually flush, only close. Emptying the queue is
         fundamentally asynchronous because it's done by consumers *)
      (fun () -> self.close ());
  }
module Defaults = struct
(** The default high watermark *)
let high_watermark : int = 2048
end

View file

@ -0,0 +1,116 @@
module BQ = Bounded_queue
exception Closed = Bounded_queue.Closed
(* a variant of {!Sync_queue} with more bespoke pushing behavior *)
module Q : sig
  type 'a t

  val create : unit -> 'a t

  val close : _ t -> unit

  val closed : _ t -> bool

  val try_pop : 'a t -> 'a option
  (** Pop an item if one is immediately available. Items pushed before
      [close] remain poppable after it, so consumers can drain stragglers
      (see the contract of {!Bounded_queue.t}).
      @raise Closed if the queue is closed {b and} empty. *)

  val push_while_not_full : high_watermark:int -> 'a t -> 'a list -> int * int
  (** [push_while_not_full q ~high_watermark xs] tries to push each item of [x]
      into [q].

      An item is not pushed if the queue is "full" (size >= high_watermark).

      This returns a pair [num_discarded, old_size] where [num_discarded] is the
      number of items that could not be pushed, and [old_size] is the size
      before anything was pushed.
      @raise Closed if the queue is closed. *)
end = struct
  module UM = Opentelemetry_util.Util_mutex

  type 'a t = {
    mutex: Mutex.t;
    q: 'a Queue.t;
    mutable closed: bool;
  }

  let create () : _ t =
    { mutex = Mutex.create (); q = Queue.create (); closed = false }

  (* NOTE: the race condition here is benign, assuming no tearing of
     a value of type [bool] which OCaml's memory model should guarantee. *)
  let[@inline] closed self = self.closed

  let close (self : _ t) =
    UM.protect self.mutex @@ fun () ->
    if not self.closed then self.closed <- true

  let try_pop (self : 'a t) : 'a option =
    UM.protect self.mutex @@ fun () ->
    match Queue.pop self.q with
    | x -> Some x
    | exception Queue.Empty ->
      (* only report closure once all straggler items have been drained,
         per the documented contract of {!Bounded_queue.t}: the queue is
         closed for {b writing} only *)
      if self.closed then raise Closed;
      None

  let push_while_not_full ~high_watermark (self : 'a t) (xs : 'a list) :
      int * int =
    UM.protect self.mutex @@ fun () ->
    if self.closed then raise Closed;
    let old_size = Queue.length self.q in
    (* push items one by one until the high watermark is reached or [xs]
       is exhausted *)
    let xs = ref xs in
    let continue = ref true in
    while !continue && Queue.length self.q < high_watermark do
      match !xs with
      | [] -> continue := false
      | x :: tl_xs ->
        xs := tl_xs;
        Queue.push x self.q
    done;
    (* whatever is left in [!xs] did not fit *)
    let n_discarded = List.length !xs in
    n_discarded, old_size
end
type 'a state = {
n_discarded: int Atomic.t;
high_watermark: int;
q: 'a Q.t;
on_non_empty: Cb_set.t;
}
(* Push items into the queue, discarding whichever do not fit under the
   high watermark, and count how many were dropped. If the queue was empty
   beforehand, wake up consumers registered via [on_non_empty].
   @raise BQ.Closed if the queue is closed. *)
let push (self : _ state) x =
  let discarded, old_size =
    (* [Q] raises [Closed], which is already an alias of [BQ.Closed]
       (see the top of this file) — NOT [Sync_queue.Closed] — so it can
       simply propagate; the previous [with Sync_queue.Closed] handler
       was dead code that could never match. *)
    Q.push_while_not_full self.q ~high_watermark:self.high_watermark x
  in
  if discarded > 0 then
    ignore (Atomic.fetch_and_add self.n_discarded discarded : int);
  (* wake up laggards if the queue was empty *)
  if old_size = 0 then Cb_set.trigger self.on_non_empty;
  ()
(* Pop a single item if one is immediately available. [Q.try_pop] raises
   [Closed] (the alias of [BQ.Closed]), not [Sync_queue.Closed]: catching
   the latter — as the previous version did — let the exception escape to
   the caller instead of returning [`Closed]. *)
let try_pop (self : _ state) : _ BQ.pop_result =
  match Q.try_pop self.q with
  | Some x -> `Item x
  | None -> `Empty
  | exception Closed -> `Closed
(* Package the state as a first-class {!BQ.t} record. *)
let to_bounded_queue (self : 'a state) : 'a BQ.t =
  {
    BQ.push = (fun x -> push self x);
    num_discarded = (fun () -> Atomic.get self.n_discarded);
    on_non_empty = Cb_set.register self.on_non_empty;
    try_pop = (fun () -> try_pop self);
    close = (fun () -> Q.close self.q);
    closed = (fun () -> Q.closed self.q);
  }
(* Build a fresh bounded queue with the given high watermark. *)
let create ~high_watermark () : _ BQ.t =
  to_bounded_queue
    {
      high_watermark;
      q = Q.create ();
      n_discarded = Atomic.make 0;
      on_non_empty = Cb_set.create ();
    }

View file

@ -0,0 +1,7 @@
(** Bounded queue based on simple synchronization primitives.
This is not the fastest queue but it should be versatile. *)
val create : high_watermark:int -> unit -> 'a Bounded_queue.t
(** [create ~high_watermark ()] creates a new bounded queue based on
{!Sync_queue} *)

View file

@ -1,8 +0,0 @@
(** Utilities for writing clients
These are used for implementing e.g., the [opentelemetry-client-cohttp-lwt]
and [opentelemetry-client-ocurl] packages package. *)
module Config = Config
module Signal = Signal
module Self_trace = Self_trace

View file

@ -9,12 +9,19 @@ type t = {
batch_logs: int option;
batch_timeout_ms: int;
self_trace: bool;
http_concurrency_level: int option;
}
let pp out (self : t) : unit =
let ppiopt = Format.pp_print_option Format.pp_print_int in
let ppiopt out i =
match i with
| None -> Format.fprintf out "None"
| Some i -> Format.fprintf out "%d" i
in
let pp_header ppf (a, b) = Format.fprintf ppf "@[%s: @,%s@]@." a b in
let ppheaders = Format.pp_print_list pp_header in
let ppheaders out l =
Format.fprintf out "[@[%a@]]" (Format.pp_print_list pp_header) l
in
let {
debug;
self_trace;
@ -26,15 +33,17 @@ let pp out (self : t) : unit =
batch_metrics;
batch_logs;
batch_timeout_ms;
http_concurrency_level;
} =
self
in
Format.fprintf out
"{@[ debug=%B;@ self_trace=%B; url_traces=%S;@ url_metrics=%S;@ \
url_logs=%S;@ headers=%a;@ batch_traces=%a;@ batch_metrics=%a;@ \
batch_logs=%a;@ batch_timeout_ms=%d @]}"
batch_logs=%a;@ batch_timeout_ms=%d;@ http_concurrency_level=%a @]}"
debug self_trace url_traces url_metrics url_logs ppheaders headers ppiopt
batch_traces ppiopt batch_metrics ppiopt batch_logs batch_timeout_ms
batch_traces ppiopt batch_metrics ppiopt batch_logs batch_timeout_ms ppiopt
http_concurrency_level
let default_url = "http://localhost:4318"
@ -50,6 +59,7 @@ type 'k make =
?headers:(string * string) list ->
?batch_timeout_ms:int ->
?self_trace:bool ->
?http_concurrency_level:int ->
'k
module type ENV = sig
@ -123,7 +133,8 @@ module Env () : ENV = struct
let make k ?(debug = get_debug ()) ?url ?url_traces ?url_metrics ?url_logs
?(batch_traces = Some 400) ?(batch_metrics = Some 20)
?(batch_logs = Some 400) ?(headers = get_headers ())
?(batch_timeout_ms = 2_000) ?(self_trace = false) =
?(batch_timeout_ms = 2_000) ?(self_trace = false) ?http_concurrency_level
=
(* Ensure the state is synced, in case these values are passed in explicitly *)
set_debug debug;
set_headers headers;
@ -165,5 +176,6 @@ module Env () : ENV = struct
batch_logs;
batch_timeout_ms;
self_trace;
http_concurrency_level;
}
end

View file

@ -19,8 +19,8 @@ type t = private {
(** Batch metrics? If [Some i], then this produces batches of (at most)
[i] items. If [None], there is no batching.
Note that traces and metrics are batched separately. Default [None].
*)
Note that traces and metrics are batched separately. Default
[Some 20]. *)
batch_logs: int option;
(** Batch logs? See {!batch_metrics} for details. Default [Some 400] *)
batch_timeout_ms: int;
@ -32,6 +32,9 @@ type t = private {
(** If true, the OTEL library will also emit its own spans. Default
[false].
@since 0.7 *)
http_concurrency_level: int option;
(** How many HTTP requests can be done simultaneously (at most)?
@since NEXT_RELEASE *)
}
(** Configuration.
@ -55,6 +58,7 @@ type 'k make =
?headers:(string * string) list ->
?batch_timeout_ms:int ->
?self_trace:bool ->
?http_concurrency_level:int ->
'k
(** A function that gathers all the values needed to construct a {!t}, and
produces a ['k]. ['k] is typically a continuation used to construct a

6
src/client/common_.ml Normal file
View file

@ -0,0 +1,6 @@
module OTEL = Opentelemetry
module Proto = Opentelemetry_proto
let spf = Printf.sprintf
let ( let@ ) = ( @@ )

30
src/client/consumer.ml Normal file
View file

@ -0,0 +1,30 @@
(** Consumer that accepts items from a bounded queue *)
type 'a t = {
active: unit -> bool; (** Still running? Must be fast and thread-safe *)
tick: unit -> unit;
(** Regularly called, eg to emit metrics, check timeouts, etc. Must be
thread safe. *)
shutdown: on_done:(unit -> unit) -> unit;
(** Shutdown the consumer as soon as possible, call [on_done()] once it's
done. *)
}
(** A consumer for signals of type ['a] *)
type 'a consumer = 'a t
let[@inline] active (self : _ t) = self.active ()
let[@inline] shutdown (self : _ t) ~on_done = self.shutdown ~on_done
module Builder = struct
type 'a t = { start_consuming: 'a Bounded_queue.t -> 'a consumer }
(** A builder that will create a consumer for a given queue, start the
consumer so it starts consuming from the queue, and return the consumer.
*)
let start_consuming (self : _ t) bq = self.start_consuming bq
end
type any_resource_builder = Any_resource.t Builder.t
(** The type that's useful for OTEL backends *)

View file

@ -1,5 +1,16 @@
(library
(name opentelemetry_client)
(public_name opentelemetry.client)
(libraries opentelemetry pbrt mtime mtime.clock.os)
(synopsis "Common types and logic shared between client implementations"))
(public_name opentelemetry-client)
(flags :standard -open Opentelemetry_util)
(libraries
opentelemetry
opentelemetry.util
opentelemetry.emitter
opentelemetry.proto
opentelemetry.domain
pbrt
saturn
mtime
mtime.clock.os)
(synopsis
"Basic exporters, as well as common types and logic shared between exporters"))

View file

@ -0,0 +1,48 @@
type t =
[ `Status of int * Opentelemetry.Proto.Status.status
| `Failure of string
| `Sysbreak
]
let str_to_hex (s : string) : string =
Opentelemetry_util.Util_bytes_.bytes_to_hex (Bytes.unsafe_of_string s)
(** Report the error on stderr. *)
let report_err : t -> unit = function
| `Sysbreak -> Printf.eprintf "opentelemetry: ctrl-c captured, stopping\n%!"
| `Failure msg ->
Format.eprintf "@[<2>opentelemetry: export failed: %s@]@." msg
| `Status
( code,
{
Opentelemetry.Proto.Status.code = scode;
message;
details;
_presence = _;
} ) ->
let pp_details out l =
List.iter
(fun s -> Format.fprintf out "%S;@ " (Bytes.unsafe_to_string s))
l
in
Format.eprintf
"@[<2>opentelemetry: export failed with@ http code=%d@ status \
{@[code=%ld;@ message=%S;@ details=[@[%a@]]@]}@]@."
code scode
(Bytes.unsafe_to_string message)
pp_details details
let decode_invalid_http_response ~code ~url (body : string) : t =
try
let dec = Pbrt.Decoder.of_string body in
let status = Opentelemetry.Proto.Status.decode_pb_status dec in
`Status (code, status)
with e ->
let bt = Printexc.get_backtrace () in
`Failure
(Printf.sprintf
"httpc: decoding of status (url=%S, code=%d) failed with:\n\
%s\n\
HTTP body: %s\n\
%s"
url code (Printexc.to_string e) (str_to_hex body) bt)

View file

@ -0,0 +1,25 @@
(** Add batching to emitter based on client config *)
open Common_
open struct
let add_batch ~timeout batch (emitter : 'a OTEL.Emitter.t) : 'a OTEL.Emitter.t
=
let b = Batch.make ~batch ~timeout () in
Batch.wrap_emitter b emitter
end
let add_batching ~(config : Client_config.t) (exp : OTEL.Exporter.t) :
OTEL.Exporter.t =
let timeout = Mtime.Span.(config.batch_timeout_ms * ms) in
let add_batch_opt (b : int option) e =
match b with
| None -> e
| Some b -> add_batch ~timeout b e
in
let emit_spans = add_batch_opt config.batch_traces exp.emit_spans in
let emit_metrics = add_batch_opt config.batch_metrics exp.emit_metrics in
let emit_logs = add_batch_opt config.batch_logs exp.emit_logs in
{ exp with emit_spans; emit_metrics; emit_logs }

View file

@ -0,0 +1,32 @@
open Common_
open Opentelemetry_emitter
(** [debug exporter] behaves like [exporter], but will print signals on [stderr]
before passing them to [exporter] *)
let debug ?(out = Format.err_formatter) (exp : OTEL.Exporter.t) :
OTEL.Exporter.t =
let open Proto in
{
emit_spans =
Emitter.tap
(fun sp -> Format.fprintf out "SPAN: %a@." Trace.pp_span sp)
exp.emit_spans;
emit_logs =
Emitter.tap
(fun log -> Format.fprintf out "LOG: %a@." Proto.Logs.pp_log_record log)
exp.emit_logs;
emit_metrics =
Emitter.tap
(fun m -> Format.fprintf out "METRIC: %a@." Metrics.pp_metric m)
exp.emit_metrics;
on_tick = exp.on_tick;
tick = exp.tick;
cleanup =
(fun ~on_done () ->
Format.fprintf out "CLEANUP@.";
exp.cleanup ~on_done ());
}
(** Exporter that simply debugs on [stderr] *)
let debug_only : OTEL.Exporter.t =
debug ~out:Format.err_formatter @@ OTEL.Exporter.dummy ()

View file

@ -0,0 +1,57 @@
(** Build an exporter from a queue and a consumer *)
open Common_
module BQ = Bounded_queue
module BQ_emitters = struct
let logs_emitter_of_bq ?service_name ?attrs
(q : Any_resource.t Bounded_queue.t) : OTEL.Logger.t =
Bounded_queue.to_emitter q
|> Opentelemetry_emitter.Emitter.flat_map
(Any_resource.of_logs_or_empty ?service_name ?attrs)
let spans_emitter_of_bq ?service_name ?attrs
(q : Any_resource.t Bounded_queue.t) : OTEL.Tracer.t =
Bounded_queue.to_emitter q
|> Opentelemetry_emitter.Emitter.flat_map
(Any_resource.of_spans_or_empty ?service_name ?attrs)
let metrics_emitter_of_bq ?service_name ?attrs
(q : Any_resource.t Bounded_queue.t) : OTEL.Metrics_emitter.t =
Bounded_queue.to_emitter q
|> Opentelemetry_emitter.Emitter.flat_map
(Any_resource.of_metrics_or_empty ?service_name ?attrs)
end
(** Pair a queue with a consumer to build an exporter.
The resulting exporter will emit logs, spans, and traces directly into the
bounded queue; while the consumer takes them from the queue to forward them
somewhere else, store them, etc.
@param resource_attributes attributes added to every "resource" batch *)
let create ?(resource_attributes = []) ~(q : Any_resource.t Bounded_queue.t)
~(consumer : Consumer.any_resource_builder) () : OTEL.Exporter.t =
let emit_spans =
BQ_emitters.spans_emitter_of_bq ~attrs:resource_attributes q
in
let emit_logs = BQ_emitters.logs_emitter_of_bq ~attrs:resource_attributes q in
let emit_metrics =
BQ_emitters.metrics_emitter_of_bq ~attrs:resource_attributes q
in
let tick_set = Cb_set.create () in
let tick () = Cb_set.trigger tick_set in
let on_tick f = Cb_set.register tick_set f in
let closed = Atomic.make false in
let consumer = consumer.start_consuming q in
let cleanup ~on_done () =
if not (Atomic.exchange closed true) then (
Bounded_queue.close q;
Consumer.shutdown consumer ~on_done
) else
on_done ()
in
{ emit_logs; emit_metrics; emit_spans; tick; on_tick; cleanup }

View file

@ -0,0 +1,65 @@
(** A simple exporter that prints on stdout *)
open Common_
open Opentelemetry_util
open Opentelemetry_emitter
open struct
let pp_span out (sp : OTEL.Span.t) =
let open OTEL in
Format.fprintf out
"@[<2>SPAN@ trace_id: %a@ span_id: %a@ name: %S@ start: %a@ end: %a@]@."
Trace_id.pp
(Trace_id.of_bytes sp.trace_id)
Span_id.pp
(Span_id.of_bytes sp.span_id)
sp.name Timestamp_ns.pp_debug sp.start_time_unix_nano
Timestamp_ns.pp_debug sp.end_time_unix_nano
(* Print the items of [l] in a vertical box, one per line, while holding
   [mutex] so that concurrent emitters cannot interleave their output.
   Prints nothing for the empty list. *)
let pp_vlist mutex pp out l =
  (* structural [<>] rather than physical [!=]: comparing lists by
     identity only works for [[]] by accident of representation *)
  if l <> [] then (
    let@ () = Util_mutex.protect mutex in
    Format.fprintf out "@[<v>";
    List.iteri
      (fun i x ->
        if i > 0 then Format.fprintf out "@,";
        pp out x)
      l;
    Format.fprintf out "@]@."
  )
end
let stdout : OTEL.Exporter.t =
let open Opentelemetry_util in
let out = Format.std_formatter in
let mutex = Mutex.create () in
let ticker = Cb_set.create () in
let closed = Atomic.make false in
let tick () = Cb_set.trigger ticker in
let mk_emitter pp_signal =
let emit l =
if Atomic.get closed then raise Emitter.Closed;
pp_vlist mutex pp_signal out l
in
let enabled () = not (Atomic.get closed) in
let tick ~now:_ = () in
let flush_and_close () =
if not (Atomic.exchange closed true) then
let@ () = Util_mutex.protect mutex in
Format.pp_print_flush out ()
in
let closed () = Atomic.get closed in
{ Emitter.emit; closed; enabled; tick; flush_and_close }
in
{
emit_spans = mk_emitter pp_span;
emit_logs = mk_emitter Proto.Logs.pp_log_record;
emit_metrics = mk_emitter Proto.Metrics.pp_metric;
on_tick = Cb_set.register ticker;
tick;
cleanup = (fun ~on_done () -> on_done ());
}

View file

@ -0,0 +1,234 @@
type error = Export_error.t
(* TODO: emit this in a metric in [tick()] if self tracing is enabled? *)
(** Number of errors met during export *)
let n_errors = Atomic.make 0
(* TODO: put this somewhere with an interval limiter to 30s
(* there is a possible race condition here, as several threads might update
metrics at the same time. But that's harmless. *)
if add_own_metrics then (
Atomic.set last_sent_metrics now;
let open OT.Metrics in
[
make_resource_metrics
[
sum ~name:"otel.export.dropped" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_dropped);
];
sum ~name:"otel.export.errors" ~is_monotonic:true
[
int
~start_time_unix_nano:(Mtime.to_uint64_ns last_emit)
~now:(Mtime.to_uint64_ns now) (Atomic.get n_errors);
];
];
]
) else
[]
*)
module type IO = Generic_io.S_WITH_CONCURRENCY
module type HTTPC = sig
module IO : IO
type t
val create : unit -> t
val send :
t ->
url:string ->
decode:[ `Dec of Pbrt.Decoder.t -> 'a | `Ret of 'a ] ->
string ->
('a, error) result IO.t
val cleanup : t -> unit
end
module Make
(IO : IO)
(Notifier : Generic_notifier.S with type 'a IO.t = 'a IO.t)
(Httpc : HTTPC with type 'a IO.t = 'a IO.t) : sig
val consumer :
?override_n_workers:int ->
ticker_task:float option ->
stop:bool Atomic.t ->
config:Client_config.t ->
unit ->
Consumer.any_resource_builder
(** Create a consumer.
@param stop
shared stop variable, set to true to stop this (and maybe other tasks)
@param ticker_task
controls whether we start a task to call [tick] at the given interval in
seconds, or [None] to not start such a task at all. *)
end = struct
module Proto = Opentelemetry_proto
open IO
type other_config = {
override_n_workers: int option;
ticker_task: float option;
}
type state = {
stop: bool Atomic.t;
cleaned: bool Atomic.t; (** True when we cleaned up after closing *)
config: Client_config.t;
other_config: other_config;
q: Any_resource.t Bounded_queue.t;
notify: Notifier.t;
}
let shutdown self =
Atomic.set self.stop true;
if not (Atomic.exchange self.cleaned true) then (
Notifier.trigger self.notify;
Notifier.delete self.notify
)
(* Send one already-encoded protobuf payload [data] to [url].
   - on success: reset the backoff;
   - on ctrl-c ([`Sysbreak]): set the shared [stop] flag so the whole
     consumer winds down;
   - on any other error: report it, bump the global [n_errors] counter,
     then sleep for the current backoff duration (plus up to 10% jitter)
     to avoid a tight error loop. *)
let send_http_ (self : state) (httpc : Httpc.t) ~backoff ~url (data : string)
    : unit IO.t =
  let* r = Httpc.send httpc ~url ~decode:(`Ret ()) data in
  match r with
  | Ok () ->
    Util_backoff.on_success backoff;
    IO.return ()
  | Error `Sysbreak ->
    Printf.eprintf "ctrl-c captured, stopping\n%!";
    Atomic.set self.stop true;
    IO.return ()
  | Error err ->
    Atomic.incr n_errors;
    Export_error.report_err err;
    (* avoid crazy error loop *)
    let dur_s = Util_backoff.cur_duration_s backoff in
    Util_backoff.on_error backoff;
    IO.sleep_s (dur_s +. Random.float (dur_s /. 10.))
let send_metrics_http (st : state) client ~encoder ~backoff
(l : Proto.Metrics.resource_metrics list) =
let msg = Signal.Encode.metrics ~encoder l in
send_http_ st client msg ~backoff ~url:st.config.url_metrics
let send_traces_http st client ~encoder ~backoff
(l : Proto.Trace.resource_spans list) =
let msg = Signal.Encode.traces ~encoder l in
send_http_ st client msg ~backoff ~url:st.config.url_traces
let send_logs_http st client ~encoder ~backoff
(l : Proto.Logs.resource_logs list) =
let msg = Signal.Encode.logs ~encoder l in
send_http_ st client msg ~backoff ~url:st.config.url_logs
let tick (self : state) = Notifier.trigger self.notify
let start_worker (self : state) : unit =
let client = Httpc.create () in
let encoder = Pbrt.Encoder.create () in
let backoff = Util_backoff.create () in
(* loop on [q] *)
let rec loop () : unit IO.t =
if Atomic.get self.stop then
IO.return ()
else
let* () =
match Bounded_queue.try_pop self.q with
| `Closed ->
shutdown self;
IO.return ()
| `Empty -> Notifier.wait self.notify
| `Item (R_logs logs) ->
send_logs_http self client ~encoder ~backoff logs
| `Item (R_metrics ms) ->
send_metrics_http self client ~encoder ~backoff ms
| `Item (R_spans spans) ->
send_traces_http self client ~encoder ~backoff spans
in
loop ()
in
IO.spawn (fun () ->
IO.protect loop ~finally:(fun () ->
Httpc.cleanup client;
IO.return ()))
let start_ticker (self : state) ~(interval_s : float) : unit =
let rec loop () : unit IO.t =
if Atomic.get self.stop then
IO.return ()
else
let* () = IO.sleep_s interval_s in
tick self;
loop ()
in
IO.spawn loop
let default_n_workers = 50
(* Build the consumer state, spawn [n_workers] HTTP worker tasks that
   drain the queue, and optionally start the ticker task. *)
let create_state ?override_n_workers ~ticker_task ~stop ~config ~q () : state
    =
  let other_config = { override_n_workers; ticker_task } in
  let self =
    {
      stop;
      config;
      other_config;
      q;
      cleaned = Atomic.make false;
      notify = Notifier.create ();
    }
  in
  (* start workers *)
  let n_workers =
    (* clamp the requested concurrency level to [2, 500]. Note the
       order: [max 2 (min 500 n)]. The previous [min 2 (max 500 n)]
       always evaluated to 2, ignoring [http_concurrency_level] and
       [override_n_workers] entirely. *)
    max 2
      (min 500
         (match
            ( self.other_config.override_n_workers,
              self.config.http_concurrency_level )
          with
         | Some n, _ -> n
         | None, Some n -> n
         | None, None -> default_n_workers))
  in
  for _i = 1 to n_workers do
    start_worker self
  done;
  (* start ticker *)
  (match self.other_config.ticker_task with
  | None -> ()
  | Some interval_s -> start_ticker self ~interval_s);
  self
let to_consumer (self : state) : Any_resource.t Consumer.t =
let active () = not (Atomic.get self.stop) in
let shutdown ~on_done =
shutdown self;
on_done ()
in
let tick () = tick self in
{ active; tick; shutdown }
let consumer ?override_n_workers ~ticker_task ~stop ~config () :
Consumer.any_resource_builder =
{
start_consuming =
(fun q ->
let st =
create_state ?override_n_workers ~ticker_task ~stop ~config ~q ()
in
to_consumer st);
}
end

28
src/client/generic_io.ml Normal file
View file

@ -0,0 +1,28 @@
(** Generic IO *)
module type S = sig
type 'a t
val return : 'a -> 'a t
val ( let* ) : 'a t -> ('a -> 'b t) -> 'b t
val protect : finally:(unit -> unit t) -> (unit -> 'a t) -> 'a t
end
module type S_WITH_CONCURRENCY = sig
include S
val sleep_s : float -> unit t
val spawn : (unit -> unit t) -> unit
end
module Direct_style : S with type 'a t = 'a = struct
type 'a t = 'a
let[@inline] return x = x
let[@inline] ( let* ) x f = f x
let protect = Fun.protect
end

View file

@ -0,0 +1,17 @@
module type IO = Generic_io.S
module type S = sig
module IO : IO
type t
val create : unit -> t
val delete : t -> unit
val trigger : t -> unit
val wait : t -> unit IO.t
val register_bounded_queue : t -> _ Bounded_queue.t -> unit
end

View file

@ -0,0 +1 @@
module OTEL = Opentelemetry

21
src/client/lwt/dune Normal file
View file

@ -0,0 +1,21 @@
(library
(name opentelemetry_client_lwt)
(public_name opentelemetry-client.lwt)
(flags
:standard
-open
Opentelemetry_util
-open
Opentelemetry_client
-open
Opentelemetry_atomic)
(optional) ; lwt
(libraries
opentelemetry.core
opentelemetry.util
opentelemetry.atomic
opentelemetry.emitter
opentelemetry-client
lwt
lwt.unix)
(synopsis "Lwt-specific helpers for opentelemetry-client"))

11
src/client/lwt/io_lwt.ml Normal file
View file

@ -0,0 +1,11 @@
type 'a t = 'a Lwt.t
let return = Lwt.return
let ( let* ) = Lwt.Syntax.( let* )
let sleep_s = Lwt_unix.sleep
let spawn = Lwt.async
let[@inline] protect ~finally f = Lwt.finalize f finally

View file

@ -0,0 +1 @@
include Generic_io.S_WITH_CONCURRENCY with type 'a t = 'a Lwt.t

View file

@ -0,0 +1,40 @@
(** Notification that can be used on the consumer side of a bounded queue *)
module IO = Io_lwt
type t = {
notified: bool Atomic.t;
cond: unit Lwt_condition.t;
notification: int;
lwt_tid: int; (** thread ID where lwt runs *)
deleted: bool Atomic.t;
}
let create () : t =
let notified = Atomic.make false in
let cond = Lwt_condition.create () in
let notification =
Lwt_unix.make_notification (fun () ->
Atomic.set notified false;
Lwt_condition.broadcast cond ())
in
let lwt_tid = Thread.id @@ Thread.self () in
{ notified; notification; cond; lwt_tid; deleted = Atomic.make false }
let delete self : unit =
if not (Atomic.exchange self.deleted true) then
Lwt_unix.stop_notification self.notification
let trigger (self : t) : unit =
let tid = Thread.id @@ Thread.self () in
if tid = self.lwt_tid then
(* in lwt thread, directly use the condition *)
Lwt_condition.broadcast self.cond ()
else if not (Atomic.exchange self.notified true) then
Lwt_unix.send_notification self.notification
let wait (self : t) : unit Lwt.t = Lwt_condition.wait self.cond
let register_bounded_queue (self : t) (q : _ Bounded_queue.t) : unit =
Bounded_queue.on_non_empty q (fun () -> trigger self)

View file

@ -0,0 +1 @@
include Generic_notifier.S with module IO = Io_lwt

View file

@ -0,0 +1,18 @@
open Common_
open Lwt.Syntax
(** Lwt task that calls [Exporter.tick] regularly, to help enforce timeouts.
@param frequency_s how often in seconds does the tick tock? *)
let start_ticker_thread ?(finally = ignore) ~(stop : bool Atomic.t)
~(frequency_s : float) (exp : OTEL.Exporter.t) : unit =
let frequency_s = max frequency_s 0.5 in
let rec tick_loop () =
if Atomic.get stop then (
finally ();
Lwt.return ()
) else
let* () = Lwt_unix.sleep frequency_s in
OTEL.Exporter.tick exp;
tick_loop ()
in
Lwt.async tick_loop

View file

@ -0,0 +1,21 @@
module IO = Generic_io.Direct_style
type t = {
mutex: Mutex.t;
cond: Condition.t;
}
let create () : t = { mutex = Mutex.create (); cond = Condition.create () }
let trigger self = Condition.signal self.cond
let delete = ignore
(* Block the calling thread until the next [trigger]. NOTE(review): there
   is no predicate loop here, so a signal sent while nobody is waiting is
   lost and wakeups may be spurious — callers are expected to re-check
   their own condition (e.g. the queue) in a loop around [wait]. *)
let wait self =
  Mutex.lock self.mutex;
  Condition.wait self.cond self.mutex;
  Mutex.unlock self.mutex
(** Ensure we get signalled when the queue goes from empty to non-empty *)
let register_bounded_queue (self : t) (bq : _ Bounded_queue.t) : unit =
Bounded_queue.on_non_empty bq (fun () -> trigger self)

View file

@ -0,0 +1 @@
include Generic_notifier.S with type 'a IO.t = 'a

59
src/client/rpool.ml Normal file
View file

@ -0,0 +1,59 @@
module A = Atomic
type 'a list_ =
| Nil
| Cons of int * 'a * 'a list_
type 'a t = {
mk_item: unit -> 'a;
clear: 'a -> unit;
max_size: int; (** Max number of items *)
items: 'a list_ A.t;
}
let create ?(clear = ignore) ~mk_item ?(max_size = 512) () : _ t =
{ mk_item; clear; max_size; items = A.make Nil }
(* Take an item from the pool, or create a fresh one with [mk_item] if
   the pool is empty. Lock-free: retries the whole read-then-CAS if
   another thread won the race to pop the head. *)
let rec acquire self =
  match A.get self.items with
  | Nil -> self.mk_item ()
  | Cons (_, x, tl) as l ->
    if A.compare_and_set self.items l tl then
      x
    else
      acquire self
let[@inline] size_ = function
| Cons (sz, _, _) -> sz
| Nil -> 0
(* Return [x] to the pool for reuse, unless the pool already holds
   [max_size] items, in which case [x] is dropped on the floor.
   [clear] runs on [x] {i before} re-insertion so recycled items are
   always handed out clean. Lock-free: retries the CAS on contention.
   The head [Cons] caches the current size so the capacity check is O(1). *)
let release self x : unit =
  let rec loop () =
    match A.get self.items with
    | Cons (sz, _, _) when sz >= self.max_size ->
      (* forget the item *)
      ()
    | l ->
      if not (A.compare_and_set self.items l (Cons (size_ l + 1, x, l))) then
        loop ()
  in
  self.clear x;
  loop ()
let with_resource (self : _ t) f =
let x = acquire self in
try
let res = f x in
release self x;
res
with e ->
let bt = Printexc.get_raw_backtrace () in
release self x;
Printexc.raise_with_backtrace e bt
module Raw = struct
let release = release
let acquire = acquire
end

27
src/client/rpool.mli Normal file
View file

@ -0,0 +1,27 @@
(** Simple resource pool.
This is intended for buffers, protobuf encoders, etc. *)
type 'a t
(** Pool of values of type ['a] *)
val create :
?clear:('a -> unit) -> mk_item:(unit -> 'a) -> ?max_size:int -> unit -> 'a t
(** Create a new pool.
@param mk_item produce a new item in case the pool is empty
@param max_size
maximum number of items in the pool before we start dropping resources on
the floor. This controls resource consumption.
@param clear a function called on items before recycling them. *)
val with_resource : 'a t -> ('a -> 'b) -> 'b
(** [with_resource pool f] runs [f x] with [x] a resource; when [f] fails or
returns, [x] is returned to the pool for future reuse. *)
(** Low level control over the pool. This is easier to get wrong (e.g. releasing
the same resource twice) so use with caution. *)
module Raw : sig
val acquire : 'a t -> 'a
val release : 'a t -> 'a -> unit
end

49
src/client/sampler.ml Normal file
View file

@ -0,0 +1,49 @@
type t = {
proba_accept: float;
n_seen: int Atomic.t;
n_accepted: int Atomic.t;
}
let create ~proba_accept () : t =
  (* FIXME: either carry a random state and protect it, or make sure
     we Random.self_init() in the current domain?? *)
  if proba_accept < 0. || proba_accept > 1. then
    invalid_arg "sampler: proba_accept must be in [0., 1.]";
  { proba_accept; n_seen = Atomic.make 0; n_accepted = Atomic.make 0 }
let[@inline] proba_accept self = self.proba_accept
(* Observed acceptance ratio so far; reports 1. before any signal has
   been seen, to avoid dividing by zero. *)
let actual_rate (self : t) : float =
  let total = Atomic.get self.n_seen in
  if total = 0 then
    1.
  else
    float (Atomic.get self.n_accepted) /. float total
let accept (self : t) : bool =
Atomic.incr self.n_seen;
let n = Random.float 1. in
let res = n < self.proba_accept in
if res then Atomic.incr self.n_accepted;
res
open Opentelemetry_emitter
let wrap_emitter (self : t) (e : _ Emitter.t) : _ Emitter.t =
let enabled () = e.enabled () in
let closed () = Emitter.closed e in
let flush_and_close () = Emitter.flush_and_close e in
let tick ~now = Emitter.tick e ~now in
let emit l =
if l <> [] && e.enabled () then (
let accepted = List.filter (fun _x -> accept self) l in
if accepted <> [] then Emitter.emit e accepted
)
in
{ Emitter.closed; enabled; flush_and_close; tick; emit }

25
src/client/sampler.mli Normal file
View file

@ -0,0 +1,25 @@
(** Basic random sampling *)
type t
val create : proba_accept:float -> unit -> t
(** [create ~proba_accept:n ()] makes a new sampler.
The sampler will accept signals with probability [n] (must be between 0 and
1).
@raise Invalid_argument if [n] is not between 0 and 1. *)
val accept : t -> bool
(** Do we accept a sample? This returns [true] with probability [proba_accept].
*)
val proba_accept : t -> float
val actual_rate : t -> float
(** The ratio of signals we actually accepted so far *)
open Opentelemetry_emitter
val wrap_emitter : t -> 'a Emitter.t -> 'a Emitter.t
(** [wrap_emitter sampler e] is a new emitter that uses the [sampler] on each
individual signal before passing them to [e]. *)

View file

@ -1,22 +1,28 @@
module OT = Opentelemetry
open Common_
let enabled = Atomic.make false
let add_event (scope : OT.Scope.t) ev = OT.Scope.add_event scope (fun () -> ev)
let tracer = Atomic.make OTEL.Tracer.dynamic_forward_to_main_exporter
let dummy_trace_id_ = OT.Trace_id.dummy
let[@inline] add_event (scope : OTEL.Span.t) ev = OTEL.Span.add_event scope ev
let dummy_span_id = OT.Span_id.dummy
let set_tracer tr = Atomic.set tracer tr
let dummy_trace_id_ = OTEL.Trace_id.dummy
let dummy_span_id = OTEL.Span_id.dummy
let with_ ?kind ?attrs name f =
if Atomic.get enabled then
OT.Trace.with_ ?kind ?attrs name f
else (
if Atomic.get enabled then (
let tracer = Atomic.get tracer in
OTEL.Tracer.with_ tracer ?kind ?attrs name f
) else (
(* A new scope is needed here because it might be modified *)
let scope =
OT.Scope.make ~trace_id:dummy_trace_id_ ~span_id:dummy_span_id ()
let span : OTEL.Span.t =
OTEL.Span.make ~trace_id:dummy_trace_id_ ~id:dummy_span_id ~start_time:0L
~end_time:0L name
in
f scope
f span
)
let set_enabled b = Atomic.set enabled b

View file

@ -1,12 +1,21 @@
(** Mini tracing module (disabled if [config.self_trace=false]) *)
val add_event : Opentelemetry.Scope.t -> Opentelemetry.Event.t -> unit
open Common_
val add_event : OTEL.Span.t -> OTEL.Event.t -> unit
val with_ :
?kind:Opentelemetry.Span_kind.t ->
?attrs:(string * Opentelemetry.value) list ->
?kind:OTEL.Span_kind.t ->
?attrs:(string * OTEL.value) list ->
string ->
(Opentelemetry.Scope.t -> 'a) ->
(OTEL.Span.t -> 'a) ->
'a
(** A simple way to create spans to instrument parts of the OTEL SDK itself. *)
val set_tracer : OTEL.Tracer.t -> unit
(** Set the tracer to use for self-tracing. We need to make sure it will not
lead to infinite loops (if the tracer itself is self-tracing, it might
invoke itself recursively, and so on). *)
val set_enabled : bool -> unit
(** Enable self tracing. A tracer must also be set. *)

View file

@ -37,7 +37,7 @@ let is_logs = function
| _ -> false
module Encode = struct
let resource_to_string ~encoder ~ctor ~enc resource =
let resource_to_string ~encoder ~ctor ~enc resource : string =
let encoder =
match encoder with
| Some e ->
@ -48,29 +48,28 @@ module Encode = struct
let x = ctor resource in
let@ _sc = Self_trace.with_ ~kind:Span.Span_kind_internal "encode-proto" in
enc x encoder;
Pbrt.Encoder.to_string encoder
let data = Pbrt.Encoder.to_string encoder in
Pbrt.Encoder.reset encoder;
data
let logs ?encoder resource_logs =
resource_logs
|> resource_to_string ~encoder
~ctor:(fun r ->
Logs_service.make_export_logs_service_request ~resource_logs:r ())
~enc:Logs_service.encode_pb_export_logs_service_request
resource_to_string ~encoder resource_logs
~ctor:(fun r ->
Logs_service.make_export_logs_service_request ~resource_logs:r ())
~enc:Logs_service.encode_pb_export_logs_service_request
let metrics ?encoder resource_metrics =
resource_metrics
|> resource_to_string ~encoder
~ctor:(fun r ->
Metrics_service.make_export_metrics_service_request
~resource_metrics:r ())
~enc:Metrics_service.encode_pb_export_metrics_service_request
resource_to_string ~encoder resource_metrics
~ctor:(fun r ->
Metrics_service.make_export_metrics_service_request ~resource_metrics:r
())
~enc:Metrics_service.encode_pb_export_metrics_service_request
let traces ?encoder resource_spans =
resource_spans
|> resource_to_string ~encoder
~ctor:(fun r ->
Trace_service.make_export_trace_service_request ~resource_spans:r ())
~enc:Trace_service.encode_pb_export_trace_service_request
resource_to_string ~encoder resource_spans
~ctor:(fun r ->
Trace_service.make_export_trace_service_request ~resource_spans:r ())
~enc:Trace_service.encode_pb_export_trace_service_request
end
module Decode = struct

View file

@ -1,4 +1,4 @@
open Opentelemetry.Util_mutex
module UM = Opentelemetry_util.Util_mutex
type 'a t = {
mutex: Mutex.t;
@ -18,14 +18,14 @@ let create () : _ t =
}
let close (self : _ t) =
protect self.mutex @@ fun () ->
UM.protect self.mutex @@ fun () ->
if not self.closed then (
self.closed <- true;
Condition.broadcast self.cond (* awake waiters so they fail *)
)
let push (self : _ t) x : unit =
protect self.mutex @@ fun () ->
UM.protect self.mutex @@ fun () ->
if self.closed then
raise Closed
else (
@ -45,7 +45,7 @@ let pop (self : 'a t) : 'a =
x
)
in
protect self.mutex loop
UM.protect self.mutex loop
let pop_all (self : 'a t) into : unit =
let rec loop () =
@ -56,4 +56,4 @@ let pop_all (self : 'a t) into : unit =
) else
Queue.transfer self.q into
in
protect self.mutex loop
UM.protect self.mutex loop

View file

@ -1,4 +1,4 @@
(** Basic Blocking Queue *)
(** Simple blocking queue *)
type 'a t
@ -15,8 +15,9 @@ val pop : 'a t -> 'a
@raise Closed if the queue was closed before a new element was available. *)
val pop_all : 'a t -> 'a Queue.t -> unit
(** [pop_all q into] pops all the elements of [q] and moves them into [into]. It
might block until an element comes.
(** [pop_all q into] pops all the elements of [q] and moves them into [into]. if
no element is available, it will block until it successfully transfers at
least one item to [into].
@raise Closed if the queue was closed before a new element was available. *)
val close : _ t -> unit

View file

@ -0,0 +1,13 @@
(** Exponential backoff state: the current delay shrinks fast after a
    success and doubles after each error, clamped to
    [[min_delay_s, max_delay_s]]. *)
type t = {
  mutable delay_s: float;  (** current delay, in seconds *)
  min_delay_s: float;  (** lower bound for [delay_s] *)
  max_delay_s: float;  (** upper bound for [delay_s] *)
}

let create () : t = { delay_s = 0.001; min_delay_s = 0.001; max_delay_s = 20. }

(* a success shrinks the delay tenfold, never below the lower bound *)
let on_success (self : t) : unit =
  let shrunk = self.delay_s /. 10. in
  self.delay_s <-
    (if shrunk < self.min_delay_s then
       self.min_delay_s
     else
       shrunk)

(* an error doubles the delay, never above the upper bound *)
let on_error (self : t) : unit =
  let grown = self.delay_s *. 2. in
  self.delay_s <-
    (if grown > self.max_delay_s then
       self.max_delay_s
     else
       grown)

let[@inline] cur_duration_s (self : t) : float = self.delay_s

View file

@ -0,0 +1,12 @@
(** Backoff behavior in case of errors.

    Tracks a retry delay that grows after failures and shrinks after
    successes, staying within fixed lower/upper bounds. *)

type t
(** Backoff state, holding the current delay. Not thread safe. *)

val create : unit -> t
(** [create ()] returns a fresh backoff state, starting at its minimal
    delay. *)

val on_success : t -> unit
(** Record a success: shrinks the current delay (never below its lower
    bound). *)

val on_error : t -> unit
(** Record an error: grows the current delay (never above its upper
    bound). *)

val cur_duration_s : t -> float
(** Current delay to wait before the next attempt, in seconds. *)

View file

@ -0,0 +1,34 @@
(** Group signals into [resource_xxx] objects *)
open Common_
(** Wrap a batch of log records into a single [resource_logs], attaching the
    global instrumentation scope and the resource attributes built from
    [?service_name]/[?attrs] and the globals. *)
let make_resource_logs ?service_name ?attrs (logs : Proto.Logs.log_record list)
    : Proto.Logs.resource_logs =
  let scope_log =
    Proto.Logs.make_scope_logs ~scope:OTEL.Globals.instrumentation_library
      ~log_records:logs ()
  in
  let resource =
    Proto.Resource.make_resource
      ~attributes:(OTEL.Globals.mk_attributes ?service_name ?attrs ())
      ()
  in
  Proto.Logs.make_resource_logs ~resource ~scope_logs:[ scope_log ] ()
(** Wrap a batch of spans into a single [resource_spans], attaching the
    global instrumentation scope and the resource attributes built from
    [?service_name]/[?attrs] and the globals. *)
let make_resource_spans ?service_name ?attrs spans : Proto.Trace.resource_spans
    =
  let resource =
    Proto.Resource.make_resource
      ~attributes:(OTEL.Globals.mk_attributes ?service_name ?attrs ())
      ()
  in
  let scope_span =
    Proto.Trace.make_scope_spans ~scope:OTEL.Globals.instrumentation_library
      ~spans ()
  in
  Proto.Trace.make_resource_spans ~resource ~scope_spans:[ scope_span ] ()
(** Aggregate metrics into a {!Proto.Metrics.resource_metrics}, attaching the
    global instrumentation scope and the resource attributes built from
    [?service_name]/[?attrs] and the globals. *)
let make_resource_metrics ?service_name ?attrs (l : OTEL.Metrics.t list) :
    Proto.Metrics.resource_metrics =
  let scope_metric =
    Proto.Metrics.make_scope_metrics
      ~scope:OTEL.Globals.instrumentation_library ~metrics:l ()
  in
  let resource =
    Proto.Resource.make_resource
      ~attributes:(OTEL.Globals.mk_attributes ?service_name ?attrs ())
      ()
  in
  Proto.Metrics.make_resource_metrics ~scope_metrics:[ scope_metric ] ~resource
    ()

44
src/client/util_thread.ml Normal file
View file

@ -0,0 +1,44 @@
open Common_
(** Spawn a background thread running [f ()]. On Unix, a set of common
    signals is first blocked inside the new thread (so the process can keep
    handling them elsewhere); Windows has no thread signal masks, so [f] is
    run directly there. *)
let start_bg_thread (f : unit -> unit) : Thread.t =
  (* signals we do not want delivered to this worker thread *)
  let blocked_signals =
    [
      Sys.sigusr1;
      Sys.sigusr2;
      Sys.sigterm;
      Sys.sigpipe;
      Sys.sigalrm;
      Sys.sigstop;
    ]
  in
  let thread_main () =
    (* no signals on Windows *)
    if not Sys.win32 then
      ignore (Thread.sigmask Unix.SIG_BLOCK blocked_signals : _ list);
    f ()
  in
  Thread.create thread_main ()
(** Start a background thread that calls {!OTEL.Exporter.tick} on [exp]
    every [sleep_ms] milliseconds until [stop] becomes [true], to help
    enforce timeouts. [Sync_queue.Closed] ends the loop silently; any other
    exception is printed to stderr and also ends the loop. *)
let setup_ticker_thread ~stop ~sleep_ms (exp : OTEL.Exporter.t) () =
  let period_s = float sleep_ms /. 1000. in
  let run () =
    match
      while not (Atomic.get stop) do
        Thread.delay period_s;
        OTEL.Exporter.tick exp
      done
    with
    | () -> ()
    | exception Sync_queue.Closed -> ()
    | exception exn ->
      (* print and ignore *)
      Printf.eprintf "otel-ocurl: ticker thread: uncaught exn:\n%s\n%!"
        (Printexc.to_string exn)
  in
  start_bg_thread run

13
src/core/any_signal.ml Normal file
View file

@ -0,0 +1,13 @@
(** Any kind of signal *)
open Common_
(** One signal of any of the three supported kinds. *)
type t =
  | Span of Span.t
  | Metric of Metrics.t
  | Log of Log_record.t

(* pretty-print by dispatching to the protobuf printer of each kind *)
let pp out (self : t) : unit =
  match self with
  | Span s -> Proto.Trace.pp_span out s
  | Metric m -> Proto.Metrics.pp_metric out m
  | Log l -> Proto.Logs.pp_log_record out l

4
src/core/common_.ml Normal file
View file

@ -0,0 +1,4 @@
(* short alias for building formatted strings *)
let spf = Printf.sprintf

(* generated protobuf types and encoders/decoders *)
module Proto = Opentelemetry_proto

(* Atomic implementation provided by the [opentelemetry-atomic] shim *)
module Atomic = Opentelemetry_atomic.Atomic

Some files were not shown because too many files have changed in this diff Show more