Compare commits


No commits in common. "main" and "v0.5" have entirely different histories.
main ... v0.5

359 changed files with 2937 additions and 11730 deletions

View file

@@ -13,23 +13,21 @@ jobs:
- uses: actions/checkout@main
- name: Use OCaml
uses: ocaml/setup-ocaml@v3
uses: ocaml/setup-ocaml@v2
with:
ocaml-compiler: '5.3'
ocaml-compiler: '5.0'
dune-cache: true
allow-prerelease-opam: true
# temporary until it's in a release
- run: opam pin picos 0.6.0 -y -n
- name: Deps
run: opam install odig moonpool
- run: opam install odig moonpool moonpool-lwt -t
- run: opam exec -- odig odoc --cache-dir=_doc/ moonpool moonpool-lwt
- name: Build
run: opam exec -- odig odoc --cache-dir=_doc/ moonpool
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./_doc/html
destination_dir: .
enable_jekyll: false
destination_dir: dev
enable_jekyll: true

View file

@@ -8,82 +8,34 @@ on:
jobs:
run:
name: build # build+test on various versions of OCaml, on linux
timeout-minutes: 15
name: build
timeout-minutes: 10
strategy:
fail-fast: true
matrix:
os:
- ubuntu-latest
ocaml-compiler:
- '5.0'
- '5.3'
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@main
- name: Use OCaml ${{ matrix.ocaml-compiler }}
uses: ocaml/setup-ocaml@v3
with:
ocaml-compiler: ${{ matrix.ocaml-compiler }}
dune-cache: true
allow-prerelease-opam: true
- run: opam pin picos 0.6.0 -y -n
- run: opam install -t moonpool moonpool-lwt --deps-only
- run: opam exec -- dune build @install
# install some depopts
- run: opam install thread-local-storage trace hmap
- run: opam exec -- dune build --profile=release --force @install @runtest
compat:
name: build-compat # compat with other OSes
timeout-minutes: 15
strategy:
fail-fast: true
matrix:
os:
- macos-latest
#- macos-latest
#- windows-latest
ocaml-compiler:
- '5.2'
- '4.08'
- '4.14'
- '5.0'
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@main
- name: Use OCaml ${{ matrix.ocaml-compiler }}
uses: ocaml/setup-ocaml@v3
uses: ocaml/setup-ocaml@v2
with:
ocaml-compiler: ${{ matrix.ocaml-compiler }}
dune-cache: true
allow-prerelease-opam: true
# temporary until it's in a release
- run: opam pin https://github.com/ocaml-multicore/picos.git -y -n
- run: opam install -t moonpool moonpool-lwt --deps-only
- run: opam install -t moonpool --deps-only
- run: opam exec -- dune build @install
# install some depopts
- run: opam install thread-local-storage trace domain-local-await
- run: opam exec -- dune build --profile=release --force @install @runtest
format:
name: format
strategy:
matrix:
ocaml-compiler:
- '5.3'
runs-on: 'ubuntu-latest'
steps:
- uses: actions/checkout@main
- name: Use OCaml ${{ matrix.ocaml-compiler }}
uses: ocaml/setup-ocaml@v3
with:
ocaml-compiler: ${{ matrix.ocaml-compiler }}
dune-cache: true
allow-prerelease-opam: true
- run: opam install ocamlformat.0.27.0
- run: opam exec -- make format-check
- run: opam exec -- dune runtest
- run: opam install domain-local-await
if: matrix.ocaml-compiler == '5.0'
- run: opam exec -- dune build @install @runtest
if: matrix.ocaml-compiler == '5.0'

.gitignore vendored
View file

@@ -1,3 +1,2 @@
_build
_opam
*.tmp

View file

@@ -1,4 +1,4 @@
version = 0.27.0
version = 0.24.1
profile=conventional
margin=80
if-then-else=k-r

View file

@@ -1,89 +1,10 @@
# 0.10
- breaking: remove `around_task` from schedulers
- breaking: remove `moonpool.fib` entirely. Please use `picos_std.structured`
if you really need structured concurrency.
- remove deprecated moonpool-io and moonpool.sync
- feat core: add `Main`, salvaged from moonpool.fib
- block signals in background threads
- refactor `chan`; fix bug in `Chan.try_push`
- fix: make `Moonpool_lwt.fut_of_lwt` idempotent
# 0.9
- breaking: require OCaml 5
* no further need for a preprocessor
* forkjoin no longer optional
- moonpool-lwt: large changes, including a Runner that runs
inside `Lwt_unix`'s event loop and can thus use any `_ Lwt.t` function
- remove bounded_queue
- fix core: better repropagating of errors
- add `Fut.{cancel,try_cancel}`
- perf: `await` on immediately ready timer queues its task
- feat: add `Moonpool.yield`
- deprecate moonpool.sync
- deprecate moonpool_io
# 0.8
- api(fut): make alias `'a Fut.t = 'a Picos.Computation.t` public
- feat: add `Fut.make_promise`, have `'a promise = private 'a fut`
- feat(exn_bt): in show/pp, do print the backtrace when present
- feat: block signals in workers if asked to
- relax bound on picos to 0.5-0.6
- feat fib: `spawn_ignore` now has `?on` optional param
- change Moonpool.Chan so it's bounded (still experimental)
- fix task local storage: type was too specific
- fix fiber: use a single fut/computation in fibers
# 0.7
- add `Moonpool_fiber.spawn_top_ignore`
- add `moonpool-io`, based on `picos_io` (still very experimental)
- move to picos as the foundation layer for concurrency primitives (#30)
- move to `thread-local-storage` 0.2 with get/set API
# 0.6
- breaking: remove `Immediate_runner` (bug prone and didn't
handle effects). `Moonpool_fib.main` can be used to handle
effects in the main function.
- remove deprecated alias `Moonpool.Pool`
- feat: add structured concurrency sub-library `moonpool.fib` with
fibers. Fibers can use `await` and spawn other fibers that will
be appropriately cancelled when their parent is.
- feat: add `moonpool-lwt` as an experimental bridge between moonpool and lwt.
This allows moonpool runners to be used from within Lwt to
perform background computations, and conversely to call Lwt from
moonpool with some precautions.
- feat: task-local storage in the main moonpool runners, available from
fibers and regular tasks.
- feat: add `Exn_bt` to core
- feat: add `Runner.dummy`
- make `moonpool.forkjoin` optional (only on OCaml >= 5.0)
- feat: add `Fut.Advanced.barrier_on_abstract_container_of_futures`
- feat: add `Fut.map_list`
- refactor: split off domain pool to `moonpool.dpool`
- fix too early exit in Ws_pool
# 0.5.1
- fix `Ws_pool`: workers would exit before processing
all remaining tasks upon shutdown
# 0.5
## features
- add `Bb_queue.transfer`
- add `Bb_queue.to_{iter,gen,seq}`
-add `Bb_queue.to_{iter,gen,seq}`
- add `Fifo_pool`, a simple pool with a single blocking queue for
workloads with coarse granularity tasks that value
latency (e.g. a web server)
@@ -112,9 +33,7 @@
## breaking
- deprecate `Pool`, now an alias to `Fifo_pool`
- the `Fut.Infix_local` and `Fut.infix` are gone, replaced with
a simpler `Fut.Infix` module that tries to use the current runner
for intermediate tasks.
# 0.4

View file

@@ -9,21 +9,12 @@ clean:
test:
@dune runtest $(DUNE_OPTS)
test-autopromote:
@dune runtest $(DUNE_OPTS) --auto-promote
doc:
@dune build $(DUNE_OPTS) @doc
build-dev:
dune build @install @runtest $(DUNE_OPTS) --workspace=dune-workspace.dev
format:
@dune build $(DUNE_OPTS) @fmt --auto-promote
format-check:
@dune build $(DUNE_OPTS) @fmt --display=quiet
WATCH?= @check @runtest
watch:
dune build $(DUNE_OPTS) -w $(WATCH)
@@ -67,14 +58,6 @@ bench-pi:
'./_build/default/benchs/pi.exe -n $(PI_NSTEPS) -j 16 -mode forkjoin -kind=pool' \
'./_build/default/benchs/pi.exe -n $(PI_NSTEPS) -j 20 -mode forkjoin -kind=pool'
bench-repro-41:
dune build $(DUNE_OPTS_BENCH) examples/repro_41/run.exe
hyperfine --warmup=1 \
"./_build/default/examples/repro_41/run.exe 4 domainslib" \
"./_build/default/examples/repro_41/run.exe 4 moonpool" \
"./_build/default/examples/repro_41/run.exe 5 moonpool" \
"./_build/default/examples/repro_41/run.exe 5 seq"
.PHONY: test clean bench-fib bench-pi
VERSION=$(shell awk '/^version:/ {print $$2}' moonpool.opam)

View file

@@ -16,11 +16,10 @@ In addition, some concurrency and parallelism primitives are provided:
- `Moonpool.Chan` provides simple cooperative and thread-safe channels
to use within pool-bound tasks. They're essentially re-usable futures.
Moonpool now requires OCaml 5 (meaning there's actual domains and effects, not just threads),
so the `Fut.await` primitive is always provided. It's simpler and more powerful
On OCaml 5 (meaning there's actual domains and effects, not just threads),
a `Fut.await` primitive is provided. It's simpler and more powerful
than the monadic combinators.
- `Moonpool_forkjoin`, in the library `moonpool.forkjoin`
provides the fork-join parallelism primitives
- `Moonpool.Fork_join` provides the fork-join parallelism primitives
to use within tasks running in the pool.
## Usage
@@ -165,32 +164,16 @@ val expected_sum : int = 5050
- : unit = ()
```
### Errors
We have an `Exn_bt.t` type that comes in handy in many places. It bundles together
an exception and the backtrace associated with the place the exception was caught.
### Local storage
Moonpool, via picos, provides _task local storage_ (like thread-local storage, but per task).
The storage is convenient for carrying around context for cross-cutting concerns such
as logging or tracing (e.g. a log tag for the current user or request ID, or a tracing
scope).
### Fork-join
The sub-library `moonpool.forkjoin`
provides a module `Moonpool_forkjoin`
On OCaml 5, again using effect handlers, the module `Fork_join`
implements the [fork-join model](https://en.wikipedia.org/wiki/Fork%E2%80%93join_model).
It must run on a pool (using `Runner.run_async` or inside a future via `Fut.spawn`).
It must run on a pool (using [Runner.run_async] or inside a future via [Fut.spawn]).
It is generally better to use the work-stealing pool for workloads that rely on
fork-join for better performance, because fork-join will tend to spawn lots of
shorter tasks.
Here is a simple example of a parallel sort.
It uses selection sort for small slices, like this:
```ocaml
# let rec select_sort arr i len =
if len >= 2 then (
@@ -204,11 +187,7 @@ It uses selection sort for small slices, like this:
select_sort arr (i+1) (len-1)
);;
val select_sort : 'a array -> int -> int -> unit = <fun>
```
And a parallel quicksort for larger slices:
```ocaml
# let rec quicksort arr i len : unit =
if len <= 10 then select_sort arr i len
else (
@@ -216,7 +195,6 @@ And a parallel quicksort for larger slices:
let low = ref (i - 1) in
let high = ref (i + len) in
(* partition the array slice *)
while !low < !high do
incr low;
decr high;
@@ -233,8 +211,7 @@ And a parallel quicksort for larger slices:
)
done;
(* sort lower half and upper half in parallel *)
Moonpool_forkjoin.both_ignore
Moonpool.Fork_join.both_ignore
(fun () -> quicksort arr i (!low - i))
(fun () -> quicksort arr !low (len - (!low - i)))
);;
@@ -260,8 +237,8 @@ val arr : int array =
142; 255; 72; 85; 95; 93; 73; 202|]
# Moonpool.Fut.spawn ~on:pool
(fun () -> quicksort arr 0 (Array.length arr))
|> Moonpool.Fut.wait_block_exn
;;
|> Moonpool.Fut.wait_block_exn
;;
- : unit = ()
# arr;;
- : int array =
@@ -270,12 +247,6 @@ val arr : int array =
204; 220; 231; 243; 247; 255|]
```
Note that the sort had to be started in a task (via `Moonpool.Fut.spawn`)
so that fork-join would run on the thread pool.
This is necessary even for the initial iteration because fork-join
relies on OCaml 5's effects, meaning that the computation needs to run
inside an effect handler provided by the thread pool.
### More intuition
To quote [gasche](https://discuss.ocaml.org/t/ann-moonpool-0-1/12387/15):
@@ -293,18 +264,18 @@ You are assuming that, if pool P1 has 5000 tasks, and pool P2 has 10 other tasks
## OCaml versions
This works for OCaml >= 5.0.
This works for OCaml >= 4.08.
- On OCaml 4.xx, there are no domains, so this is just a library for regular thread pools
with not actual parallelism (except for threads that call C code that releases the runtime lock, that is).
- on OCaml 5.xx, there is a fixed pool of domains (using the recommended domain count).
These domains do not do much by themselves, but we schedule new threads on them, and group
threads from each domain into pools.
Each domain might thus have multiple threads that belong to distinct pools (and several threads from
the same pool, too — this is useful for threads blocking on IO).
Internally, there is a fixed pool of domains (using the recommended domain count).
These domains do not do much by themselves, but we schedule new threads on them, and form pools
of threads that contain threads from each domain.
Each domain might thus have multiple threads that belong to distinct pools (and several threads from
the same pool, too — this is useful for threads blocking on IO); Each pool will have threads
running on distinct domains, which enables parallelism.
A useful analogy is that each domain is a bit like a CPU core, and `Thread.t` is a logical thread running on a core.
Multiple threads have to share a single core and do not run in parallel on it[^2].
We can therefore build pools that spread their worker threads on multiple cores to enable parallelism within each pool.
TODO: actually use https://github.com/haesbaert/ocaml-processor to pin domains to cores,
possibly optionally using `select` in dune.
@@ -319,5 +290,4 @@ MIT license.
$ opam install moonpool
```
[^2]: ignoring hyperthreading for the sake of the analogy.
[^2]: let's not talk about hyperthreading.

View file

@@ -1,3 +0,0 @@
#!/bin/sh
OPTS="--profile=release --display=quiet"
exec dune exec $OPTS -- benchs/primes.exe $@

View file

@@ -1,3 +1,6 @@
(executables
(names fib_rec pi primes)
(libraries moonpool moonpool.forkjoin unix trace trace-tef domainslib))
(names fib_rec pi)
(preprocess (action
(run %{project_root}/src/cpp/cpp.exe %{input-file})))
(libraries moonpool unix trace trace-tef domainslib))

View file

@@ -1,8 +1,4 @@
open Moonpool
module Trace = Trace_core
module FJ = Moonpool_forkjoin
let ( let@ ) = ( @@ )
let rec fib_direct x =
if x <= 1 then
@@ -26,7 +22,7 @@ let fib_fj ~on x : int Fut.t =
fib_direct x
else (
let n1, n2 =
FJ.both (fun () -> fib_rec (x - 1)) (fun () -> fib_rec (x - 2))
Fork_join.both (fun () -> fib_rec (x - 1)) (fun () -> fib_rec (x - 2))
in
n1 + n2
)
@@ -70,7 +66,6 @@ let str_of_int_opt = function
| Some i -> Printf.sprintf "Some %d" i
let run ~psize ~n ~seq ~dl ~fj ~await ~niter ~kind () : unit =
let@ _sp = Trace.with_span ~__FILE__ ~__LINE__ "fib.run" in
let pool = lazy (create_pool ~kind ~psize ()) in
let dl_pool =
lazy
@@ -113,7 +108,6 @@ let run ~psize ~n ~seq ~dl ~fj ~await ~niter ~kind () : unit =
Ws_pool.shutdown (Lazy.force pool)
let () =
let@ () = Trace_tef.with_setup () in
let n = ref 40 in
let psize = ref None in
let seq = ref false in

View file

@@ -1,14 +1,12 @@
(* compute Pi *)
open Moonpool
module FJ = Moonpool_forkjoin
let ( let@ ) = ( @@ )
let j = ref 0
let spf = Printf.sprintf
let run_sequential (num_steps : int) : float =
let@ _sp = Trace.with_span ~__FILE__ ~__LINE__ "pi.seq" in
let step = 1. /. float num_steps in
let sum = ref 0. in
for i = 0 to num_steps - 1 do
@@ -44,11 +42,6 @@ let run_par1 ~kind (num_steps : int) : float =
(* one chunk of the work *)
let run_task _idx_task : unit =
let@ _sp =
Trace.with_span ~__FILE__ ~__LINE__ "pi.slice" ~data:(fun () ->
[ "i", `Int _idx_task ])
in
let sum = ref 0. in
let i = ref 0 in
while !i < num_steps do
@@ -66,6 +59,8 @@ let run_par1 ~kind (num_steps : int) : float =
let pi = step *. Lock.get global_sum in
pi
[@@@ifge 5.0]
let run_fork_join ~kind num_steps : float =
let@ pool = with_pool ~kind () in
@@ -75,7 +70,7 @@ let run_fork_join ~kind num_steps : float =
let global_sum = Lock.create 0. in
Ws_pool.run_wait_block pool (fun () ->
FJ.for_
Fork_join.for_
~chunk_size:(3 + (num_steps / num_tasks))
num_steps
(fun low high ->
@@ -90,6 +85,13 @@ let run_fork_join ~kind num_steps : float =
let pi = step *. Lock.get global_sum in
pi
[@@@else_]
let run_fork_join _ =
failwith "fork join not available on this version of OCaml"
[@@@endif]
type mode =
| Sequential
| Par1
@@ -97,9 +99,6 @@ type mode =
let () =
let@ () = Trace_tef.with_setup () in
Trace.set_thread_name "main";
let@ _sp = Trace.with_span ~__FILE__ ~__LINE__ "main" in
let mode = ref Sequential in
let n = ref 1000 in
let time = ref false in
@@ -140,7 +139,7 @@ let () =
Printf.printf "pi=%.6f (pi=%.6f, diff=%.3f)%s\n%!" res Float.pi
(abs_float (Float.pi -. res))
(if !time then
spf " in %.4fs" elapsed
else
"");
spf " in %.4fs" elapsed
else
"");
()

View file

@@ -1,60 +0,0 @@
let ( let@ ) = ( @@ )
let spf = Printf.sprintf
let generate' chan =
for i = 2 to Int.max_int do
Moonpool.Chan.push chan i
done
let filter' in_chan out_chan prime =
let rec loop () =
let n = Moonpool.Chan.pop in_chan in
if n mod prime <> 0 then Moonpool.Chan.push out_chan n;
loop ()
in
loop ()
let main ~chan_size ~n ~on_prime () : unit =
let@ runner = Moonpool.Ws_pool.with_ () in
let@ () = Moonpool.Ws_pool.run_wait_block runner in
let primes = ref @@ Moonpool.Chan.create ~max_size:chan_size () in
Moonpool.run_async runner
(let chan = !primes in
fun () -> generate' chan);
for _i = 1 to n do
let prime = Moonpool.Chan.pop !primes in
on_prime prime;
let filtered_chan = Moonpool.Chan.create ~max_size:chan_size () in
Moonpool.run_async runner
(let in_chan = !primes in
fun () -> filter' in_chan filtered_chan prime);
primes := filtered_chan
done
let () =
let n = ref 10_000 in
let chan_size = ref 0 in
let time = ref true in
let opts =
[
"-n", Arg.Set_int n, " number of iterations";
"--no-time", Arg.Clear time, " do not compute time";
"--chan-size", Arg.Set_int chan_size, " channel size";
]
|> Arg.align
in
Arg.parse opts ignore "";
Printf.printf "computing %d primes\n%!" !n;
let t_start = Unix.gettimeofday () in
let n_primes = Atomic.make 0 in
main ~n:!n ~chan_size:!chan_size ~on_prime:(fun _ -> Atomic.incr n_primes) ();
let elapsed : float = Unix.gettimeofday () -. t_start in
Printf.printf "computed %d primes%s\n%!" (Atomic.get n_primes)
(if !time then
spf " in %.4fs" elapsed
else
"")

dune
View file

@@ -1,9 +1,6 @@
(env
(_
(flags :standard -strict-sequence -warn-error -a+8 -w +a-4-40-42-70)))
(mdx
(libraries moonpool moonpool.forkjoin threads)
(package moonpool)
(enabled_if
(>= %{ocaml_version} 5.0)))
(env
(_ (flags :standard -strict-sequence -warn-error -a+8 -w +a-4-40-42-70)))
(mdx (libraries moonpool threads)
(enabled_if (>= %{ocaml_version} 5.0)))

View file

@@ -1,20 +1,13 @@
(lang dune 3.0)
(using mdx 0.2)
(name moonpool)
(version 0.10)
(version 0.5)
(generate_opam_files true)
(source
(github c-cube/moonpool))
(authors "Simon Cruanes")
(maintainers "Simon Cruanes")
(license MIT)
;(documentation https://url/to/documentation)
@@ -23,60 +16,21 @@
(name moonpool)
(synopsis "Pools of threads supported by a pool of domains")
(depends
(ocaml
(>= 5.0))
(ocaml (>= 4.08))
dune
(either
(>= 1.0))
(either (>= 1.0))
(trace :with-test)
(trace-tef :with-test)
(qcheck-core
(and
:with-test
(>= 0.19)))
(thread-local-storage
(and
(>= 0.2)
(< 0.3)))
(qcheck-core (and :with-test (>= 0.19)))
(odoc :with-doc)
(hmap :with-test)
(picos
(and
(>= 0.5)
(< 0.7)))
(picos_std
(and
(>= 0.5)
(< 0.7)))
(mdx
(and
(>= 1.9.0)
:with-test)))
(depopts
hmap
(trace
(>= 0.6)))
thread-local-storage
(domain-local-await (>= 0.2)))
(tags
(thread pool domain futures fork-join)))
(package
(name moonpool-lwt)
(synopsis "Event loop for moonpool based on Lwt-engine (experimental)")
(allow_empty) ; on < 5.0
(depends
(moonpool
(= :version))
(ocaml
(>= 5.0))
(qcheck-core
(and
:with-test
(>= 0.19)))
(hmap :with-test)
lwt
base-unix
(trace :with-test)
(trace-tef :with-test)
(odoc :with-doc)))
; See the complete stanza docs at https://dune.readthedocs.io/en/stable/dune-files.html#dune-project

View file

@@ -1,27 +0,0 @@
(** NOTE: this was an example from
https://discuss.ocaml.org/t/confused-about-moonpool-cancellation/15381 but
there is no cancelation anymore :) *)
let ( let@ ) = ( @@ )
let () =
let@ () = Trace_tef.with_setup () in
let@ _ = Moonpool.main in
(* let@ runner = Moonpool.Ws_pool.with_ () in *)
let@ runner = Moonpool.Background_thread.with_ () in
(* Pretend this is some long-running read loop *)
for i = 1 to 10 do
Printf.printf "MAIN LOOP %d\n%!" i;
let _ : _ Moonpool.Fut.t =
Moonpool.Fut.spawn ~on:runner (fun () ->
Printf.printf "RUN FIBER %d\n%!" i;
Format.printf "FIBER %d NOT CANCELLED YET@." i;
failwith "BOOM")
in
Moonpool.Fut.yield ();
(* Thread.delay 0.2; *)
(* Thread.delay 0.0001; *)
()
done

View file

@@ -1,11 +0,0 @@
(executables
(names discuss1)
(enabled_if
(>= %{ocaml_version} 5.0))
;(package moonpool)
(libraries
moonpool
trace
trace-tef
;tracy-client.trace
))

View file

@@ -1,5 +0,0 @@
(executables
(names run)
(enabled_if
(>= %{ocaml_version} 5.0))
(libraries moonpool trace trace-tef domainslib))

View file

@@ -1,54 +0,0 @@
(* fibo.ml *)
let cutoff = 25
let input = 40
let rec fibo_seq n =
if n <= 1 then
n
else
fibo_seq (n - 1) + fibo_seq (n - 2)
let rec fibo_domainslib ctx n =
if n <= cutoff then
fibo_seq n
else
let open Domainslib in
let fut1 = Task.async ctx (fun () -> fibo_domainslib ctx (n - 1)) in
let fut2 = Task.async ctx (fun () -> fibo_domainslib ctx (n - 2)) in
Task.await ctx fut1 + Task.await ctx fut2
let rec fibo_moonpool ctx n =
if n <= cutoff then
fibo_seq n
else
let open Moonpool in
let fut1 = Fut.spawn ~on:ctx (fun () -> fibo_moonpool ctx (n - 1)) in
let fut2 = Fut.spawn ~on:ctx (fun () -> fibo_moonpool ctx (n - 2)) in
Fut.await fut1 + Fut.await fut2
let usage =
"fibo.exe <num_domains> [ domainslib | moonpool | moonpool_fifo | seq ]"
let num_domains = try int_of_string Sys.argv.(1) with _ -> failwith usage
let implem = try Sys.argv.(2) with _ -> failwith usage
let () =
let output =
match implem with
| "moonpool" ->
let open Moonpool in
let ctx = Ws_pool.create ~num_threads:num_domains () in
Ws_pool.run_wait_block ctx (fun () -> fibo_moonpool ctx input)
| "moonpool_fifo" ->
let open Moonpool in
let ctx = Fifo_pool.create ~num_threads:num_domains () in
Ws_pool.run_wait_block ctx (fun () -> fibo_moonpool ctx input)
| "domainslib" ->
let open Domainslib in
let pool = Task.setup_pool ~num_domains () in
Task.run pool (fun () -> fibo_domainslib pool input)
| "seq" -> fibo_seq input
| _ -> failwith usage
in
print_int output;
print_newline ()

View file

@@ -1,36 +0,0 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.10"
synopsis: "Event loop for moonpool based on Lwt-engine (experimental)"
maintainer: ["Simon Cruanes"]
authors: ["Simon Cruanes"]
license: "MIT"
homepage: "https://github.com/c-cube/moonpool"
bug-reports: "https://github.com/c-cube/moonpool/issues"
depends: [
"dune" {>= "3.0"}
"moonpool" {= version}
"ocaml" {>= "5.0"}
"qcheck-core" {with-test & >= "0.19"}
"hmap" {with-test}
"lwt"
"base-unix"
"trace" {with-test}
"trace-tef" {with-test}
"odoc" {with-doc}
]
build: [
["dune" "subst"] {dev}
[
"dune"
"build"
"-p"
name
"-j"
jobs
"@install"
"@runtest" {with-test}
"@doc" {with-doc}
]
]
dev-repo: "git+https://github.com/c-cube/moonpool.git"

View file

@@ -1,6 +1,6 @@
# This file is generated by dune, edit dune-project instead
opam-version: "2.0"
version: "0.10"
version: "0.5"
synopsis: "Pools of threads supported by a pool of domains"
maintainer: ["Simon Cruanes"]
authors: ["Simon Cruanes"]
@@ -9,22 +9,18 @@ tags: ["thread" "pool" "domain" "futures" "fork-join"]
homepage: "https://github.com/c-cube/moonpool"
bug-reports: "https://github.com/c-cube/moonpool/issues"
depends: [
"ocaml" {>= "5.0"}
"ocaml" {>= "4.08"}
"dune" {>= "3.0"}
"either" {>= "1.0"}
"trace" {with-test}
"trace-tef" {with-test}
"qcheck-core" {with-test & >= "0.19"}
"thread-local-storage" {>= "0.2" & < "0.3"}
"odoc" {with-doc}
"hmap" {with-test}
"picos" {>= "0.5" & < "0.7"}
"picos_std" {>= "0.5" & < "0.7"}
"mdx" {>= "1.9.0" & with-test}
]
depopts: [
"hmap"
"trace" {>= "0.6"}
"thread-local-storage"
"domain-local-await" {>= "0.2"}
]
build: [
["dune" "subst"] {dev}

src/atomic_.ml Normal file
View file

@@ -0,0 +1,46 @@
[@@@ifge 4.12]
include Atomic
[@@@else_]
type 'a t = { mutable x: 'a }
let[@inline] make x = { x }
let[@inline] get { x } = x
let[@inline] set r x = r.x <- x
let[@inline never] exchange r x =
(* atomic *)
let y = r.x in
r.x <- x;
(* atomic *)
y
let[@inline never] compare_and_set r seen v =
(* atomic *)
if r.x == seen then (
r.x <- v;
(* atomic *)
true
) else
false
let[@inline never] fetch_and_add r x =
(* atomic *)
let v = r.x in
r.x <- x + r.x;
(* atomic *)
v
let[@inline never] incr r =
(* atomic *)
r.x <- 1 + r.x
(* atomic *)
let[@inline never] decr r =
(* atomic *)
r.x <- r.x - 1
(* atomic *)
[@@@endif]

src/bb_queue.mli Normal file
View file

@@ -0,0 +1,82 @@
(** Basic Blocking Queue *)
type 'a t
val create : unit -> _ t
exception Closed
val push : 'a t -> 'a -> unit
(** [push q x] pushes [x] into [q], and returns [()].
@raise Closed if [close q] was previously called.*)
val size : _ t -> int
(** Number of items currently in the queue.
@since 0.2 *)
val pop : 'a t -> 'a
(** [pop q] pops the next element in [q]. It might block until an element comes.
@raise Closed if the queue was closed before a new element was available. *)
val try_pop : force_lock:bool -> 'a t -> 'a option
(** [try_pop q] immediately pops the first element of [q], if any,
or returns [None] without blocking.
@param force_lock if true, use {!Mutex.lock} (which can block under contention);
if false, use {!Mutex.try_lock}, which might return [None] even in
presence of an element if there's contention *)
val try_push : 'a t -> 'a -> bool
(** [try_push q x] tries to push [x] into [q] and returns [true] on
success; otherwise it fails to push and returns [false]
without blocking.
@raise Closed if the locking succeeded but the queue is closed.
*)
val transfer : 'a t -> 'a Queue.t -> unit
(** [transfer bq q2] transfers all items presently
in [bq] into [q2] in one atomic section, and clears [bq].
It blocks if no element is in [bq].
This is useful to consume elements from the queue in batch.
Create a [Queue.t] locally:
{[
let dowork (work_queue: job Bb_queue.t) =
(* local queue, not thread safe *)
let local_q = Queue.create() in
try
while true do
(* work on local events, already on this thread *)
while not (Queue.is_empty local_q) do
let job = Queue.pop local_q in
process_job job
done;
(* get all the events in the incoming blocking queue, in
one single critical section. *)
Bb_queue.transfer work_queue local_q
done
with Bb_queue.Closed -> ()
]}
@since 0.4 *)
val close : _ t -> unit
(** Close the queue, meaning there won't be any more [push] allowed. *)
type 'a gen = unit -> 'a option
type 'a iter = ('a -> unit) -> unit
val to_iter : 'a t -> 'a iter
(** [to_iter q] returns an iterator over all items in the queue.
This might not terminate if [q] is never closed.
@since 0.4 *)
val to_gen : 'a t -> 'a gen
(** [to_gen q] returns a generator from the queue.
@since 0.4 *)
val to_seq : 'a t -> 'a Seq.t
(** [to_seq q] returns a (transient) sequence from the queue.
@since 0.4 *)
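As a quick illustration of the interface above, here is a minimal producer/consumer sketch; it assumes this file is compiled as `Bb_queue` (as in this repo) and that the `threads` library is linked:
```ocaml
(* Sketch: one consumer thread drains the queue until it is closed;
   the main thread produces a few items, then closes the queue. *)
let () =
  let q : int Bb_queue.t = Bb_queue.create () in
  let consumer =
    Thread.create
      (fun () ->
        try
          while true do
            (* [pop] blocks until an element is available *)
            Printf.printf "got %d\n%!" (Bb_queue.pop q)
          done
        with Bb_queue.Closed -> ())
      ()
  in
  for i = 1 to 5 do
    Bb_queue.push q i
  done;
  Bb_queue.close q;
  (* the consumer drains the remaining items, then [pop] raises [Closed] *)
  Thread.join consumer
```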

src/bounded_queue.ml Normal file
View file

@@ -0,0 +1,182 @@
type 'a t = {
max_size: int;
q: 'a Queue.t;
mutex: Mutex.t;
cond_push: Condition.t;
cond_pop: Condition.t;
mutable closed: bool;
}
exception Closed
let create ~max_size () : _ t =
if max_size < 1 then invalid_arg "Bounded_queue.create";
{
max_size;
mutex = Mutex.create ();
cond_push = Condition.create ();
cond_pop = Condition.create ();
q = Queue.create ();
closed = false;
}
let close (self : _ t) =
Mutex.lock self.mutex;
if not self.closed then (
self.closed <- true;
(* awake waiters so they fail *)
Condition.broadcast self.cond_push;
Condition.broadcast self.cond_pop
);
Mutex.unlock self.mutex
(** Check if the queue is full. Precondition: [self.mutex] is acquired. *)
let[@inline] is_full_ (self : _ t) : bool = Queue.length self.q >= self.max_size
let push (self : _ t) x : unit =
let continue = ref true in
Mutex.lock self.mutex;
while !continue do
if self.closed then (
(* push always fails on a closed queue *)
Mutex.unlock self.mutex;
raise Closed
) else if is_full_ self then
Condition.wait self.cond_push self.mutex
else (
let was_empty = Queue.is_empty self.q in
Queue.push x self.q;
if was_empty then Condition.broadcast self.cond_pop;
(* exit loop *)
continue := false;
Mutex.unlock self.mutex
)
done
let pop (self : 'a t) : 'a =
Mutex.lock self.mutex;
let rec loop () =
if Queue.is_empty self.q then (
if self.closed then (
(* pop fails on a closed queue if it's also empty,
otherwise it still returns the remaining elements *)
Mutex.unlock self.mutex;
raise Closed
);
Condition.wait self.cond_pop self.mutex;
(loop [@tailcall]) ()
) else (
let was_full = is_full_ self in
let x = Queue.pop self.q in
(* wakeup pushers that were blocked *)
if was_full then Condition.broadcast self.cond_push;
Mutex.unlock self.mutex;
x
)
in
loop ()
let try_pop ~force_lock (self : _ t) : _ option =
let has_lock =
if force_lock then (
Mutex.lock self.mutex;
true
) else
Mutex.try_lock self.mutex
in
if has_lock then (
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
let was_full_before_pop = is_full_ self in
match Queue.pop self.q with
| x ->
(* wakeup pushers that are blocked *)
if was_full_before_pop then Condition.broadcast self.cond_push;
Mutex.unlock self.mutex;
Some x
| exception Queue.Empty ->
Mutex.unlock self.mutex;
None
) else
None
let try_push ~force_lock (self : _ t) x : bool =
let has_lock =
if force_lock then (
Mutex.lock self.mutex;
true
) else
Mutex.try_lock self.mutex
in
if has_lock then (
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
if is_full_ self then (
Mutex.unlock self.mutex;
false
) else (
let was_empty = Queue.is_empty self.q in
Queue.push x self.q;
if was_empty then Condition.broadcast self.cond_pop;
Mutex.unlock self.mutex;
true
)
) else
false
let[@inline] max_size self = self.max_size
let size (self : _ t) : int =
Mutex.lock self.mutex;
let n = Queue.length self.q in
Mutex.unlock self.mutex;
n
let transfer (self : 'a t) q2 : unit =
Mutex.lock self.mutex;
let continue = ref true in
while !continue do
if Queue.is_empty self.q then (
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
Condition.wait self.cond_pop self.mutex
) else (
let was_full = is_full_ self in
Queue.transfer self.q q2;
if was_full then Condition.broadcast self.cond_push;
continue := false;
Mutex.unlock self.mutex
)
done
type 'a gen = unit -> 'a option
type 'a iter = ('a -> unit) -> unit
let to_iter self k =
try
while true do
let x = pop self in
k x
done
with Closed -> ()
let to_gen self : _ gen =
fun () ->
match pop self with
| exception Closed -> None
| x -> Some x
let rec to_seq self : _ Seq.t =
fun () ->
match pop self with
| exception Closed -> Seq.Nil
| x -> Seq.Cons (x, to_seq self)

src/bounded_queue.mli Normal file
View file

@@ -0,0 +1,86 @@
(** A blocking queue of finite size.
This queue, while still using locks underneath
(like the regular blocking queue), should be enough for
usage under reasonable contention.
The bounded size is helpful whenever some form of backpressure is
desirable: if the queue is used to communicate between producer(s)
and consumer(s), the consumer(s) can limit the rate at which
producer(s) send new work down their way.
Whenever the queue is full, producer(s) will have to
wait before pushing new work.
@since 0.4 *)
type 'a t
(** A bounded queue. *)
val create : max_size:int -> unit -> 'a t
val close : _ t -> unit
(** [close q] closes [q]. No new elements can be pushed into [q],
and once all the elements currently in [q] have been popped,
{!pop} will also raise {!Closed}. *)
exception Closed
val push : 'a t -> 'a -> unit
(** [push q x] pushes [x] at the end of the queue.
If [q] is full, this will block until there is
room for [x].
@raise Closed if [q] is closed. *)
val try_push : force_lock:bool -> 'a t -> 'a -> bool
(** [try_push q x] attempts to push [x] into [q], but abandons
if it cannot acquire [q] or if [q] is full.
@param force_lock if true, use {!Mutex.lock} (which can block
under contention);
if false, use {!Mutex.try_lock}, which might return [false] even
if there's room in the queue.
@raise Closed if [q] is closed. *)
val pop : 'a t -> 'a
(** [pop q] pops the first element off [q]. It blocks if [q]
is empty, until some element becomes available.
@raise Closed if [q] is empty and closed. *)
val try_pop : force_lock:bool -> 'a t -> 'a option
(** [try_pop ~force_lock q] tries to pop the first element, or returns [None]
if no element is available or if it failed to acquire [q].
@param force_lock if true, use {!Mutex.lock} (which can block
under contention);
if false, use {!Mutex.try_lock}, which might return [None] even in
presence of an element if there's contention.
@raise Closed if [q] is empty and closed. *)
val size : _ t -> int
(** Number of elements currently in [q] *)
val max_size : _ t -> int
(** Maximum size of the queue. See {!create}. *)
val transfer : 'a t -> 'a Queue.t -> unit
(** [transfer bq q2] transfers all elements currently available
in [bq] into local queue [q2], and clears [bq], atomically.
It blocks if [bq] is empty.
See {!Bb_queue.transfer} for more details.
@raise Closed if [bq] is empty and closed. *)
type 'a gen = unit -> 'a option
type 'a iter = ('a -> unit) -> unit
val to_iter : 'a t -> 'a iter
(** [to_iter q] returns an iterator over all items in the queue.
This might not terminate if [q] is never closed. *)
val to_gen : 'a t -> 'a gen
(** [to_gen q] returns a generator from the queue. *)
val to_seq : 'a t -> 'a Seq.t
(** [to_seq q] returns a (transient) sequence from the queue. *)
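To make the backpressure behavior described above concrete, here is a hedged sketch (assuming this interface is compiled as `Bounded_queue` and `threads` is linked) of a fast producer throttled by a slow consumer:
```ocaml
(* Sketch: [push] blocks whenever the queue already holds [max_size]
   elements, so the producer can never outrun the consumer by more
   than the queue's capacity. *)
let () =
  let q : int Bounded_queue.t = Bounded_queue.create ~max_size:4 () in
  let consumer =
    Thread.create
      (fun () ->
        try
          while true do
            let x = Bounded_queue.pop q in
            ignore x;
            Thread.delay 0.001 (* simulate slow processing *)
          done
        with Bounded_queue.Closed -> ())
      ()
  in
  for i = 1 to 100 do
    Bounded_queue.push q i (* blocks while the queue is full *)
  done;
  Bounded_queue.close q;
  Thread.join consumer
```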

src/chan.ml Normal file
View file

@@ -0,0 +1,193 @@
module A = Atomic_
type 'a or_error = 'a Fut.or_error
type 'a waiter = 'a Fut.promise
let[@inline] list_is_empty_ = function
| [] -> true
| _ :: _ -> false
(** Simple functional queue *)
module Q : sig
type 'a t
val return : 'a -> 'a t
val is_empty : _ t -> bool
exception Empty
val pop_exn : 'a t -> 'a * 'a t
val push : 'a t -> 'a -> 'a t
val iter : ('a -> unit) -> 'a t -> unit
end = struct
type 'a t = {
hd: 'a list;
tl: 'a list;
}
(** Queue containing elements of type 'a.
invariant: if hd=[], then tl=[] *)
let[@inline] return x : _ t = { hd = [ x ]; tl = [] }
let[@inline] make_ hd tl =
match hd with
| [] -> { hd = List.rev tl; tl = [] }
| _ :: _ -> { hd; tl }
let[@inline] is_empty self = list_is_empty_ self.hd
let[@inline] push self x : _ t = make_ self.hd (x :: self.tl)
let iter f (self : _ t) : unit =
List.iter f self.hd;
List.iter f self.tl
exception Empty
let pop_exn self =
match self.hd with
| [] ->
assert (list_is_empty_ self.tl);
raise Empty
| x :: hd' ->
let self' = make_ hd' self.tl in
x, self'
end
exception Closed
type 'a state =
| Empty
| St_closed
| Elems of 'a Q.t
| Waiters of 'a waiter Q.t
type 'a t = { st: 'a state A.t } [@@unboxed]
let create () : _ t = { st = A.make Empty }
(** Produce a state from a queue of waiters *)
let[@inline] mk_st_waiters_ ws : _ state =
if Q.is_empty ws then
Empty
else
Waiters ws
(** Produce a state from a queue of elements *)
let[@inline] mk_st_elems_ q : _ state =
if Q.is_empty q then
Empty
else
Elems q
let push (self : _ t) x : unit =
while
let old_st = A.get self.st in
match old_st with
| St_closed -> raise Closed
| Empty -> not (A.compare_and_set self.st old_st (Elems (Q.return x)))
| Waiters ws ->
(* awake first waiter and give it [x] *)
let w, ws' = Q.pop_exn ws in
let new_st = mk_st_waiters_ ws' in
if A.compare_and_set self.st old_st new_st then (
Fut.fulfill w (Ok x);
false
) else
true
| Elems q -> not (A.compare_and_set self.st old_st (Elems (Q.push q x)))
do
Domain_.relax ()
done
let try_pop (type elt) self : elt option =
let module M = struct
exception Found of elt
end in
try
(* a bit of spinning *)
for _retry = 1 to 10 do
let old_st = A.get self.st in
match old_st with
| Elems q ->
let x, q' = Q.pop_exn q in
let new_st = mk_st_elems_ q' in
if A.compare_and_set self.st old_st new_st then
raise_notrace (M.Found x)
else
Domain_.relax ()
| _ -> raise_notrace Exit
done;
None
with
| M.Found x -> Some x
| Exit -> None
let pop (type elt) (self : _ t) : elt Fut.t =
let module M = struct
exception Ret of elt
exception Fut of elt Fut.t
end in
try
while
let old_st = A.get self.st in
(match old_st with
| St_closed ->
let bt = Printexc.get_callstack 10 in
raise_notrace (M.Fut (Fut.fail Closed bt))
| Elems q ->
let x, q' = Q.pop_exn q in
let new_st = mk_st_elems_ q' in
if A.compare_and_set self.st old_st new_st then raise_notrace (M.Ret x)
| Empty ->
let fut, promise = Fut.make () in
let new_st = Waiters (Q.return promise) in
if A.compare_and_set self.st old_st new_st then
raise_notrace (M.Fut fut)
| Waiters ws ->
let fut, promise = Fut.make () in
(* add new promise at the end of the queue of waiters *)
let new_st = Waiters (Q.push ws promise) in
if A.compare_and_set self.st old_st new_st then
raise_notrace (M.Fut fut));
true
do
Domain_.relax ()
done;
(* never reached *)
assert false
with
| M.Ret x -> Fut.return x
| M.Fut f -> f
let pop_block_exn (self : 'a t) : 'a =
match try_pop self with
| Some x -> x
| None -> Fut.wait_block_exn @@ pop self
let close (self : _ t) : unit =
while
let old_st = A.get self.st in
match old_st with
| St_closed -> false (* exit *)
| Elems _ | Empty -> not (A.compare_and_set self.st old_st St_closed)
| Waiters ws ->
if A.compare_and_set self.st old_st St_closed then (
(* fail all waiters with [Closed]. *)
let bt = Printexc.get_callstack 10 in
Q.iter (fun w -> Fut.fulfill_idempotent w (Error (Closed, bt))) ws;
false
) else
true
do
Domain_.relax ()
done
[@@@ifge 5.0]
let pop_await self =
match try_pop self with
| Some x -> x
| None -> Fut.await @@ pop self
[@@@endif]

src/chan.mli Normal file
View file

@@ -0,0 +1,52 @@
(** Channels.
Channels are pipelines of values where threads can push into
one end, and pull from the other end.
Unlike {!Moonpool.Blocking_queue}, channels are designed so
that pushing never blocks, and popping values returns a future.
@since 0.3
*)
type 'a or_error = 'a Fut.or_error
type 'a t
(** Channel carrying values of type ['a]. *)
val create : unit -> 'a t
(** Create a channel. *)
exception Closed
val push : 'a t -> 'a -> unit
(** [push chan x] pushes [x] into [chan]. This does not block.
@raise Closed if the channel is closed. *)
val pop : 'a t -> 'a Fut.t
(** Pop an element. This returns a future that will be
fulfilled when an element is available.
@raise Closed if the channel is closed, or fails the future
if the channel is closed before an element is available for it. *)
val try_pop : 'a t -> 'a option
(** [try_pop chan] pops and returns an element if one is available
immediately. Otherwise it returns [None]. *)
val pop_block_exn : 'a t -> 'a
(** Like [pop], but blocks if an element is not available immediately.
The precautions around blocking from inside a thread pool
are the same as explained in {!Fut.wait_block}. *)
val close : _ t -> unit
(** Close the channel. Further push and pop calls will fail.
This is idempotent. *)
[@@@ifge 5.0]
val pop_await : 'a t -> 'a
(** Like {!pop} but suspends the current thread until an element is
available. See {!Fut.await} for more details.
@since 0.3 *)
[@@@endif]
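For reference, a minimal sketch of this future-based channel in use; the channel API is exactly the one above, while the pool creation is an assumption (e.g. a `Ws_pool`):
```ocaml
(* Sketch: register interest with [pop] first (it never blocks, it
   returns a future), then push from a task; that push fulfills the
   future returned by [pop]. *)
let () =
  let pool = Moonpool.Ws_pool.create () in
  let chan : int Moonpool.Chan.t = Moonpool.Chan.create () in
  let next : int Moonpool.Fut.t = Moonpool.Chan.pop chan in
  let (_ : unit Moonpool.Fut.t) =
    Moonpool.Fut.spawn ~on:pool (fun () -> Moonpool.Chan.push chan 42)
  in
  Printf.printf "received %d\n%!" (Moonpool.Fut.wait_block_exn next);
  Moonpool.Ws_pool.shutdown pool
```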

View file

@@ -1,20 +0,0 @@
include Runner
let ( let@ ) = ( @@ )
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?name:string ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
let create ?on_init_thread ?on_exit_thread ?on_exn ?name () : t =
Fifo_pool.create ?on_init_thread ?on_exit_thread ?on_exn ?name ~num_threads:1
()
let with_ ?on_init_thread ?on_exit_thread ?on_exn ?name () f =
let pool = create ?on_init_thread ?on_exit_thread ?on_exn ?name () in
let@ () = Fun.protect ~finally:(fun () -> shutdown pool) in
f pool

View file

@@ -1,23 +0,0 @@
(** A simple runner with a single background thread.
Because this is guaranteed to have a single worker thread, tasks scheduled
in this runner always run asynchronously but in a sequential fashion.
This is similar to {!Fifo_pool} with exactly one thread.
@since 0.6 *)
include module type of Runner
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?name:string ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
val create : (unit -> t, _) create_args
(** Create the background runner *)
val with_ : (unit -> (t -> 'a) -> 'a, _) create_args
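A minimal sketch of the sequential behavior described above, using `Moonpool.Background_thread.with_` (as in the `discuss1.ml` example elsewhere in this diff) together with `Moonpool.run_async`:
```ocaml
(* Sketch: tasks run off the caller's thread, but one at a time and
   in submission order, since there is a single worker thread. *)
let () =
  Moonpool.Background_thread.with_ () (fun runner ->
      for i = 1 to 3 do
        Moonpool.run_async runner (fun () ->
            Printf.printf "task %d\n%!" i)
      done)
(* on exit, [with_] shuts the runner down and joins its worker *)
```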

View file

@@ -1,79 +0,0 @@
(** Basic Blocking Queue *)
type 'a t
val create : unit -> _ t
exception Closed
val push : 'a t -> 'a -> unit
(** [push q x] pushes [x] into [q], and returns [()].
@raise Closed if [close q] was previously called.*)
val size : _ t -> int
(** Number of items currently in the queue.
@since 0.2 *)
val pop : 'a t -> 'a
(** [pop q] pops the next element in [q]. It might block until an element comes.
@raise Closed if the queue was closed before a new element was available. *)
val try_pop : force_lock:bool -> 'a t -> 'a option
(** [try_pop q] immediately pops the first element of [q], if any, or returns
[None] without blocking.
@param force_lock
if true, use {!Mutex.lock} (which can block under contention); if false,
use {!Mutex.try_lock}, which might return [None] even in presence of an
element if there's contention *)
val try_push : 'a t -> 'a -> bool
(** [try_push q x] tries to push [x] into [q] and returns [true] on success;
otherwise it fails to push and returns [false] without blocking.
@raise Closed if the locking succeeded but the queue is closed. *)
val transfer : 'a t -> 'a Queue.t -> unit
(** [transfer bq q2] transfers all items presently in [bq] into [q2] in one
atomic section, and clears [bq]. It blocks if no element is in [bq].
This is useful to consume elements from the queue in batch. Create a
[Queue.t] locally:
{[
let dowork (work_queue : job Bb_queue.t) =
(* local queue, not thread safe *)
let local_q = Queue.create () in
try
while true do
(* work on local events, already on this thread *)
while not (Queue.is_empty local_q) do
let job = Queue.pop local_q in
process_job job
done;
(* get all the events in the incoming blocking queue, in
one single critical section. *)
Bb_queue.transfer work_queue local_q
done
with Bb_queue.Closed -> ()
]}
@since 0.4 *)
val close : _ t -> unit
(** Close the queue, meaning there won't be any more [push] allowed. *)
type 'a gen = unit -> 'a option
type 'a iter = ('a -> unit) -> unit
val to_iter : 'a t -> 'a iter
(** [to_iter q] returns an iterator over all items in the queue. This might not
terminate if [q] is never closed.
@since 0.4 *)
val to_gen : 'a t -> 'a gen
(** [to_gen q] returns a generator from the queue.
@since 0.4 *)
val to_seq : 'a t -> 'a Seq.t
(** [to_seq q] returns a (transient) sequence from the queue.
@since 0.4 *)

View file

@@ -1,123 +0,0 @@
exception Closed
type 'a t = {
q: 'a Queue.t;
mutex: Mutex.t; (** protects critical section *)
mutable closed: bool;
max_size: int;
push_waiters: Trigger.t Queue.t;
pop_waiters: Trigger.t Queue.t;
}
let create ~max_size () : _ t =
if max_size < 0 then invalid_arg "Chan: max_size < 0";
{
max_size;
mutex = Mutex.create ();
closed = false;
q = Queue.create ();
push_waiters = Queue.create ();
pop_waiters = Queue.create ();
}
let try_push (self : _ t) x : bool =
if Mutex.try_lock self.mutex then (
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
match Queue.length self.q with
| 0 ->
let to_awake = Queue.create () in
Queue.push x self.q;
Queue.transfer self.pop_waiters to_awake;
Mutex.unlock self.mutex;
(* wake up pop triggers if needed. Be careful to do that
outside the critical section*)
Queue.iter Trigger.signal to_awake;
true
| n when n < self.max_size ->
Queue.push x self.q;
Mutex.unlock self.mutex;
true
| _ ->
Mutex.unlock self.mutex;
false
) else
false
let try_pop (type elt) self : elt option =
if Mutex.try_lock self.mutex then (
match Queue.pop self.q with
| exception Queue.Empty ->
Mutex.unlock self.mutex;
if self.closed then
raise Closed
else
None
| x ->
Mutex.unlock self.mutex;
Some x
) else
None
let close (self : _ t) : unit =
let triggers_to_signal = Queue.create () in
Mutex.lock self.mutex;
if not self.closed then (
self.closed <- true;
Queue.transfer self.pop_waiters triggers_to_signal;
Queue.transfer self.push_waiters triggers_to_signal
);
Mutex.unlock self.mutex;
Queue.iter Trigger.signal triggers_to_signal
let rec push (self : _ t) x : unit =
Mutex.lock self.mutex;
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
match Queue.length self.q with
| 0 ->
Queue.push x self.q;
let to_wakeup = Queue.create () in
Queue.transfer self.pop_waiters to_wakeup;
Mutex.unlock self.mutex;
Queue.iter Trigger.signal to_wakeup
| n when n < self.max_size ->
Queue.push x self.q;
Mutex.unlock self.mutex
| _ ->
let tr = Trigger.create () in
Queue.push tr self.push_waiters;
Mutex.unlock self.mutex;
Trigger.await_exn tr;
push self x
let rec pop (self : 'a t) : 'a =
Mutex.lock self.mutex;
match Queue.pop self.q with
| x ->
if Queue.is_empty self.q then (
let to_wakeup = Queue.create () in
Queue.transfer self.push_waiters to_wakeup;
Mutex.unlock self.mutex;
Queue.iter Trigger.signal to_wakeup
) else
Mutex.unlock self.mutex;
x
| exception Queue.Empty ->
if self.closed then (
Mutex.unlock self.mutex;
raise Closed
);
let tr = Trigger.create () in
Queue.push tr self.pop_waiters;
Mutex.unlock self.mutex;
Trigger.await_exn tr;
pop self

View file

@@ -1,49 +0,0 @@
(** Channels.
The channels have bounded size. They use effects/await to provide
a direct style implementation. Pushing into a full channel,
or popping from an empty one, will suspend the current task.
The channels became bounded in 0.7.
*)
type 'a t
(** Channel carrying values of type ['a]. *)
val create : max_size:int -> unit -> 'a t
(** Create a channel. *)
exception Closed
val try_push : 'a t -> 'a -> bool
(** [try_push chan x] pushes [x] into [chan]. This does not block. Returns
[true] if it succeeded in pushing.
@raise Closed if the channel is closed. *)
val try_pop : 'a t -> 'a option
(** [try_pop chan] pops and returns an element if one is available immediately.
Otherwise it returns [None].
@raise Closed if the channel is closed and empty. *)
val close : _ t -> unit
(** Close the channel. Further push and pop calls will fail. This is idempotent.
*)
val push : 'a t -> 'a -> unit
(** Push the value into the channel, suspending the current task if the channel
is currently full.
@raise Closed if the channel is closed
@since 0.7 *)
val pop : 'a t -> 'a
(** Pop an element. This might suspend the current task if the channel is
currently empty.
@raise Closed if the channel is empty and closed.
@since 0.7 *)
(*
val pop_block_exn : 'a t -> 'a
(** Like [pop], but blocks if an element is not available immediately.
The precautions around blocking from inside a thread pool
are the same as explained in {!Fut.wait_block}. *)
*)
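A hedged sketch of the suspending behavior described above, with a producer and a consumer as two tasks on the same pool; the channel calls are the ones documented in this file, while `Ws_pool`/`Fut` usage follows the other examples in this diff:
```ocaml
(* Sketch: push/pop suspend the current task (via effects) rather than
   blocking the worker thread, so both loops can share the pool. *)
let () =
  let pool = Moonpool.Ws_pool.create () in
  let chan = Moonpool.Chan.create ~max_size:2 () in
  let producer =
    Moonpool.Fut.spawn ~on:pool (fun () ->
        for i = 1 to 10 do
          Moonpool.Chan.push chan i (* suspends while the channel is full *)
        done;
        Moonpool.Chan.close chan)
  in
  let consumer =
    Moonpool.Fut.spawn ~on:pool (fun () ->
        try
          while true do
            ignore (Moonpool.Chan.pop chan : int) (* suspends while empty *)
          done
        with Moonpool.Chan.Closed -> ())
  in
  Moonpool.Fut.wait_block_exn producer;
  Moonpool.Fut.wait_block_exn consumer;
  Moonpool.Ws_pool.shutdown pool
```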

View file

@@ -1,15 +0,0 @@
(library
(public_name moonpool)
(name moonpool)
(libraries
moonpool.private
(re_export thread-local-storage)
(select
hmap_ls_.ml
from
(hmap -> hmap_ls_.real.ml)
(-> hmap_ls_.dummy.ml))
moonpool.dpool
(re_export picos))
(flags :standard -open Moonpool_private)
(private_modules util_pool_))

View file

@@ -1,30 +0,0 @@
type t = exn * Printexc.raw_backtrace
let[@inline] make exn bt : t = exn, bt
let[@inline] exn (e, _) = e
let[@inline] bt (_, bt) = bt
let show self =
let bt = Printexc.raw_backtrace_to_string (bt self) in
let exn = Printexc.to_string (exn self) in
if bt = "" then
exn
else
Printf.sprintf "%s\n%s" exn bt
let pp out self = Format.pp_print_string out (show self)
let[@inline] raise (e, bt) = Printexc.raise_with_backtrace e bt
let[@inline] get exn =
let bt = Printexc.get_raw_backtrace () in
make exn bt
let[@inline] get_callstack n exn =
let bt = Printexc.get_callstack n in
make exn bt
type nonrec 'a result = ('a, t) result
let[@inline] unwrap = function
| Ok x -> x
| Error ebt -> raise ebt

View file

@@ -1,29 +0,0 @@
(** Exception with backtrace.
Type changed @since 0.7
@since 0.6 *)
(** An exception bundled with a backtrace *)
type t = exn * Printexc.raw_backtrace
val exn : t -> exn
val bt : t -> Printexc.raw_backtrace
val raise : t -> 'a
val get : exn -> t
val get_callstack : int -> exn -> t
val make : exn -> Printexc.raw_backtrace -> t
(** Trivial builder *)
val show : t -> string
(** Simple printing *)
val pp : Format.formatter -> t -> unit
type nonrec 'a result = ('a, t) result
val unwrap : 'a result -> 'a
(** [unwrap (Ok x)] is [x], [unwrap (Error ebt)] re-raises [ebt].
@since 0.7 *)
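A small usage sketch, assuming the module is in scope as `Exn_bt` (it is exposed among moonpool's top-level modules):
```ocaml
(* Sketch: capture an exception together with its backtrace at the
   catch site, carry it around as a value, and print it later. *)
let protected (f : unit -> 'a) : 'a Exn_bt.result =
  match f () with
  | x -> Ok x
  | exception exn ->
    (* capture right away, before the backtrace gets clobbered *)
    Error (Exn_bt.get exn)

let () =
  match protected (fun () -> failwith "boom") with
  | Ok () -> ()
  | Error ebt -> print_endline (Exn_bt.show ebt)
```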

View file

@@ -1,184 +0,0 @@
open Types_
include Runner
module WL = Worker_loop_
type fiber = Picos.Fiber.t
type task_full = WL.task_full
let ( let@ ) = ( @@ )
type state = {
threads: Thread.t array;
q: task_full Bb_queue.t; (** Queue for tasks. *)
mutable as_runner: t;
(* init options *)
name: string option;
on_init_thread: dom_id:int -> t_id:int -> unit -> unit;
on_exit_thread: dom_id:int -> t_id:int -> unit -> unit;
on_exn: exn -> Printexc.raw_backtrace -> unit;
}
(** internal state *)
type worker_state = {
idx: int;
dom_idx: int;
st: state;
}
let[@inline] size_ (self : state) = Array.length self.threads
let[@inline] num_tasks_ (self : state) : int = Bb_queue.size self.q
(*
get_thread_state = TLS.get_opt k_worker_state
*)
let default_thread_init_exit_ ~dom_id:_ ~t_id:_ () = ()
let shutdown_ ~wait (self : state) : unit =
Bb_queue.close self.q;
if wait then Array.iter Thread.join self.threads
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?num_threads:int ->
?name:string ->
'a
(** Run [task] as is, on the pool. *)
let schedule_ (self : state) (task : task_full) : unit =
try Bb_queue.push self.q task with Bb_queue.Closed -> raise Shutdown
let runner_of_state (pool : state) : t =
let run_async ~fiber f = schedule_ pool @@ T_start { f; fiber } in
Runner.For_runner_implementors.create
~shutdown:(fun ~wait () -> shutdown_ pool ~wait)
~run_async
~size:(fun () -> size_ pool)
~num_tasks:(fun () -> num_tasks_ pool)
()
(** Run [task] as is, on the pool. *)
let schedule_w (self : worker_state) (task : task_full) : unit =
try Bb_queue.push self.st.q task with Bb_queue.Closed -> raise Shutdown
let get_next_task (self : worker_state) =
try Bb_queue.pop self.st.q with Bb_queue.Closed -> raise WL.No_more_tasks
let before_start (self : worker_state) =
let t_id = Thread.id @@ Thread.self () in
self.st.on_init_thread ~dom_id:self.dom_idx ~t_id ();
(* set thread name *)
Option.iter
(fun name ->
Tracing_.set_thread_name (Printf.sprintf "%s.worker.%d" name self.idx))
self.st.name
let cleanup (self : worker_state) : unit =
(* on termination, decrease refcount of underlying domain *)
Domain_pool_.decr_on self.dom_idx;
let t_id = Thread.id @@ Thread.self () in
self.st.on_exit_thread ~dom_id:self.dom_idx ~t_id ()
let worker_ops : worker_state WL.ops =
let runner (st : worker_state) = st.st.as_runner in
let on_exn (st : worker_state) (ebt : Exn_bt.t) =
st.st.on_exn (Exn_bt.exn ebt) (Exn_bt.bt ebt)
in
{
WL.schedule = schedule_w;
runner;
get_next_task;
on_exn;
before_start;
cleanup;
}
let create_ ?(on_init_thread = default_thread_init_exit_)
?(on_exit_thread = default_thread_init_exit_) ?(on_exn = fun _ _ -> ())
~threads ?name () : state =
let self =
{
threads;
q = Bb_queue.create ();
as_runner = Runner.dummy;
name;
on_init_thread;
on_exit_thread;
on_exn;
}
in
self.as_runner <- runner_of_state self;
self
let create ?on_init_thread ?on_exit_thread ?on_exn ?num_threads ?name () : t =
let num_domains = Domain_pool_.max_number_of_domains () in
(* number of threads to run *)
let num_threads = Util_pool_.num_threads ?num_threads () in
(* make sure we don't bias towards the first domain(s) in {!D_pool_} *)
let offset = Random.int num_domains in
let pool =
let dummy_thread = Thread.self () in
let threads = Array.make num_threads dummy_thread in
create_ ?on_init_thread ?on_exit_thread ?on_exn ~threads ?name ()
in
let runner = runner_of_state pool in
(* temporary queue used to obtain thread handles from domains
on which the thread are started. *)
let receive_threads = Bb_queue.create () in
(* start the thread with index [i] *)
let start_thread_with_idx i =
let dom_idx = (offset + i) mod num_domains in
(* function called in domain with index [i], to
create the thread and push it into [receive_threads] *)
let create_thread_in_domain () =
let st = { idx = i; dom_idx; st = pool } in
let thread =
Thread.create (WL.worker_loop ~block_signals:true ~ops:worker_ops) st
in
(* send the thread from the domain back to us *)
Bb_queue.push receive_threads (i, thread)
in
Domain_pool_.run_on dom_idx create_thread_in_domain
in
(* start all threads, placing them on the domains
according to their index and [offset] in a round-robin fashion. *)
for i = 0 to num_threads - 1 do
start_thread_with_idx i
done;
(* receive the newly created threads back from domains *)
for _j = 1 to num_threads do
let i, th = Bb_queue.pop receive_threads in
pool.threads.(i) <- th
done;
runner
let with_ ?on_init_thread ?on_exit_thread ?on_exn ?num_threads ?name () f =
let pool =
create ?on_init_thread ?on_exit_thread ?on_exn ?num_threads ?name ()
in
let@ () = Fun.protect ~finally:(fun () -> shutdown pool) in
f pool
module Private_ = struct
type nonrec worker_state = worker_state
let worker_ops = worker_ops
let runner_of_state (self : worker_state) = worker_ops.runner self
let create_single_threaded_state ~thread ?on_exn () : worker_state =
let st : state = create_ ?on_exn ~threads:[| thread |] () in
{ idx = 0; dom_idx = 0; st }
end

View file

@@ -1,58 +0,0 @@
(** A simple thread pool in FIFO order.
FIFO: first-in, first-out. Basically tasks are put into a queue, and worker
threads pull them out of the queue at the other end.
Since this uses a single blocking queue to manage tasks, it's very simple
and reliable. The number of worker threads is fixed, but they are spread
over several domains to enable parallelism.
This can be useful for latency-sensitive applications (e.g. as a pool of
workers for network servers). Work-stealing pools might have higher
throughput but they're very unfair to some tasks; by contrast, here, older
tasks have priority over younger tasks.
@since 0.5 *)
include module type of Runner
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?num_threads:int ->
?name:string ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
val create : (unit -> t, _) create_args
(** [create ()] makes a new thread pool.
@param on_init_thread
called at the beginning of each new thread in the pool.
@param num_threads
number of worker threads. See {!Ws_pool.create} for more details.
@param on_exit_thread called at the end of each worker thread in the pool.
@param name name for the pool, used in tracing (since 0.6) *)
val with_ : (unit -> (t -> 'a) -> 'a, _) create_args
(** [with_ () f] calls [f pool], where [pool] is obtained via {!create}. When
[f pool] returns or fails, [pool] is shutdown and its resources are
released. Most parameters are the same as in {!create}. *)
(**/**)
module Private_ : sig
type worker_state
val worker_ops : worker_state Worker_loop_.ops
val create_single_threaded_state :
thread:Thread.t ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
unit ->
worker_state
val runner_of_state : worker_state -> Runner.t
end
(**/**)
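A minimal usage sketch for this pool, with the parameters documented above and `Fut` as used elsewhere in this diff:
```ocaml
(* Sketch: submit tasks to a FIFO pool and collect their results;
   workers pick tasks up in submission order. *)
let () =
  Moonpool.Fifo_pool.with_ ~num_threads:4 () (fun pool ->
      let futs =
        List.init 10 (fun i -> Moonpool.Fut.spawn ~on:pool (fun () -> i * i))
      in
      List.iter
        (fun f -> Printf.printf "%d\n%!" (Moonpool.Fut.wait_block_exn f))
        futs)
```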

View file

@@ -1,332 +0,0 @@
(** Futures.
A future of type ['a t] represents the result of a computation that will
yield a value of type ['a].
Typically, the computation is running on a thread pool {!Runner.t} and will
proceed on some worker. Once set, a future cannot change. It either succeeds
(storing an [Ok x] with [x: 'a]) or fails (storing an [Error (exn, bt)] with
an exception and the corresponding backtrace).
Using {!spawn}, it's possible to start a bunch of tasks, obtaining futures,
and then use {!await} to get their result in the desired order.
Combinators such as {!map} and {!join_array} can be used to produce futures
from other futures (in a monadic way). Some combinators take a [on] argument
to specify a runner on which the intermediate computation takes place; for
example [map ~on:pool ~f fut] maps the value in [fut] using function [f],
applicatively; the call to [f] happens on the runner [pool] (once [fut]
resolves successfully with a value). Be aware that these combinators do not
preserve local storage. *)
type 'a or_error = ('a, Exn_bt.t) result
type 'a t = 'a Picos.Computation.t
(** A future with a result of type ['a]. *)
type 'a promise = private 'a t
(** A promise, which can be fulfilled exactly once to set the corresponding
future. This is a private alias of ['a t] since 0.7; previously it was
opaque. *)
val make : unit -> 'a t * 'a promise
(** Make a new future with the associated promise. *)
val make_promise : unit -> 'a promise
(** Same as {!make} but returns a single promise (which can be upcast to a
future). This is useful mostly to save memory; you probably don't need
it.
How to upcast to a future in the worst case:
{[
let prom = Fut.make_promise ()
let fut = (prom : _ Fut.promise :> _ Fut.t)
]}
@since 0.7 *)
val on_result : 'a t -> ('a or_error -> unit) -> unit
(** [on_result fut f] registers [f] to be called in the future when [fut] is
set; or calls [f] immediately if [fut] is already set.
{b NOTE:} it's ill-advised to do meaningful work inside the callback [f].
Instead, try to spawn another task on the runner, or use {!await}. *)
val on_result_ignore : _ t -> (Exn_bt.t option -> unit) -> unit
(** [on_result_ignore fut f] registers [f] to be called in the future when [fut]
is set; or calls [f] immediately if [fut] is already set. It does not pass
the result, only a success/error signal.
@since 0.7 *)
exception Already_fulfilled
val try_cancel : _ promise -> Exn_bt.t -> bool
(** [try_cancel promise ebt] tries to cancel the promise using the given
exception, returning [true]. It returns [false] if the promise is already
resolved.
@since 0.9 *)
val cancel : _ promise -> Exn_bt.t -> unit
(** Silent version of {!try_cancel}, ignoring the result.
@since 0.9 *)
val fulfill : 'a promise -> 'a or_error -> unit
(** Fulfill the promise, setting the future at the same time.
@raise Already_fulfilled if the promise is already fulfilled. *)
val fulfill_idempotent : 'a promise -> 'a or_error -> unit
(** Fulfill the promise, setting the future at the same time. Does nothing if
the promise is already fulfilled. *)
val return : 'a -> 'a t
(** Already settled future, with a result *)
val fail : exn -> Printexc.raw_backtrace -> _ t
(** Already settled future, with a failure *)
val fail_exn_bt : Exn_bt.t -> _ t
(** Fail from a bundle of exception and backtrace
@since 0.6 *)
val of_result : 'a or_error -> 'a t
(** Already resolved future from a result. *)
val is_resolved : _ t -> bool
(** [is_resolved fut] is [true] iff [fut] is resolved. *)
val peek : 'a t -> 'a or_error option
(** [peek fut] returns [Some r] if [fut] is currently resolved with [r], and
[None] if [fut] is not resolved yet. *)
exception Not_ready
(** @since 0.2 *)
val get_or_fail : 'a t -> 'a or_error
(** [get_or_fail fut] obtains the result from [fut] if it's fulfilled (i.e. if
[peek fut] returns [Some res], [get_or_fail fut] returns [res]).
@raise Not_ready if the future is not ready.
@since 0.2 *)
val get_or_fail_exn : 'a t -> 'a
(** [get_or_fail_exn fut] obtains the result from [fut] if it's fulfilled, like
{!get_or_fail}. If the result is an [Error _], the exception inside is
re-raised.
@raise Not_ready if the future is not ready.
@since 0.2 *)
val is_done : _ t -> bool
(** Is the future resolved? This is the same as [peek fut |> Option.is_some].
@since 0.2 *)
val is_success : _ t -> bool
(** Checks if the future is resolved with [Ok _] as a result.
@since 0.6 *)
val is_failed : _ t -> bool
(** Checks if the future is resolved with [Error _] as a result.
@since 0.6 *)
val raise_if_failed : _ t -> unit
(** [raise_if_failed fut] raises [e] if [fut] failed with [e].
@since 0.6 *)
(** {2 Combinators} *)
val spawn : on:Runner.t -> (unit -> 'a) -> 'a t
(** [spawn ~on f] runs [f()] on the given runner [on], and returns a future that
will hold its result. *)
val spawn_on_current_runner : (unit -> 'a) -> 'a t
(** This must be run from inside a runner, and schedules the new task on it as
well.
See {!Runner.get_current_runner} to see how the runner is found.
@since 0.5
@raise Failure if run from outside a runner. *)
val reify_error : 'a t -> 'a or_error t
(** [reify_error fut] turns a failing future into a non-failing one that contains
[Error (exn, bt)]. A non-failing future returning [x] is turned into [Ok x].
@since 0.4 *)
val map : ?on:Runner.t -> f:('a -> 'b) -> 'a t -> 'b t
(** [map ?on ~f fut] returns a new future [fut2] that resolves with [f x] if
[fut] resolved with [x]; and fails with [e] if [fut] fails with [e] or [f x]
raises [e].
@param on if provided, [f] runs on the given runner *)
val bind : ?on:Runner.t -> f:('a -> 'b t) -> 'a t -> 'b t
(** [bind ?on ~f fut] returns a new future [fut2] that resolves like the future
[f x] if [fut] resolved with [x]; and fails with [e] if [fut] fails with [e]
or [f x] raises [e].
This does not preserve local storage of [fut] inside [f].
@param on if provided, [f] runs on the given runner *)
val bind_reify_error : ?on:Runner.t -> f:('a or_error -> 'b t) -> 'a t -> 'b t
(** [bind_reify_error ?on ~f fut] returns a new future [fut2] that resolves like
the future [f (Ok x)] if [fut] resolved with [x]; and resolves like the
future [f (Error (exn, bt))] if [fut] fails with [exn] and backtrace [bt].
This does not preserve local storage of [fut] inside [f].
@param on if provided, [f] runs on the given runner
@since 0.4 *)
val join : 'a t t -> 'a t
(** [join fut] is [fut >>= Fun.id]. It joins the inner layer of the future.
@since 0.2 *)
val both : 'a t -> 'b t -> ('a * 'b) t
(** [both a b] succeeds with [x, y] if [a] succeeds with [x] and [b] succeeds
with [y], or fails if any of them fails. *)
val choose : 'a t -> 'b t -> ('a, 'b) Either.t t
(** [choose a b] succeeds with [Left x] if [a] succeeds with [x], or with
[Right y] if [b] succeeds with [y]; it fails only if both of them fail. If
they both succeed, it is not specified which result is used. *)
val choose_same : 'a t -> 'a t -> 'a t
(** [choose_same a b] succeeds with the value of one of [a] or [b] if they
succeed, or fails if both fail. If they both succeed, it is not specified
which result is used. *)
val join_array : 'a t array -> 'a array t
(** Wait for all the futures in the array. Fails if any future fails. *)
val join_list : 'a t list -> 'a list t
(** Wait for all the futures in the list. Fails if any future fails. *)
(** Advanced primitives for synchronization *)
module Advanced : sig
val barrier_on_abstract_container_of_futures :
iter:(('a t -> unit) -> 'cont -> unit) ->
len:('cont -> int) ->
aggregate_results:(('a t -> 'a) -> 'cont -> 'res) ->
'cont ->
'res t
(** [barrier_on_abstract_container_of_futures ~iter ~aggregate_results ~len
cont] takes a container of futures ([cont]), with [len cont] elements, and
returns a future result of type ['res] (possibly another type of
container).
This waits for all futures in [cont: 'cont] to be done (futures obtained
via [iter <some function> cont]). If they all succeed, their results are
aggregated into a new result of type ['res] via
[aggregate_results <some function> cont].
{b NOTE}: the behavior is not specified if [iter f cont] (for a function
f) doesn't call [f] on exactly [len cont] elements.
@since 0.5.1 *)
end
val map_list : f:('a -> 'b t) -> 'a list -> 'b list t
(** [map_list ~f l] is like [join_list @@ List.map f l].
@since 0.5.1 *)
val wait_array : _ t array -> unit t
(** [wait_array arr] waits for all futures in [arr] to resolve. It discards the
individual results of futures in [arr]. It fails if any future fails. *)
val wait_list : _ t list -> unit t
(** [wait_list l] waits for all futures in [l] to resolve. It discards the
individual results of futures in [l]. It fails if any future fails. *)
val for_ : on:Runner.t -> int -> (int -> unit) -> unit t
(** [for_ ~on n f] runs [f 0], [f 1], …, [f (n-1)] on the runner, and returns a
future that resolves when all the tasks have resolved, or fails as soon as
one task has failed. *)
val for_array : on:Runner.t -> 'a array -> (int -> 'a -> unit) -> unit t
(** [for_array ~on arr f] runs [f 0 arr.(0)], …, [f (n-1) arr.(n-1)] in the
runner (where [n = Array.length arr]), and returns a future that resolves
when all the tasks are done, or fails if any of them fails.
@since 0.2 *)
val for_list : on:Runner.t -> 'a list -> ('a -> unit) -> unit t
(** [for_list ~on l f] is like [for_array ~on (Array.of_list l) f].
@since 0.2 *)
(** {2 Await}
This suspends the current task using an OCaml 5 algebraic effect, and makes
preparations for the task to be resumed once the future has been resolved.
*)
val await : 'a t -> 'a
(** [await fut] suspends the current tasks until [fut] is fulfilled, then
resumes the task on this same runner (but possibly on a different
thread/domain).
@since 0.3
This must only be run from inside the runner itself. The runner must support
{!Suspend_}. *)
val yield : unit -> unit
(** Like {!Moonpool.yield}.
@since 0.10 *)
(** {2 Blocking} *)
val wait_block : 'a t -> 'a or_error
(** [wait_block fut] blocks the current thread until [fut] is resolved, and
returns its value.
{b NOTE:} this will monopolize the calling thread until
the future resolves. This can also easily cause deadlocks, if enough threads
in a pool call [wait_block] on futures running on the same pool or a pool
depending on it.
A good rule to avoid deadlocks is to run this from outside of any pool, or
to have an acyclic order between pools where [wait_block] is only called
from a pool on futures evaluated in a pool that comes lower in the
hierarchy. If this rule is broken, it is possible for all threads in a pool
to wait for futures that can only make progress on these same threads, hence
the deadlock. *)
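(** Example: a sketch of the discipline described above, where [wait_block] is
    only called from the main thread, which belongs to no pool ([heavy_work]
    is an illustrative placeholder):
    {[
      let () =
        Ws_pool.with_ () (fun pool ->
            let fut = Fut.spawn ~on:pool (fun () -> heavy_work ()) in
            match Fut.wait_block fut with
            | Ok v -> Printf.printf "got %d\n" v
            | Error ebt -> Exn_bt.raise ebt)
    ]} *)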
val wait_block_exn : 'a t -> 'a
(** Same as {!wait_block} but re-raises the exception if the future failed.
{b NOTE:} do check the cautionary note in {!wait_block} concerning
deadlocks. *)
(** {2 Infix operators}
These combinators run on either the current pool (if present), or on the
same thread that just fulfilled the previous future if not.
They were previously present as [module Infix_local] and [val infix], but
are now simplified.
@since 0.5 *)
(** @since 0.5 *)
module Infix : sig
val ( >|= ) : 'a t -> ('a -> 'b) -> 'b t
val ( >>= ) : 'a t -> ('a -> 'b t) -> 'b t
val ( let+ ) : 'a t -> ('a -> 'b) -> 'b t
val ( and+ ) : 'a t -> 'b t -> ('a * 'b) t
val ( let* ) : 'a t -> ('a -> 'b t) -> 'b t
val ( and* ) : 'a t -> 'b t -> ('a * 'b) t
end
include module type of Infix
module Infix_local = Infix
[@@deprecated "Use Infix"]
(** @deprecated use Infix instead *)
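(** Example: a sketch combining two futures applicatively with the binding
    operators:
    {[
      let add_futs (f1 : int t) (f2 : int t) : int t =
        let open Infix in
        let+ a = f1
        and+ b = f2 in
        a + b
    ]} *)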
(**/**)
module Private_ : sig
val unsafe_promise_of_fut : 'a t -> 'a promise
(** Do not use unless you know exactly what you are doing. *)
val as_computation : 'a t -> 'a Picos.Computation.t
(** Picos compat *)
end
(**/**)

View file

@ -1,7 +0,0 @@
(**/**)
module Private_hmap_ls_ = struct
let copy_fls _ _ = ()
end
(**/**)

View file

@ -1,68 +0,0 @@
open Types_
open struct
module FLS = Picos.Fiber.FLS
end
(** A local hmap, inherited in children fibers *)
let k_local_hmap : Hmap.t FLS.t = FLS.create ()
(** Access the local [hmap], or an empty one if not set *)
let[@inline] get_local_hmap () : Hmap.t =
match TLS.get_exn k_cur_fiber with
| exception TLS.Not_set -> Hmap.empty
| fiber -> FLS.get fiber ~default:Hmap.empty k_local_hmap
let[@inline] set_local_hmap (h : Hmap.t) : unit =
match TLS.get_exn k_cur_fiber with
| exception TLS.Not_set -> ()
| fiber -> FLS.set fiber k_local_hmap h
let[@inline] update_local_hmap (f : Hmap.t -> Hmap.t) : unit =
match TLS.get_exn k_cur_fiber with
| exception TLS.Not_set -> ()
| fiber ->
let h = FLS.get fiber ~default:Hmap.empty k_local_hmap in
let h = f h in
FLS.set fiber k_local_hmap h
(** @raise Invalid_argument if not present *)
let get_in_local_hmap_exn (k : 'a Hmap.key) : 'a =
let h = get_local_hmap () in
Hmap.get k h
let get_in_local_hmap_opt (k : 'a Hmap.key) : 'a option =
let h = get_local_hmap () in
Hmap.find k h
(** Remove given key from the local hmap *)
let[@inline] remove_in_local_hmap (k : _ Hmap.key) : unit =
update_local_hmap (Hmap.rem k)
let[@inline] set_in_local_hmap (k : 'a Hmap.key) (v : 'a) : unit =
update_local_hmap (Hmap.add k v)
(** [with_in_local_hmap k v f] calls [f()] in a context where [k] is bound to
[v] in the local hmap. Then it restores the previous binding for [k]. *)
let with_in_local_hmap (k : 'a Hmap.key) (v : 'a) f =
let h = get_local_hmap () in
match Hmap.find k h with
| None ->
set_in_local_hmap k v;
Fun.protect ~finally:(fun () -> remove_in_local_hmap k) f
| Some old_v ->
set_in_local_hmap k v;
Fun.protect ~finally:(fun () -> set_in_local_hmap k old_v) f
(**/**)
(* private functions, to be used by the rest of moonpool *)
module Private_hmap_ls_ = struct
(** Copy the hmap from f1.fls to f2.fls *)
let copy_fls (f1 : Picos.Fiber.t) (f2 : Picos.Fiber.t) : unit =
match FLS.get_exn f1 k_local_hmap with
| exception FLS.Not_set -> ()
| hmap -> FLS.set f2 k_local_hmap hmap
end
(**/**)

View file

@ -1,58 +0,0 @@
(** Mutex-protected resource.
This lock is a synchronous concurrency primitive, as a thin wrapper around
{!Mutex} that encourages proper management of the critical section in RAII
style:
{[
let (let@) = (@@)
let compute_foo =
(* enter critical section *)
let@ x = Lock.with_ protected_resource in
use_x;
return_foo ()
(* exit critical section *)
in
]}
This lock does not work well with {!Fut.await}. A critical section that
contains a call to [await] might cause deadlocks, or lock starvation,
because it will hold onto the lock while it goes to sleep.
@since 0.3 *)
type 'a t
(** A value protected by a mutex *)
val create : 'a -> 'a t
(** Create a new protected value. *)
val with_ : 'a t -> ('a -> 'b) -> 'b
(** [with_ l f] runs [f x] where [x] is the value protected with the lock [l],
in a critical section. If [f x] fails, [with_ l f] fails too but the
lock is released. *)
val update : 'a t -> ('a -> 'a) -> unit
(** [update l f] replaces the content [x] of [l] with [f x], while protected by
the mutex. *)
val update_map : 'a t -> ('a -> 'a * 'b) -> 'b
(** [update_map l f] computes [x', y = f (get l)], then puts [x'] in [l] and
returns [y], while protected by the mutex. *)
val mutex : _ t -> Mutex.t
(** Underlying mutex. *)
val get : 'a t -> 'a
(** Atomically get the value in the lock. The value that is returned isn't
protected! *)
val set : 'a t -> 'a -> unit
(** Atomically set the value.
{b NOTE} caution: using {!get} and {!set} as if this were a {!ref} is an
anti-pattern and will not protect data against some race conditions. *)
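(** Example: a sketch contrasting the racy {!get}/{!set} pattern with
    {!update} (the [counter] value is illustrative):
    {[
      let counter : int Lock.t = Lock.create 0

      (* racy: another thread may run between [get] and [set],
         losing an increment *)
      let incr_racy () = Lock.set counter (Lock.get counter + 1)

      (* correct: the read-modify-write happens inside one critical section *)
      let incr_ok () = Lock.update counter (fun x -> x + 1)
    ]} *)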

View file

@ -1,26 +0,0 @@
exception Oh_no of Exn_bt.t
let main' ?(block_signals = false) () (f : Runner.t -> 'a) : 'a =
let worker_st =
Fifo_pool.Private_.create_single_threaded_state ~thread:(Thread.self ())
~on_exn:(fun e bt -> raise (Oh_no (Exn_bt.make e bt)))
()
in
let runner = Fifo_pool.Private_.runner_of_state worker_st in
try
let fut = Fut.spawn ~on:runner (fun () -> f runner) in
Fut.on_result fut (fun _ -> Runner.shutdown_without_waiting runner);
(* run the main thread *)
Worker_loop_.worker_loop worker_st
~block_signals (* do not disturb existing thread *)
~ops:Fifo_pool.Private_.worker_ops;
match Fut.peek fut with
| Some (Ok x) -> x
| Some (Error ebt) -> Exn_bt.raise ebt
| None -> assert false
with Oh_no ebt -> Exn_bt.raise ebt
let main f =
main' () f ~block_signals:false (* do not disturb existing thread *)

View file

@ -1,30 +0,0 @@
(** Main thread.
This evolved from [Moonpool.Immediate_runner], but unlike it, this API
assumes you run it in a thread (possibly the main thread) which will block
until the initial computation is done.
This means it's reasonable to use [Main.main (fun _runner -> do_everything)] at
the beginning of the program. Other Moonpool pools can be created for
background tasks, etc. to do the heavy lifting, and the main thread (inside
this immediate runner) can coordinate tasks via [Fiber.await].
Aside from the fact that this blocks the caller thread, it is fairly similar
to {!Background_thread} in that there's a single worker to process
tasks/fibers.
This handles the concurrency effects used in moonpool, including [await] and
[yield].
This module was migrated from the late [Moonpool_fib].
@since 0.10 *)
val main : (Runner.t -> 'a) -> 'a
(** [main f] runs [f()] in a scope that handles effects, including
{!Fiber.await}.
This scope can run background tasks as well, in a cooperative fashion. *)
val main' : ?block_signals:bool -> unit -> (Runner.t -> 'a) -> 'a
(** Same as {!main} but with room for optional arguments. *)
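(** Example: a sketch of a program entry point: the main thread coordinates
    while a work-stealing pool does the heavy lifting (the computation is
    illustrative):
    {[
      let () =
        Main.main (fun _runner ->
            Ws_pool.with_ () (fun pool ->
                let fut = Fut.spawn ~on:pool (fun () -> 21 * 2) in
                Printf.printf "result: %d\n" (Fut.await fut)))
    ]} *)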

View file

@ -1,45 +0,0 @@
open Types_
exception Shutdown = Runner.Shutdown
let start_thread_on_some_domain f x =
let did = Random.int (Domain_pool_.max_number_of_domains ()) in
Domain_pool_.run_on_and_wait did (fun () -> Thread.create f x)
let run_async = Runner.run_async
let run_wait_block = Runner.run_wait_block
let get_current_runner = Runner.get_current_runner
let recommended_thread_count () = Domain_.recommended_number ()
let spawn = Fut.spawn
let spawn_on_current_runner = Fut.spawn_on_current_runner
let await = Fut.await
let yield = Picos.Fiber.yield
module Atomic = Atomic
module Blocking_queue = Bb_queue
module Background_thread = Background_thread
module Chan = Chan
module Exn_bt = Exn_bt
module Fifo_pool = Fifo_pool
module Fut = Fut
module Lock = Lock
module Main = Main
module Immediate_runner = struct end
module Runner = Runner
module Task_local_storage = Task_local_storage
module Thread_local_storage = Thread_local_storage
module Trigger = Trigger
module Ws_pool = Ws_pool
(* re-export main *)
include Main
module Private = struct
module Ws_deque_ = Ws_deque_
module Worker_loop_ = Worker_loop_
module Domain_ = Domain_
module Tracing_ = Tracing_
module Types_ = Types_
let num_domains = Domain_pool_.max_number_of_domains
end

View file

@ -1,233 +0,0 @@
(** Moonpool
A pool within a bigger pool (i.e. the ocean). Here, we're talking about pools
of [Thread.t] that are dispatched over several [Domain.t] to enable
parallelism.
We provide several implementations of pools with distinct scheduling
strategies, alongside some concurrency primitives such as guarding locks
({!Lock.t}) and futures ({!Fut.t}). *)
module Ws_pool = Ws_pool
module Fifo_pool = Fifo_pool
module Background_thread = Background_thread
module Runner = Runner
module Trigger = Trigger
module Main = Main
module Immediate_runner : sig end
[@@deprecated "use Moonpool_fib.Main"]
(** Runner that runs tasks in the caller thread.
This is removed since 0.6, and replaced by {!Moonpool_fib.Main}. *)
module Exn_bt = Exn_bt
exception Shutdown
(** Exception raised when trying to run tasks on runners that have been shut
down.
@since 0.6 *)
val start_thread_on_some_domain : ('a -> unit) -> 'a -> Thread.t
(** Similar to {!Thread.create}, but it picks a background domain at random to
run the thread. This ensures that we don't always pick the same domain to
run all the various threads needed in an application (timers, event loops,
etc.) *)
val run_async : ?fiber:Picos.Fiber.t -> Runner.t -> (unit -> unit) -> unit
(** [run_async runner task] schedules the task to run on the given runner. This
means [task()] will be executed at some point in the future, possibly in
another thread.
@param fiber optional initial (picos) fiber state
@since 0.5 *)
val run_wait_block : ?fiber:Picos.Fiber.t -> Runner.t -> (unit -> 'a) -> 'a
(** [run_wait_block runner f] schedules [f] for later execution on the runner,
like {!run_async}. It then blocks the current thread until [f()] is done
executing, and returns its result. If [f()] raises an exception, then
[run_wait_block pool f] will raise it as well.
See {!run_async} for more details.
{b NOTE} be careful with deadlocks (see notes in {!Fut.wait_block} about the
required discipline to avoid deadlocks).
@raise Shutdown if the runner was already shut down
@since 0.6 *)
val recommended_thread_count : unit -> int
(** Number of threads recommended to saturate the CPU. For IO pools this makes
little sense (you might want more threads than this because many of them
will be blocked most of the time).
@since 0.5 *)
val spawn : on:Runner.t -> (unit -> 'a) -> 'a Fut.t
(** [spawn ~on f] runs [f()] on the runner (a thread pool typically) and returns
a future result for it. See {!Fut.spawn}.
@since 0.5 *)
val spawn_on_current_runner : (unit -> 'a) -> 'a Fut.t
(** See {!Fut.spawn_on_current_runner}.
@since 0.5 *)
val get_current_runner : unit -> Runner.t option
(** See {!Runner.get_current_runner}
@since 0.7 *)
val await : 'a Fut.t -> 'a
(** Await a future, must be run on a moonpool runner. See {!Fut.await}. Only on
OCaml >= 5.0.
@since 0.5 *)
val yield : unit -> unit
(** Yield from the current task, must be run on a moonpool runner. Only on OCaml
>= 5.0.
@since 0.9 *)
module Lock = Lock
module Fut = Fut
module Chan = Chan
module Task_local_storage = Task_local_storage
module Thread_local_storage = Thread_local_storage
(** A simple blocking queue.
This queue is quite basic and will not behave well under heavy contention.
However, it can be sufficient for many practical use cases.
{b NOTE}: this queue will typically block the caller thread in case the
operation (push/pop) cannot proceed. Be wary of deadlocks when using the
queue {i from} a pool if the other end is also pushed to/popped from
within that same pool.
See discussion on {!Fut.wait_block} for more details on deadlocks and how to
mitigate the risk of running into them.
More scalable queues can be found in Lockfree
(https://github.com/ocaml-multicore/lockfree/) *)
module Blocking_queue : sig
type 'a t
(** Unbounded blocking queue.
This queue is thread-safe and will block when calling {!pop} on it when
it's empty. *)
val create : unit -> _ t
(** Create a new unbounded queue. *)
val size : _ t -> int
(** Number of items currently in the queue. Note that [pop] might still block
if this returns a non-zero number, since another thread might have
consumed the items in the meantime.
@since 0.2 *)
exception Closed
val push : 'a t -> 'a -> unit
(** [push q x] pushes [x] into [q], and returns [()].
In the current implementation, [push q] will never block for a long time;
it will only block while waiting for a lock so it can push the element.
@raise Closed if the queue is closed (by a previous call to [close q]) *)
val pop : 'a t -> 'a
(** [pop q] pops the next element in [q]. It might block until an element
comes.
@raise Closed if the queue was closed before a new element was available.
*)
val close : _ t -> unit
(** Close the queue, meaning there won't be any more [push] allowed, i.e. [push]
will raise {!Closed}.
[pop] will keep working and will return the elements present in the queue,
until it's entirely drained; then [pop] will also raise {!Closed}. *)
val try_pop : force_lock:bool -> 'a t -> 'a option
(** [try_pop q] immediately pops the first element of [q], if any, or returns
[None] without blocking.
@param force_lock
if true, use {!Mutex.lock} (which can block under contention); if false,
use {!Mutex.try_lock}, which might return [None] even in presence of an
element if there's contention *)
val try_push : 'a t -> 'a -> bool
(** [try_push q x] tries to push [x] into [q] without blocking; it returns
[true] if the push succeeded, and [false] otherwise.
@raise Closed if the locking succeeded but the queue is closed. *)
val transfer : 'a t -> 'a Queue.t -> unit
(** [transfer bq q2] transfers all items presently in [bq] into [q2] in one
atomic section, and clears [bq]. It blocks if no element is in [bq].
This is useful to consume elements from the queue in batch. Create a
[Queue.t] locally:
{[
let dowork (work_queue : job Bb_queue.t) =
(* local queue, not thread safe *)
let local_q = Queue.create () in
try
while true do
(* work on local events, already on this thread *)
while not (Queue.is_empty local_q) do
let job = Queue.pop local_q in
process_job job
done;
(* get all the events in the incoming blocking queue, in
one single critical section. *)
Bb_queue.transfer work_queue local_q
done
with Bb_queue.Closed -> ()
]}
@since 0.4 *)
type 'a gen = unit -> 'a option
type 'a iter = ('a -> unit) -> unit
val to_iter : 'a t -> 'a iter
(** [to_iter q] returns an iterator over all items in the queue. This might
not terminate if [q] is never closed.
@since 0.4 *)
val to_gen : 'a t -> 'a gen
(** [to_gen q] returns a generator from the queue.
@since 0.4 *)
val to_seq : 'a t -> 'a Seq.t
(** [to_seq q] returns a (transient) sequence from the queue.
@since 0.4 *)
end
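(** Example: a minimal producer/consumer sketch using {!Blocking_queue} with a
    plain thread (the values and thread structure are illustrative):
    {[
      let () =
        let q : int Blocking_queue.t = Blocking_queue.create () in
        let consumer =
          Thread.create
            (fun () ->
              try
                while true do
                  Printf.printf "got %d\n" (Blocking_queue.pop q)
                done
              with Blocking_queue.Closed -> ())
            ()
        in
        for i = 1 to 5 do
          Blocking_queue.push q i
        done;
        Blocking_queue.close q;
        Thread.join consumer
    ]} *)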
module Atomic = Atomic
(** Atomic values.
This is either a shim using [ref], on pre-OCaml 5, or the standard [Atomic]
module on OCaml 5. *)
include module type of struct
include Main
end
(**/**)
(** Private internals, with no stability guarantees *)
module Private : sig
module Ws_deque_ = Ws_deque_
(** A deque for work stealing, fixed size. *)
module Worker_loop_ = Worker_loop_
(** Worker loop. This is useful to implement custom runners; it should run on
each thread of the runner.
@since 0.7 *)
module Domain_ = Domain_
(** Utils for domains *)
module Tracing_ = Tracing_
module Types_ = Types_
val num_domains : unit -> int
(** Number of domains in the backing domain pool *)
end

View file

@ -1,88 +0,0 @@
(** Interface for runners.
This provides an abstraction for running tasks in the background, which is
implemented by various thread pools.
@since 0.3 *)
type fiber = Picos.Fiber.t
type task = unit -> unit
type t
(** A runner.
If a runner is no longer needed, {!shutdown} can be used to signal all
worker threads in it to stop (after they finish their work), and wait for
them to stop.
The threads are distributed across a fixed domain pool (whose size is
determined by {!Domain.recommended_domain_count} on OCaml 5, and simply the
single runtime on OCaml 4). *)
val size : t -> int
(** Number of threads/workers. *)
val num_tasks : t -> int
(** Current number of tasks. This is at best a snapshot, useful for metrics and
debugging. *)
val shutdown : t -> unit
(** Shutdown the runner and wait for it to terminate. Idempotent. *)
val shutdown_without_waiting : t -> unit
(** Shutdown the pool, and do not wait for it to terminate. Idempotent. *)
exception Shutdown
val run_async : ?fiber:fiber -> t -> task -> unit
(** [run_async pool f] schedules [f] for later execution on the runner in one of
the threads. [f()] will run on one of the runner's worker threads/domains.
@param fiber if provided, run the task with this initial fiber data
@raise Shutdown if the runner was shut down before [run_async] was called.
*)
val run_wait_block : ?fiber:fiber -> t -> (unit -> 'a) -> 'a
(** [run_wait_block pool f] schedules [f] for later execution on the pool, like
{!run_async}. It then blocks the current thread until [f()] is done
executing, and returns its result. If [f()] raises an exception, then
[run_wait_block pool f] will raise it as well.
{b NOTE} be careful with deadlocks (see notes in {!Fut.wait_block} about the
required discipline to avoid deadlocks).
@raise Shutdown if the runner was already shut down *)
val dummy : t
(** Runner that fails when scheduling tasks on it. Calling {!run_async} on it
will raise Failure.
@since 0.6 *)
(** {2 Implementing runners} *)
(** This module is specifically intended for users who implement their own
runners. Regular users of Moonpool should not need to look at it. *)
module For_runner_implementors : sig
val create :
size:(unit -> int) ->
num_tasks:(unit -> int) ->
shutdown:(wait:bool -> unit -> unit) ->
run_async:(fiber:fiber -> task -> unit) ->
unit ->
t
(** Create a new runner.
{b NOTE}: the runner should support DLA and {!Suspend_} on OCaml 5.x, so
that {!Fork_join} and other 5.x features work properly. *)
val k_cur_runner : t Thread_local_storage.t
(** Key that should be used by each runner to store itself in TLS on every
thread it controls, so that tasks running on these threads can access the
runner. This is necessary for {!get_current_runner} to work. *)
end
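(** Example: a deliberately naive runner that executes each task immediately
    in the calling thread, as a sketch of how {!For_runner_implementors.create}
    fits together. A real implementation would also install TLS state and
    handle the suspension effects:
    {[
      let immediate : t =
        For_runner_implementors.create
          ~size:(fun () -> 0)
          ~num_tasks:(fun () -> 0)
          ~shutdown:(fun ~wait:_ () -> ())
          ~run_async:(fun ~fiber:_ task -> task ())
          ()
    ]} *)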
val get_current_runner : unit -> t option
(** Access the current runner. This returns [Some r] if the call happens on a
thread that belongs in a runner.
@since 0.5 *)
val get_current_fiber : unit -> fiber option
(** [get_current_fiber ()] returns the fiber for the currently running task,
if any. *)

View file

@ -1,44 +0,0 @@
open Types_
module PF = Picos.Fiber
type 'a t = 'a PF.FLS.t
exception Not_set = PF.FLS.Not_set
let create = PF.FLS.create
let[@inline] get_exn k =
let fiber = get_current_fiber_exn () in
PF.FLS.get_exn fiber k
let get_opt k =
match get_current_fiber () with
| None -> None
| Some fiber ->
(match PF.FLS.get_exn fiber k with
| x -> Some x
| exception Not_set -> None)
let[@inline] get k ~default =
match get_current_fiber () with
| None -> default
| Some fiber -> PF.FLS.get fiber ~default k
let[@inline] set k v : unit =
let fiber = get_current_fiber_exn () in
PF.FLS.set fiber k v
let with_value k v (f : _ -> 'b) : 'b =
let fiber = get_current_fiber_exn () in
match PF.FLS.get_exn fiber k with
| exception Not_set ->
PF.FLS.set fiber k v;
(* nothing to restore back to, just call [f] *)
f ()
| old_v ->
PF.FLS.set fiber k v;
let finally () = PF.FLS.set fiber k old_v in
Fun.protect f ~finally
include Hmap_ls_

View file

@ -1,43 +0,0 @@
(** Task-local storage.
This storage is associated to the current task, just like thread-local
storage is associated with the current thread. The storage is carried along
in case the current task is suspended.
@since 0.6 *)
type 'a t = 'a Picos.Fiber.FLS.t
val create : unit -> 'a t
(** [create ()] makes a new key. Keys are expensive and should never be
allocated dynamically or in a loop. *)
exception Not_set
val get_exn : 'a t -> 'a
(** [get_exn k] gets the value for the current task for key [k]. Must be run from
inside a task running on a runner.
@raise Not_set otherwise *)
val get_opt : 'a t -> 'a option
(** [get_opt k] gets the current task's value for key [k], or [None] if not run
from inside the task. *)
val get : 'a t -> default:'a -> 'a
val set : 'a t -> 'a -> unit
(** [set k v] sets the storage for [k] to [v]. Must be run from inside a task
running on a runner.
@raise Failure otherwise *)
val with_value : 'a t -> 'a -> (unit -> 'b) -> 'b
(** [with_value k v f] sets [k] to [v] for the duration of the call to [f()].
When [f()] returns (or fails), [k] is restored to its old value. *)
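(** Example: a sketch of carrying a request identifier through a task running
    on a moonpool runner (the key and values are illustrative):
    {[
      let k_request_id : string Task_local_storage.t =
        Task_local_storage.create ()

      let handle_request pool (id : string) : unit Fut.t =
        Fut.spawn ~on:pool (fun () ->
            Task_local_storage.with_value k_request_id id (fun () ->
                (* anywhere in this task, the id can be recovered: *)
                Printf.printf "handling %s\n"
                  (Task_local_storage.get_exn k_request_id)))
    ]} *)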
(** {2 Local [Hmap.t]}
This requires [hmap] to be installed. *)
include module type of struct
include Hmap_ls_
end

View file

@ -1,6 +0,0 @@
(** Triggers from picos
@since 0.7 *)
include Picos.Trigger
let[@inline] await_exn (self : t) = await self |> Option.iter Exn_bt.raise

View file

@ -1,38 +0,0 @@
module TLS = Thread_local_storage
module Domain_pool_ = Moonpool_dpool
type task = unit -> unit
type fiber = Picos.Fiber.t
type runner = {
run_async: fiber:fiber -> task -> unit;
shutdown: wait:bool -> unit -> unit;
size: unit -> int;
num_tasks: unit -> int;
}
let k_cur_runner : runner TLS.t = TLS.create ()
let k_cur_fiber : fiber TLS.t = TLS.create ()
let _dummy_computation : Picos.Computation.packed =
let c = Picos.Computation.create () in
Picos.Computation.cancel c (Failure "dummy fiber") (Printexc.get_callstack 0);
Picos.Computation.Packed c
let _dummy_fiber = Picos.Fiber.create_packed ~forbid:true _dummy_computation
let[@inline] get_current_runner () : _ option = TLS.get_opt k_cur_runner
let[@inline] get_current_fiber () : fiber option =
match TLS.get_exn k_cur_fiber with
| f when f != _dummy_fiber -> Some f
| _ -> None
| exception TLS.Not_set -> None
let error_get_current_fiber_ =
"Moonpool: get_current_fiber was called outside of a fiber."
let[@inline] get_current_fiber_exn () : fiber =
match TLS.get_exn k_cur_fiber with
| f when f != _dummy_fiber -> f
| _ -> failwith error_get_current_fiber_
| exception TLS.Not_set -> failwith error_get_current_fiber_

View file

@ -1,192 +0,0 @@
open Types_
type fiber = Picos.Fiber.t
type task_full =
| T_start of {
fiber: fiber;
f: unit -> unit;
}
| T_resume : {
fiber: fiber;
k: unit -> unit;
}
-> task_full
exception No_more_tasks
type 'st ops = {
schedule: 'st -> task_full -> unit;
get_next_task: 'st -> task_full; (** @raise No_more_tasks *)
on_exn: 'st -> Exn_bt.t -> unit;
runner: 'st -> Runner.t;
before_start: 'st -> unit;
cleanup: 'st -> unit;
}
(** A dummy task. *)
let _dummy_task : task_full = T_start { f = ignore; fiber = _dummy_fiber }
let[@inline] discontinue k exn =
let bt = Printexc.get_raw_backtrace () in
Effect.Deep.discontinue_with_backtrace k exn bt
let[@inline] raise_with_bt exn =
let bt = Printexc.get_raw_backtrace () in
Printexc.raise_with_backtrace exn bt
let with_handler (type st) ~(ops : st ops) (self : st) : (unit -> unit) -> unit
=
let current =
Some
(fun k ->
match get_current_fiber_exn () with
| fiber -> Effect.Deep.continue k fiber
| exception exn -> discontinue k exn)
and yield =
Some
(fun k ->
let fiber = get_current_fiber_exn () in
match
let k () = Effect.Deep.continue k () in
ops.schedule self @@ T_resume { fiber; k }
with
| () -> ()
| exception exn -> discontinue k exn)
and reschedule trigger fiber k : unit =
ignore (Picos.Fiber.unsuspend fiber trigger : bool);
let k () = Picos.Fiber.resume fiber k in
let task = T_resume { fiber; k } in
ops.schedule self task
in
let effc (type a) :
a Effect.t -> ((a, _) Effect.Deep.continuation -> _) option = function
| Picos.Fiber.Current -> current
| Picos.Fiber.Yield -> yield
| Picos.Fiber.Spawn r ->
Some
(fun k ->
match
let f () = r.main r.fiber in
let task = T_start { fiber = r.fiber; f } in
ops.schedule self task
with
| unit -> Effect.Deep.continue k unit
| exception exn -> discontinue k exn)
| Picos.Trigger.Await trigger ->
Some
(fun k ->
let fiber = get_current_fiber_exn () in
(* when triggers is signaled, reschedule task *)
if not (Picos.Fiber.try_suspend fiber trigger fiber k reschedule) then
(* trigger was already signaled, reschedule task now *)
reschedule trigger fiber k)
| Picos.Computation.Cancel_after _r ->
Some
(fun k ->
(* not implemented *)
let exn = Failure "Moonpool: cancel_after is not supported." in
discontinue k exn)
| _ -> None
in
let handler = Effect.Deep.{ retc = Fun.id; exnc = raise_with_bt; effc } in
fun f -> Effect.Deep.match_with f () handler
module type FINE_GRAINED_ARGS = sig
type st
val ops : st ops
val st : st
end
module Fine_grained (Args : FINE_GRAINED_ARGS) () = struct
open Args
let cur_fiber : fiber ref = ref _dummy_fiber
let runner = ops.runner st
type state =
| New
| Ready
| Torn_down
let state = ref New
let run_task (task : task_full) : unit =
let fiber =
match task with
| T_start { fiber; _ } | T_resume { fiber; _ } -> fiber
in
cur_fiber := fiber;
TLS.set k_cur_fiber fiber;
(* let _ctx = before_task runner in *)
(* run the task now, catching errors, handling effects *)
assert (task != _dummy_task);
(try
match task with
| T_start { fiber = _; f } -> with_handler ~ops st f
| T_resume { fiber = _; k } ->
(* this is already in an effect handler *)
k ()
with e ->
let bt = Printexc.get_raw_backtrace () in
let ebt = Exn_bt.make e bt in
ops.on_exn st ebt);
(* after_task runner _ctx; *)
cur_fiber := _dummy_fiber;
TLS.set k_cur_fiber _dummy_fiber
let setup ~block_signals () : unit =
if !state <> New then invalid_arg "worker_loop.setup: not a new instance";
state := Ready;
if block_signals then Signals_.ignore_signals_ ();
TLS.set Runner.For_runner_implementors.k_cur_runner runner;
ops.before_start st
let run ?(max_tasks = max_int) () : unit =
if !state <> Ready then invalid_arg "worker_loop.run: not setup";
let continue = ref true in
let n_tasks = ref 0 in
while !continue && !n_tasks < max_tasks do
match ops.get_next_task st with
| task ->
incr n_tasks;
run_task task
| exception No_more_tasks -> continue := false
done
let teardown () =
if !state <> Torn_down then (
state := Torn_down;
cur_fiber := _dummy_fiber;
ops.cleanup st
)
end
let worker_loop (type st) ~block_signals ~(ops : st ops) (self : st) : unit =
let module FG =
Fine_grained
(struct
type nonrec st = st
let ops = ops
let st = self
end)
()
in
FG.setup ~block_signals ();
try
FG.run ();
FG.teardown ()
with exn ->
let bt = Printexc.get_raw_backtrace () in
FG.teardown ();
Printexc.raise_with_backtrace exn bt

View file

@ -1,51 +0,0 @@
(** Internal module that is used for workers.
A thread pool should use this [worker_loop] to run tasks, handle effects,
etc. *)
open Types_
type task_full =
| T_start of {
fiber: fiber;
f: unit -> unit;
}
| T_resume : {
fiber: fiber;
k: unit -> unit;
}
-> task_full
val _dummy_task : task_full
exception No_more_tasks
type 'st ops = {
schedule: 'st -> task_full -> unit;
get_next_task: 'st -> task_full;
on_exn: 'st -> Exn_bt.t -> unit;
runner: 'st -> Runner.t;
before_start: 'st -> unit;
cleanup: 'st -> unit;
}
module type FINE_GRAINED_ARGS = sig
type st
val ops : st ops
val st : st
end
module Fine_grained (_ : FINE_GRAINED_ARGS) () : sig
val setup : block_signals:bool -> unit -> unit
(** Just initialize the loop *)
val run : ?max_tasks:int -> unit -> unit
(** Run the loop until no task remains or until [max_tasks] tasks have been
run *)
val teardown : unit -> unit
(** Tear down the loop *)
end
val worker_loop : block_signals:bool -> ops:'st ops -> 'st -> unit

View file

@ -1,320 +0,0 @@
open Types_
module A = Atomic
module WSQ = Ws_deque_
module WL = Worker_loop_
include Runner
let ( let@ ) = ( @@ )
module Id = struct
type t = unit ref
(** Unique identifier for a pool *)
let create () : t = Sys.opaque_identity (ref ())
let equal : t -> t -> bool = ( == )
end
type state = {
id_: Id.t;
(** Unique to this pool. Used to make sure tasks stay within the same
pool. *)
active: bool A.t; (** Becomes [false] when the pool is shutdown. *)
mutable workers: worker_state array; (** Fixed set of workers. *)
main_q: WL.task_full Queue.t;
(** Main queue for tasks coming from the outside *)
mutable n_waiting: int; (* protected by mutex *)
mutable n_waiting_nonzero: bool; (** [n_waiting > 0] *)
mutex: Mutex.t;
cond: Condition.t;
mutable as_runner: t;
(* init options *)
name: string option;
on_init_thread: dom_id:int -> t_id:int -> unit -> unit;
on_exit_thread: dom_id:int -> t_id:int -> unit -> unit;
on_exn: exn -> Printexc.raw_backtrace -> unit;
}
(** internal state *)
and worker_state = {
mutable thread: Thread.t;
idx: int;
dom_id: int;
st: state;
q: WL.task_full WSQ.t; (** Work stealing queue *)
rng: Random.State.t;
}
(** State for a given worker. Only this worker is allowed to push into the
queue, but other workers can come and steal from it if they're idle. *)
let[@inline] size_ (self : state) = Array.length self.workers
let num_tasks_ (self : state) : int =
let n = ref 0 in
n := Queue.length self.main_q;
Array.iter (fun w -> n := !n + WSQ.size w.q) self.workers;
!n
(** TLS, used by workers to store their specific state and be able to retrieve it
from tasks when we schedule new sub-tasks. *)
let k_worker_state : worker_state TLS.t = TLS.create ()
let[@inline] get_current_worker_ () : worker_state option =
TLS.get_opt k_worker_state
(** Try to wake up a waiter, if there's any. *)
let[@inline] try_wake_someone_ (self : state) : unit =
if self.n_waiting_nonzero then (
Mutex.lock self.mutex;
Condition.signal self.cond;
Mutex.unlock self.mutex
)
(** Push into worker's local queue, open to work stealing. Precondition: this
runs on the worker thread whose state is [self] *)
let schedule_on_current_worker (self : worker_state) task : unit =
(* we're on this same pool, schedule in the worker's state. Otherwise
we might also be on pool A but asking to schedule on pool B,
so we have to check that identifiers match. *)
let pushed = WSQ.push self.q task in
if pushed then
try_wake_someone_ self.st
else (
(* overflow into main queue *)
Mutex.lock self.st.mutex;
Queue.push task self.st.main_q;
if self.st.n_waiting_nonzero then Condition.signal self.st.cond;
Mutex.unlock self.st.mutex
)
(** Push into the shared queue of this pool *)
let schedule_in_main_queue (self : state) task : unit =
if A.get self.active then (
(* push into the main queue *)
Mutex.lock self.mutex;
Queue.push task self.main_q;
if self.n_waiting_nonzero then Condition.signal self.cond;
Mutex.unlock self.mutex
) else
(* notify the caller that scheduling tasks is no
longer permitted *)
raise Shutdown
let schedule_from_w (self : worker_state) (task : WL.task_full) : unit =
match get_current_worker_ () with
| Some w when Id.equal self.st.id_ w.st.id_ ->
(* use worker from the same pool *)
schedule_on_current_worker w task
| _ -> schedule_in_main_queue self.st task
exception Got_task of WL.task_full
(** Try to steal a task.
@raise Got_task if it finds one. *)
let try_to_steal_work_once_ (self : worker_state) : unit =
let init = Random.State.int self.rng (Array.length self.st.workers) in
for i = 0 to Array.length self.st.workers - 1 do
let w' =
Array.unsafe_get self.st.workers
((i + init) mod Array.length self.st.workers)
in
if self != w' then (
match WSQ.steal w'.q with
| Some t -> raise_notrace (Got_task t)
| None -> ()
)
done
(** Wait on condition. Precondition: we hold the mutex. *)
let[@inline] wait_for_condition_ (self : state) : unit =
self.n_waiting <- self.n_waiting + 1;
if self.n_waiting = 1 then self.n_waiting_nonzero <- true;
Condition.wait self.cond self.mutex;
self.n_waiting <- self.n_waiting - 1;
if self.n_waiting = 0 then self.n_waiting_nonzero <- false
let rec get_next_task (self : worker_state) : WL.task_full =
(* see if we can empty the local queue *)
match WSQ.pop_exn self.q with
| task ->
try_wake_someone_ self.st;
task
| exception WSQ.Empty -> try_to_steal_from_other_workers_ self
and try_to_steal_from_other_workers_ (self : worker_state) =
match try_to_steal_work_once_ self with
| exception Got_task task -> task
| () -> wait_on_main_queue self
and wait_on_main_queue (self : worker_state) : WL.task_full =
Mutex.lock self.st.mutex;
match Queue.pop self.st.main_q with
| task ->
Mutex.unlock self.st.mutex;
task
| exception Queue.Empty ->
(* wait here *)
if A.get self.st.active then (
wait_for_condition_ self.st;
(* see if a task became available *)
match Queue.pop self.st.main_q with
| task ->
Mutex.unlock self.st.mutex;
task
| exception Queue.Empty ->
Mutex.unlock self.st.mutex;
try_to_steal_from_other_workers_ self
) else (
(* do nothing more: no task in main queue, and we are shutting
down so no new task should arrive.
The exception is if another task is creating subtasks
that overflow into the main queue, but we can ignore that at
the price of slightly decreased performance for the last few
tasks *)
Mutex.unlock self.st.mutex;
raise WL.No_more_tasks
)
let before_start (self : worker_state) : unit =
let t_id = Thread.id @@ Thread.self () in
self.st.on_init_thread ~dom_id:self.dom_id ~t_id ();
TLS.set k_cur_fiber _dummy_fiber;
TLS.set Runner.For_runner_implementors.k_cur_runner self.st.as_runner;
TLS.set k_worker_state self;
(* set thread name *)
Option.iter
(fun name ->
Tracing_.set_thread_name (Printf.sprintf "%s.worker.%d" name self.idx))
self.st.name
let cleanup (self : worker_state) : unit =
(* on termination, decrease refcount of underlying domain *)
Domain_pool_.decr_on self.dom_id;
let t_id = Thread.id @@ Thread.self () in
self.st.on_exit_thread ~dom_id:self.dom_id ~t_id ()
let worker_ops : worker_state WL.ops =
let runner (st : worker_state) = st.st.as_runner in
let on_exn (st : worker_state) (ebt : Exn_bt.t) =
st.st.on_exn (Exn_bt.exn ebt) (Exn_bt.bt ebt)
in
{
WL.schedule = schedule_from_w;
runner;
get_next_task;
on_exn;
before_start;
cleanup;
}
let default_thread_init_exit_ ~dom_id:_ ~t_id:_ () = ()
let shutdown_ ~wait (self : state) : unit =
if A.exchange self.active false then (
Mutex.lock self.mutex;
Condition.broadcast self.cond;
Mutex.unlock self.mutex;
if wait then Array.iter (fun w -> Thread.join w.thread) self.workers
)
let as_runner_ (self : state) : t =
Runner.For_runner_implementors.create
~shutdown:(fun ~wait () -> shutdown_ self ~wait)
~run_async:(fun ~fiber f ->
schedule_in_main_queue self @@ T_start { fiber; f })
~size:(fun () -> size_ self)
~num_tasks:(fun () -> num_tasks_ self)
()
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?num_threads:int ->
?name:string ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
let create ?(on_init_thread = default_thread_init_exit_)
?(on_exit_thread = default_thread_init_exit_) ?(on_exn = fun _ _ -> ())
?num_threads ?name () : t =
let pool_id_ = Id.create () in
let num_domains = Domain_pool_.max_number_of_domains () in
let num_threads = Util_pool_.num_threads ?num_threads () in
(* make sure we don't bias towards the first domain(s) in {!Domain_pool_} *)
let offset = Random.int num_domains in
let pool =
{
id_ = pool_id_;
active = A.make true;
workers = [||];
main_q = Queue.create ();
n_waiting = 0;
n_waiting_nonzero = false;
mutex = Mutex.create ();
cond = Condition.create ();
on_exn;
on_init_thread;
on_exit_thread;
name;
as_runner = Runner.dummy;
}
in
pool.as_runner <- as_runner_ pool;
(* temporary queue used to obtain thread handles from domains
on which the threads are started. *)
let receive_threads = Bb_queue.create () in
(* create the state for the worker with index [idx] *)
let create_worker_state idx =
let dom_id = (offset + idx) mod num_domains in
{
st = pool;
thread = (* dummy *) Thread.self ();
q = WSQ.create ~dummy:WL._dummy_task ();
rng = Random.State.make [| idx |];
dom_id;
idx;
}
in
pool.workers <- Array.init num_threads create_worker_state;
(* start the thread with index [i] *)
let start_thread_with_idx idx (st : worker_state) =
(* function called in domain with index [i], to
create the thread and push it into [receive_threads] *)
let create_thread_in_domain () =
let thread =
Thread.create (WL.worker_loop ~block_signals:true ~ops:worker_ops) st
in
(* send the thread from the domain back to us *)
Bb_queue.push receive_threads (idx, thread)
in
Domain_pool_.run_on st.dom_id create_thread_in_domain
in
(* start all worker threads, placing them on the domains
according to their index and [offset] in a round-robin fashion. *)
Array.iteri start_thread_with_idx pool.workers;
(* receive the newly created threads back from domains *)
for _j = 1 to num_threads do
let i, th = Bb_queue.pop receive_threads in
let worker_state = pool.workers.(i) in
worker_state.thread <- th
done;
pool.as_runner
let with_ ?on_init_thread ?on_exit_thread ?on_exn ?num_threads ?name () f =
let pool =
create ?on_init_thread ?on_exit_thread ?on_exn ?num_threads ?name ()
in
let@ () = Fun.protect ~finally:(fun () -> shutdown pool) in
f pool

View file

@ -1,57 +0,0 @@
(** Work-stealing thread pool.
A pool of threads with a work-stealing scheduler. The pool contains a
fixed number of worker threads that wait for work items to come, process
these, and loop.
This is good for CPU-intensive workloads that feature a lot of small tasks. Note
that tasks will not always be processed in the order they are scheduled, so
this is not great for workloads where the latency of individual tasks matters
(for that see {!Fifo_pool}).
This implements {!Runner.t} since 0.3.
If a pool is no longer needed, {!shutdown} can be used to signal all threads
in it to stop (after they finish their work), and wait for them to stop.
The threads are distributed across a fixed domain pool (whose size is
determined by {!Domain.recommended_domain_count}). See {!create} for more
details. *)
include module type of Runner
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?num_threads:int ->
?name:string ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
val create : (unit -> t, _) create_args
(** [create ()] makes a new thread pool.
@param on_init_thread
called at the beginning of each new thread in the pool.
@param num_threads
size of the pool, i.e. the number of worker threads. It will be at least [1]
internally, so [0] or negative values make no sense. The default is
[Domain.recommended_domain_count()], i.e. one worker thread per CPU core.
Note that specifying [num_threads=n] means that the degree of parallelism is
at most [n]. This behavior is different from that of [Domainslib]; see
https://github.com/c-cube/moonpool/issues/41 for context.
If you want to use all cores, use [Domain.recommended_domain_count()].
@param on_exit_thread called at the end of each thread in the pool
@param name
a name for this thread pool, used if tracing is enabled (since 0.6) *)
val with_ : (unit -> (t -> 'a) -> 'a, _) create_args
(** [with_ () f] calls [f pool], where [pool] is obtained via {!create}. When
[f pool] returns or fails, [pool] is shutdown and its resources are
released.
Most parameters are the same as in {!create}.
@since 0.3 *)
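(** Example: a sketch of fork-join style use of the pool (the workload and
    pool size are illustrative):
    {[
      let () =
        Ws_pool.with_ ~num_threads:4 () (fun pool ->
            let futs =
              List.init 100 (fun i -> Fut.spawn ~on:pool (fun () -> i * i))
            in
            let total =
              List.fold_left (fun acc f -> acc + Fut.wait_block_exn f) 0 futs
            in
            Printf.printf "total: %d\n" total)
    ]} *)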

src/cpp/cpp.ml Normal file
View file

@ -0,0 +1,124 @@
type op =
| Le
| Ge
type line =
| If of op * int * int
| Elseif of op * int * int
| Else
| Endif
| Raw of string
| Eof
let prefix ~pre s =
let len = String.length pre in
if len > String.length s then
false
else (
let rec check i =
if i = len then
true
else if String.unsafe_get s i <> String.unsafe_get pre i then
false
else
check (i + 1)
in
check 0
)
let eval ~major ~minor op i j =
match op with
| Le -> (major, minor) <= (i, j)
| Ge -> (major, minor) >= (i, j)
let preproc_lines ~file ~major ~minor (ic : in_channel) : unit =
let pos = ref 0 in
let fail msg =
failwith (Printf.sprintf "at line %d in '%s': %s" !pos file msg)
in
let pp_pos () = Printf.printf "#%d %S\n" !pos file in
let parse_line () : line =
match input_line ic with
| exception End_of_file -> Eof
| line ->
let line' = String.trim line in
incr pos;
if line' <> "" && line'.[0] = '[' then
if prefix line' ~pre:"[@@@ifle" then
Scanf.sscanf line' "[@@@ifle %d.%d]" (fun x y -> If (Le, x, y))
else if prefix line' ~pre:"[@@@ifge" then
Scanf.sscanf line' "[@@@ifge %d.%d]" (fun x y -> If (Ge, x, y))
else if prefix line' ~pre:"[@@@elifle" then
Scanf.sscanf line' "[@@@elifle %d.%d]" (fun x y -> Elseif (Le, x, y))
else if prefix line' ~pre:"[@@@elifge" then
Scanf.sscanf line' "[@@@elifge %d.%d]" (fun x y -> Elseif (Ge, x, y))
else if line' = "[@@@else_]" then
Else
else if line' = "[@@@endif]" then
Endif
else
Raw line
else
Raw line
in
(* entry point *)
let rec top () =
match parse_line () with
| Eof -> ()
| If (op, i, j) ->
if eval ~major ~minor op i j then (
pp_pos ();
cat_block ()
) else
skip_block ~elseok:true ()
| Raw s ->
print_endline s;
top ()
| Elseif _ | Else | Endif -> fail "unexpected elseif|else|endif"
(* current block is the valid one *)
and cat_block () =
match parse_line () with
| Eof -> fail "unexpected EOF"
| If _ -> fail "nested if not supported"
| Raw s ->
print_endline s;
cat_block ()
| Endif ->
pp_pos ();
top ()
| Elseif _ | Else -> skip_block ~elseok:false ()
(* skip current block.
@param elseok if true, we should evaluate "elseif" *)
and skip_block ~elseok () =
match parse_line () with
| Eof -> fail "unexpected EOF"
| If _ -> fail "nested if not supported"
| Raw _ -> skip_block ~elseok ()
| Endif ->
pp_pos ();
top ()
| Elseif (op, i, j) ->
if elseok && eval ~major ~minor op i j then (
pp_pos ();
cat_block ()
) else
skip_block ~elseok ()
| Else ->
if elseok then (
pp_pos ();
cat_block ()
) else
skip_block ~elseok ()
in
top ()
let () =
let file = Sys.argv.(1) in
let version = Sys.ocaml_version in
let major, minor = Scanf.sscanf version "%u.%u" (fun maj min -> maj, min) in
let ic = open_in file in
preproc_lines ~file ~major ~minor ic;
()
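(* Example input (a sketch): the preprocessor keeps exactly one branch,
   depending on the current compiler version, e.g.:

   [@@@ifge 5.0]
   let relax = Domain.cpu_relax
   [@@@else_]
   let relax () = Thread.yield ()
   [@@@endif]
*)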

src/cpp/dune Normal file
View file

@ -0,0 +1,5 @@
; our little preprocessor (ported from containers)
(executable
(name cpp)
(modes (best exe)))

src/d_pool_.ml Normal file
View file

@ -0,0 +1,122 @@
type domain = Domain_.t
type event =
| Run of (unit -> unit) (** Run this function *)
| Decr (** Decrease count *)
(* State for a domain worker. It should not do too much except for starting
new threads for pools. *)
type worker_state = {
q: event Bb_queue.t;
th_count: int Atomic_.t; (** Number of threads on this domain *)
}
(** Array of (optional) workers.
Workers are started/stopped on demand. For each index we have
the (currently active) domain's state
including a work queue and a thread refcount; and the domain itself,
if any, in a separate option because it might outlive its own state. *)
let domains_ : (worker_state option * Domain_.t option) Lock.t array =
let n = max 1 (Domain_.recommended_number ()) in
Array.init n (fun _ -> Lock.create (None, None))
(** main work loop for a domain worker.
A domain worker does two things:
- run functions it's asked to (mainly, to start new threads inside it)
- decrease the refcount when one of these threads stops. The thread
will notify the domain that it's exiting, so the domain can know
how many threads are still using it. If all threads exit, the domain
polls a bit (in case new threads are created really shortly after,
which happens with a [Pool.with_] or [Pool.create() Pool.shutdown()]
in a tight loop), and if nothing happens it tries to stop to free resources.
*)
let work_ idx (st : worker_state) : unit =
Dla_.setup_domain ();
let main_loop () =
let continue = ref true in
while !continue do
match Bb_queue.pop st.q with
| Run f -> (try f () with _ -> ())
| Decr ->
if Atomic_.fetch_and_add st.th_count (-1) = 1 then (
continue := false;
(* wait a bit, we might be needed again in a short amount of time *)
try
for _n_attempt = 1 to 50 do
Thread.delay 0.001;
if Atomic_.get st.th_count > 0 then (
(* needed again! *)
continue := true;
raise Exit
)
done
with Exit -> ()
)
done
in
while
main_loop ();
(* exit: try to remove ourselves from [domains]. If that fails, keep living. *)
let is_alive =
Lock.update_map domains_.(idx) (function
| None, _ -> assert false
| Some _st', dom ->
assert (st == _st');
if Atomic_.get st.th_count > 0 then
(* still alive! *)
(Some st, dom), true
else
(None, dom), false)
in
is_alive
do
()
done;
()
(* special case for main domain: we start a worker immediately *)
let () =
assert (Domain_.is_main_domain ());
let w = { th_count = Atomic_.make 1; q = Bb_queue.create () } in
(* thread that stays alive *)
ignore (Thread.create (fun () -> work_ 0 w) () : Thread.t);
domains_.(0) <- Lock.create (Some w, None)
let[@inline] n_domains () : int = Array.length domains_
let run_on (i : int) (f : unit -> unit) : unit =
assert (i < Array.length domains_);
let w =
Lock.update_map domains_.(i) (function
| (Some w, _) as st ->
Atomic_.incr w.th_count;
st, w
| None, dying_dom ->
(* join previous dying domain, to free its resources, if any *)
Option.iter Domain_.join dying_dom;
let w = { th_count = Atomic_.make 1; q = Bb_queue.create () } in
let worker : domain = Domain_.spawn (fun () -> work_ i w) in
(Some w, Some worker), w)
in
Bb_queue.push w.q (Run f)
let decr_on (i : int) : unit =
assert (i < Array.length domains_);
match Lock.get domains_.(i) with
| Some st, _ -> Bb_queue.push st.q Decr
| None, _ -> ()
let run_on_and_wait (i : int) (f : unit -> 'a) : 'a =
let q = Bb_queue.create () in
run_on i (fun () ->
let x = f () in
Bb_queue.push q x);
Bb_queue.pop q

src/d_pool_.mli Normal file
View file

@ -0,0 +1,23 @@
(** Static pool of domains.
These domains are shared between {b all} the pools in moonpool.
The rationale is that we should not have more domains than cores, so
it's easier to pre-allocate exactly that many domains, and run more flexible
thread pools on top.
*)
type domain = Domain_.t
val n_domains : unit -> int
(** Number of domains in the pool *)
val run_on : int -> (unit -> unit) -> unit
(** [run_on i f] runs [f()] on the domain with index [i].
Precondition: [0 <= i < n_domains()] *)
val decr_on : int -> unit
(** Signal that a thread is stopping on the domain with index [i]. *)
val run_on_and_wait : int -> (unit -> 'a) -> 'a
(** [run_on_and_wait i f] runs [f()] on the domain with index [i],
and blocks until the result of [f()] is returned. *)
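(** Example: a sketch of how pools use this module to start a thread on a
    specific domain and retrieve its handle:
    {[
      let spawn_thread_on_domain (i : int) (f : unit -> unit) : Thread.t =
        run_on_and_wait i (fun () -> Thread.create f ())
    ]} *)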

src/dla_.dummy.ml Normal file
View file

@ -0,0 +1,13 @@
(** Interface to Domain-local-await.
This is used to handle the presence or absence of DLA. *)
type t = {
release: unit -> unit;
await: unit -> unit;
}
let using : prepare_for_await:(unit -> t) -> while_running:(unit -> 'a) -> 'a =
fun ~prepare_for_await:_ ~while_running -> while_running ()
let setup_domain () = ()

src/dla_.real.ml Normal file
View file

@ -0,0 +1,9 @@
type t = Domain_local_await.t = {
release: unit -> unit;
await: unit -> unit;
}
let using : prepare_for_await:(unit -> t) -> while_running:(unit -> 'a) -> 'a =
Domain_local_await.using
let setup_domain () = Domain_local_await.per_thread (module Thread)

View file

@ -1,3 +1,4 @@
[@@@ifge 5.0]
[@@@ocaml.alert "-unstable"]
let recommended_number () = Domain.recommended_domain_count ()
@ -9,3 +10,18 @@ let spawn : _ -> t = Domain.spawn
let relax = Domain.cpu_relax
let join = Domain.join
let is_main_domain = Domain.is_main_domain
[@@@ocaml.alert "+unstable"]
[@@@else_]
let recommended_number () = 1
type t = Thread.t
let get_id (self : t) : int = Thread.id self
let spawn f : t = Thread.create f ()
let relax () = Thread.yield ()
let join = Thread.join
let is_main_domain () = true
[@@@endif]

View file

@ -1,6 +0,0 @@
(library
(name moonpool_dpool)
(public_name moonpool.dpool)
(synopsis "Moonpool's domain pool (used to start worker threads)")
(flags :standard -open Moonpool_private)
(libraries moonpool.private))

View file

@ -1,188 +0,0 @@
module Bb_queue = struct
type 'a t = {
mutex: Mutex.t;
cond: Condition.t;
q: 'a Queue.t;
}
let create () : _ t =
{ mutex = Mutex.create (); cond = Condition.create (); q = Queue.create () }
let push (self : _ t) x : unit =
Mutex.lock self.mutex;
let was_empty = Queue.is_empty self.q in
Queue.push x self.q;
if was_empty then Condition.broadcast self.cond;
Mutex.unlock self.mutex
let pop (type a) (self : a t) : a =
let module M = struct
exception Found of a
end in
try
Mutex.lock self.mutex;
while true do
if Queue.is_empty self.q then
Condition.wait self.cond self.mutex
else (
let x = Queue.pop self.q in
Mutex.unlock self.mutex;
raise (M.Found x)
)
done;
assert false
with M.Found x -> x
end
module Lock = struct
type 'a t = {
mutex: Mutex.t;
mutable content: 'a;
}
let create content : _ t = { mutex = Mutex.create (); content }
let[@inline never] with_ (self : _ t) f =
Mutex.lock self.mutex;
match f self.content with
| x ->
Mutex.unlock self.mutex;
x
| exception e ->
Mutex.unlock self.mutex;
raise e
let[@inline] update_map l f =
with_ l (fun x ->
let x', y = f x in
l.content <- x';
y)
let get l =
Mutex.lock l.mutex;
let x = l.content in
Mutex.unlock l.mutex;
x
end
type domain = Domain_.t
type event =
| Run of (unit -> unit) (** Run this function *)
| Decr (** Decrease count *)
(* State for a domain worker. It should not do too much except for starting
new threads for pools. *)
type worker_state = {
q: event Bb_queue.t;
th_count: int Atomic.t; (** Number of threads on this *)
}
(** Array of (optional) workers.
Workers are started/stopped on demand. For each index we have the (currently
active) domain's state including a work queue and a thread refcount; and the
domain itself, if any, in a separate option because it might outlive its own
state. *)
let domains_ : (worker_state option * Domain_.t option) Lock.t array =
let n = max 1 (Domain_.recommended_number ()) in
Array.init n (fun _ -> Lock.create (None, None))
(** main work loop for a domain worker.
A domain worker does two things:
- run functions it's asked to (mainly, to start new threads inside it)
- decrease the refcount when one of these threads stops. The thread will
notify the domain that it's exiting, so the domain can know how many
threads are still using it. If all threads exit, the domain polls a bit
(in case new threads are created shortly after, which happens with
a [Pool.with_] or [Pool.create() Pool.shutdown()] in a tight loop), and
if nothing happens it tries to stop to free resources. *)
let work_ idx (st : worker_state) : unit =
Signals_.ignore_signals_ ();
let main_loop () =
let continue = ref true in
while !continue do
match Bb_queue.pop st.q with
| Run f -> (try f () with _ -> ())
| Decr ->
if Atomic.fetch_and_add st.th_count (-1) = 1 then (
continue := false;
(* wait a bit, we might be needed again in a short amount of time *)
try
for _n_attempt = 1 to 50 do
Thread.delay 0.001;
if Atomic.get st.th_count > 0 then (
(* needed again! *)
continue := true;
raise Exit
)
done
with Exit -> ()
)
done
in
while
main_loop ();
(* exit: try to remove ourselves from [domains]. If that fails, keep living. *)
let is_alive =
Lock.update_map domains_.(idx) (function
| None, _ -> assert false
| Some _st', dom ->
assert (st == _st');
if Atomic.get st.th_count > 0 then
(* still alive! *)
(Some st, dom), true
else
(None, dom), false)
in
is_alive
do
()
done;
()
(* special case for main domain: we start a worker immediately *)
let () =
assert (Domain_.is_main_domain ());
let w = { th_count = Atomic.make 1; q = Bb_queue.create () } in
(* thread that stays alive since [th_count>0] will always hold *)
ignore (Thread.create (fun () -> work_ 0 w) () : Thread.t);
domains_.(0) <- Lock.create (Some w, None)
let[@inline] max_number_of_domains () : int = Array.length domains_
let run_on (i : int) (f : unit -> unit) : unit =
assert (i < Array.length domains_);
let w : worker_state =
Lock.update_map domains_.(i) (function
| (Some w, _) as st ->
Atomic.incr w.th_count;
st, w
| None, dying_dom ->
(* join previous dying domain, to free its resources, if any *)
Option.iter Domain_.join dying_dom;
let w = { th_count = Atomic.make 1; q = Bb_queue.create () } in
let worker : domain = Domain_.spawn (fun () -> work_ i w) in
(Some w, Some worker), w)
in
Bb_queue.push w.q (Run f)
let decr_on (i : int) : unit =
assert (i < Array.length domains_);
match Lock.get domains_.(i) with
| Some st, _ -> Bb_queue.push st.q Decr
| None, _ -> ()
let run_on_and_wait (i : int) (f : unit -> 'a) : 'a =
let q = Bb_queue.create () in
run_on i (fun () ->
let x = f () in
Bb_queue.push q x);
Bb_queue.pop q

View file

@@ -1,35 +0,0 @@
(** Static pool of domains.
These domains are shared between {b all} the pools in moonpool. The
rationale is that we should not have more domains than cores, so it's easier
to reserve exactly that many domain slots, and run more flexible thread
pools on top (each domain being shared by potentially multiple threads from
multiple pools).
The pool should not contain actual domains if it's not in use, ie if no
runner is currently using any of the domain slots.
{b NOTE}: Interface is still experimental.
@since 0.6 *)
type domain = Domain_.t
val max_number_of_domains : unit -> int
(** Number of domains in the pool when all domains are active. *)
(** {2 Low-level interface for resource handling}
Be very cautious with this interface, or resource leaks might occur. *)
val run_on : int -> (unit -> unit) -> unit
(** [run_on i f] runs [f()] on the domain with index [i]. Precondition:
[0 <= i < max_number_of_domains()]. The thread must call {!decr_on} with [i] once it's
done. *)
val decr_on : int -> unit
(** Signal that a thread is stopping on the domain with index [i]. *)
val run_on_and_wait : int -> (unit -> 'a) -> 'a
(** [run_on_and_wait i f] runs [f()] on the domain with index [i], and blocks
until the result of [f()] is returned back. *)

14
src/dune Normal file
View file

@@ -0,0 +1,14 @@
(library
(public_name moonpool)
(name moonpool)
(private_modules d_pool_ dla_)
(preprocess
(action
(run %{project_root}/src/cpp/cpp.exe %{input-file})))
(libraries threads either
(select thread_local_storage_.ml from
(thread-local-storage -> thread_local_storage_.stub.ml)
(-> thread_local_storage_.real.ml))
(select dla_.ml from
(domain-local-await -> dla_.real.ml)
( -> dla_.dummy.ml))))

150
src/fifo_pool.ml Normal file
View file

@@ -0,0 +1,150 @@
module TLS = Thread_local_storage_
include Runner
let ( let@ ) = ( @@ )
type state = {
threads: Thread.t array;
q: task Bb_queue.t; (** Queue for tasks. *)
}
(** internal state *)
let[@inline] size_ (self : state) = Array.length self.threads
let[@inline] num_tasks_ (self : state) : int = Bb_queue.size self.q
(** Run [task] as is, on the pool. *)
let schedule_ (self : state) (task : task) : unit =
try Bb_queue.push self.q task with Bb_queue.Closed -> raise Shutdown
type around_task = AT_pair : (t -> 'a) * (t -> 'a -> unit) -> around_task
let worker_thread_ (self : state) (runner : t) ~on_exn ~around_task : unit =
TLS.get Runner.For_runner_implementors.k_cur_runner := Some runner;
let (AT_pair (before_task, after_task)) = around_task in
let run_task task : unit =
let _ctx = before_task runner in
(* run the task now, catching errors *)
(try Suspend_.with_suspend task ~run:(fun task' -> schedule_ self task')
with e ->
let bt = Printexc.get_raw_backtrace () in
on_exn e bt);
after_task runner _ctx
in
let main_loop () =
let continue = ref true in
while !continue do
match Bb_queue.pop self.q with
| task -> run_task task
| exception Bb_queue.Closed -> continue := false
done
in
try
(* handle domain-local await *)
Dla_.using ~prepare_for_await:Suspend_.prepare_for_await
~while_running:main_loop
with Bb_queue.Closed -> ()
let default_thread_init_exit_ ~dom_id:_ ~t_id:_ () = ()
let shutdown_ ~wait (self : state) : unit =
Bb_queue.close self.q;
if wait then Array.iter Thread.join self.threads
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?around_task:(t -> 'b) * (t -> 'b -> unit) ->
?num_threads:int ->
'a
let create ?(on_init_thread = default_thread_init_exit_)
?(on_exit_thread = default_thread_init_exit_) ?(on_exn = fun _ _ -> ())
?around_task ?num_threads () : t =
(* wrapper *)
let around_task =
match around_task with
| Some (f, g) -> AT_pair (f, g)
| None -> AT_pair (ignore, fun _ _ -> ())
in
let num_domains = D_pool_.n_domains () in
(* number of threads to run *)
let num_threads = Util_pool_.num_threads ?num_threads () in
(* make sure we don't bias towards the first domain(s) in {!D_pool_} *)
let offset = Random.int num_domains in
let pool =
let dummy = Thread.self () in
{ threads = Array.make num_threads dummy; q = Bb_queue.create () }
in
let runner =
Runner.For_runner_implementors.create
~shutdown:(fun ~wait () -> shutdown_ pool ~wait)
~run_async:(fun f -> schedule_ pool f)
~size:(fun () -> size_ pool)
~num_tasks:(fun () -> num_tasks_ pool)
()
in
(* temporary queue used to obtain thread handles from domains
on which the threads are started. *)
let receive_threads = Bb_queue.create () in
(* start the thread with index [i] *)
let start_thread_with_idx i =
let dom_idx = (offset + i) mod num_domains in
(* function run in the thread itself *)
let main_thread_fun () : unit =
let thread = Thread.self () in
let t_id = Thread.id thread in
on_init_thread ~dom_id:dom_idx ~t_id ();
let run () = worker_thread_ pool runner ~on_exn ~around_task in
(* now run the main loop *)
Fun.protect run ~finally:(fun () ->
(* on termination, decrease refcount of underlying domain *)
D_pool_.decr_on dom_idx);
on_exit_thread ~dom_id:dom_idx ~t_id ()
in
(* function called in domain with index [i], to
create the thread and push it into [receive_threads] *)
let create_thread_in_domain () =
let thread = Thread.create main_thread_fun () in
(* send the thread from the domain back to us *)
Bb_queue.push receive_threads (i, thread)
in
D_pool_.run_on dom_idx create_thread_in_domain
in
(* start all threads, placing them on the domains
according to their index and [offset] in a round-robin fashion. *)
for i = 0 to num_threads - 1 do
start_thread_with_idx i
done;
(* receive the newly created threads back from domains *)
for _j = 1 to num_threads do
let i, th = Bb_queue.pop receive_threads in
pool.threads.(i) <- th
done;
runner
let with_ ?on_init_thread ?on_exit_thread ?on_exn ?around_task ?num_threads () f
=
let pool =
create ?on_init_thread ?on_exit_thread ?on_exn ?around_task ?num_threads ()
in
let@ () = Fun.protect ~finally:(fun () -> shutdown pool) in
f pool

44
src/fifo_pool.mli Normal file
View file

@@ -0,0 +1,44 @@
(** A simple thread pool in FIFO order.
FIFO: first-in, first-out. Tasks are put into a queue, and worker
threads pull them out of the queue at the other end.
Since this uses a single blocking queue to manage tasks, it's very
simple and reliable. The number of worker threads is fixed, but
they are spread over several domains to enable parallelism.
This can be useful for latency-sensitive applications (e.g. as a
pool of workers for network servers). Work-stealing pools might
have higher throughput but they're very unfair to some tasks; by
contrast, here, older tasks have priority over younger tasks.
@since 0.5 *)
include module type of Runner
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?around_task:(t -> 'b) * (t -> 'b -> unit) ->
?num_threads:int ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
val create : (unit -> t, _) create_args
(** [create ()] makes a new thread pool.
@param on_init_thread called at the beginning of each new thread in the pool.
@param num_threads number of worker threads in the pool. See {!Pool.create_args}.
The default is [Domain.recommended_domain_count()], ie one worker per
CPU core.
On OCaml 4 the default is [4] (since there is only one domain).
@param on_exit_thread called at the end of each worker thread in the pool.
@param around_task a pair of [before, after] functions
ran around each task. See {!Pool.create_args}.
*)
val with_ : (unit -> (t -> 'a) -> 'a, _) create_args
(** [with_ () f] calls [f pool], where [pool] is obtained via {!create}.
When [f pool] returns or fails, [pool] is shut down and its resources
are released.
Most parameters are the same as in {!create}. *)
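(** For illustration, a minimal usage sketch:
    {[
      let () =
        with_ ~num_threads:4 () (fun pool ->
            let fut = Fut.spawn ~on:pool (fun () -> 21 * 2) in
            assert (Fut.wait_block_exn fut = 42))
    ]} *)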

View file

@@ -1,5 +1,6 @@
module A = Moonpool.Atomic
module Domain_ = Moonpool_private.Domain_
[@@@ifge 5.0]
module A = Atomic_
module State_ = struct
type error = exn * Printexc.raw_backtrace
@@ -8,7 +9,7 @@ module State_ = struct
type ('a, 'b) t =
| Init
| Left_solved of 'a or_error
| Right_solved of 'b or_error * Trigger.t
| Right_solved of 'b or_error * Suspend_.suspension
| Both_solved of 'a or_error * 'b or_error
let get_exn_ (self : _ t A.t) =
@@ -27,13 +28,13 @@
Domain_.relax ();
set_left_ self left
)
| Right_solved (right, tr) ->
| Right_solved (right, cont) ->
let new_st = Both_solved (left, right) in
if not (A.compare_and_set self old_st new_st) then (
Domain_.relax ();
set_left_ self left
) else
Trigger.signal tr
cont (Ok ())
| Left_solved _ | Both_solved _ -> assert false
let rec set_right_ (self : _ t A.t) (right : _ or_error) : unit =
@@ -44,27 +45,27 @@
if not (A.compare_and_set self old_st new_st) then set_right_ self right
| Init ->
(* we are first arrived, we suspend until the left computation is done *)
let trigger = Trigger.create () in
let must_await = ref true in
while
let old_st = A.get self in
match old_st with
| Init ->
(* setup trigger so that left computation will wake us up *)
not (A.compare_and_set self old_st (Right_solved (right, trigger)))
| Left_solved left ->
(* other thread is done, no risk of race condition *)
A.set self (Both_solved (left, right));
must_await := false;
false
| Right_solved _ | Both_solved _ -> assert false
do
()
done;
(* wait for the other computation to be done *)
if !must_await then Trigger.await_exn trigger
Suspend_.suspend
{
Suspend_.handle =
(fun ~run:_ suspension ->
while
let old_st = A.get self in
match old_st with
| Init ->
not
(A.compare_and_set self old_st
(Right_solved (right, suspension)))
| Left_solved left ->
(* other thread is done, no risk of race condition *)
A.set self (Both_solved (left, right));
suspension (Ok ());
false
| Right_solved _ | Both_solved _ -> assert false
do
()
done);
}
| Right_solved _ | Both_solved _ -> assert false
end
@@ -101,12 +102,7 @@ let both_ignore f g = ignore (both f g : _ * _)
let for_ ?chunk_size n (f : int -> int -> unit) : unit =
if n > 0 then (
let runner =
match Runner.get_current_runner () with
| None -> failwith "forkjoin.for_: must be run inside a moonpool runner."
| Some r -> r
in
let failure = A.make None in
let has_failed = A.make false in
let missing = A.make n in
let chunk_size =
@@ -114,39 +110,43 @@ let for_ ?chunk_size n (f : int -> int -> unit) : unit =
| Some cs -> max 1 (min n cs)
| None ->
(* guess: try to have roughly one task per core *)
max 1 (1 + (n / Moonpool.Private.num_domains ()))
max 1 (1 + (n / D_pool_.n_domains ()))
in
let trigger = Trigger.create () in
let start_tasks ~run (suspension : Suspend_.suspension) =
let task_for ~offset ~len_range =
match f offset (offset + len_range - 1) with
| () ->
if A.fetch_and_add missing (-len_range) = len_range then
(* all tasks done successfully *)
suspension (Ok ())
| exception exn ->
let bt = Printexc.get_raw_backtrace () in
if not (A.exchange has_failed true) then
(* first one to fail, and [missing] must be >= 2
because we're not decreasing it. *)
suspension (Error (exn, bt))
in
let task_for ~offset ~len_range =
match f offset (offset + len_range - 1) with
| () ->
if A.fetch_and_add missing (-len_range) = len_range then
(* all tasks done successfully *)
Trigger.signal trigger
| exception exn ->
let bt = Printexc.get_raw_backtrace () in
if Option.is_none (A.exchange failure (Some (Exn_bt.make exn bt))) then
(* first one to fail, and [missing] must be >= 2
because we're not decreasing it. *)
Trigger.signal trigger
let i = ref 0 in
while !i < n do
let offset = !i in
let len_range = min chunk_size (n - offset) in
assert (offset + len_range <= n);
run (fun () -> task_for ~offset ~len_range);
i := !i + len_range
done
in
let i = ref 0 in
while !i < n do
let offset = !i in
let len_range = min chunk_size (n - offset) in
assert (offset + len_range <= n);
Runner.run_async runner (fun () -> task_for ~offset ~len_range);
i := !i + len_range
done;
Trigger.await_exn trigger;
Option.iter Exn_bt.raise @@ A.get failure;
()
Suspend_.suspend
{
Suspend_.handle =
(fun ~run suspension ->
(* run tasks, then we'll resume [suspension] *)
start_tasks ~run suspension);
}
)
let all_array ?chunk_size (fs : _ array) : _ array =
@@ -216,3 +216,5 @@ let map_list ?chunk_size f (l : _ list) : _ list =
match res.(i) with
| None -> assert false
| Some x -> x)
[@@@endif]

View file

@@ -4,23 +4,23 @@
@since 0.3 *)
val both : (unit -> 'a) -> (unit -> 'b) -> 'a * 'b
(** [both f g] runs [f()] and [g()], potentially in parallel, and returns their
result when both are done. If either [f()] or [g()] fails, then the whole
computation fails.
[@@@ifge 5.0]
This must be run from within the pool: for example, inside {!Pool.run} or
inside a {!Fut.spawn} computation. This is because it relies on an effect
handler to be installed.
val both : (unit -> 'a) -> (unit -> 'b) -> 'a * 'b
(** [both f g] runs [f()] and [g()], potentially in parallel,
and returns their result when both are done.
If either [f()] or [g()] fails, then the whole computation fails.
This must be run from within the pool: for example, inside {!Pool.run}
or inside a {!Fut.spawn} computation.
This is because it relies on an effect handler to be installed.
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
val both_ignore : (unit -> _) -> (unit -> _) -> unit
(** Same as [both f g |> ignore].
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
val for_ : ?chunk_size:int -> int -> (int -> int -> unit) -> unit
@@ -65,49 +65,45 @@ val for_ : ?chunk_size:int -> int -> (int -> int -> unit) -> unit
{b NOTE} this is only available on OCaml 5. *)
val all_array : ?chunk_size:int -> (unit -> 'a) array -> 'a array
(** [all_array fs] runs all functions in [fs] in tasks, and waits for all the
results.
(** [all_array fs] runs all functions in [fs] in tasks, and waits for
all the results.
@param chunk_size
if equal to [n], groups items by [n] to be run in a single task. Default
is [1].
@param chunk_size if equal to [n], groups items by [n] to be run in
a single task. Default is [1].
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
val all_list : ?chunk_size:int -> (unit -> 'a) list -> 'a list
(** [all_list fs] runs all functions in [fs] in tasks, and waits for all the
results.
(** [all_list fs] runs all functions in [fs] in tasks, and waits for
all the results.
@param chunk_size
if equal to [n], groups items by [n] to be run in a single task. Default
is not specified. This parameter is available since 0.3.
@param chunk_size if equal to [n], groups items by [n] to be run in
a single task. Default is not specified.
This parameter is available since 0.3.
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
val all_init : ?chunk_size:int -> int -> (int -> 'a) -> 'a list
(** [all_init n f] runs functions [f 0], [f 1], … [f (n-1)] in tasks, and waits
for all the results.
(** [all_init n f] runs functions [f 0], [f 1], … [f (n-1)] in tasks, and waits for
all the results.
@param chunk_size
if equal to [n], groups items by [n] to be run in a single task. Default
is not specified. This parameter is available since 0.3.
@param chunk_size if equal to [n], groups items by [n] to be run in
a single task. Default is not specified.
This parameter is available since 0.3.
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
val map_array : ?chunk_size:int -> ('a -> 'b) -> 'a array -> 'b array
(** [map_array f arr] is like [Array.map f arr], but runs in parallel.
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
val map_list : ?chunk_size:int -> ('a -> 'b) -> 'a list -> 'b list
(** [map_list f l] is like [List.map f l], but runs in parallel.
@since 0.3
{b NOTE} this is only available on OCaml 5. *)
[@@@endif]
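(** For illustration, a sketch of {!for_} summing squares in parallel
    (OCaml 5 only; [pool] is a hypothetical runner, and the task must run
    inside it so the effect handler is installed):
    {[
      let sum_squares pool (arr : int array) : int =
        Fut.wait_block_exn
        @@ Fut.spawn ~on:pool (fun () ->
               let total = Atomic.make 0 in
               for_ ~chunk_size:1000 (Array.length arr) (fun lo hi ->
                   (* [lo] and [hi] are inclusive bounds of this chunk *)
                   let s = ref 0 in
                   for i = lo to hi do
                     s := !s + (arr.(i) * arr.(i))
                   done;
                   ignore (Atomic.fetch_and_add total !s : int));
               Atomic.get total)
    ]} *)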

View file

@@ -1,7 +0,0 @@
(library
(name moonpool_forkjoin)
(public_name moonpool.forkjoin)
(synopsis "Fork-join parallelism for moonpool")
(flags :standard -open Moonpool)
(optional)
(libraries moonpool moonpool.private picos))

View file

@@ -1,115 +1,100 @@
module A = Atomic
module C = Picos.Computation
module A = Atomic_
type 'a or_error = ('a, Exn_bt.t) result
type 'a or_error = ('a, exn * Printexc.raw_backtrace) result
type 'a waiter = 'a or_error -> unit
type 'a t = 'a C.t
type 'a state =
| Done of 'a or_error
| Waiting of 'a waiter list
type 'a t = { st: 'a state A.t } [@@unboxed]
type 'a promise = 'a t
let[@inline] make_promise () : _ t =
let fut = C.create ~mode:`LIFO () in
fut
let make () =
let fut = make_promise () in
let fut = { st = A.make (Waiting []) } in
fut, fut
let[@inline] return x : _ t = C.returned x
let[@inline] cancel x ebt = C.cancel x (fst ebt) (snd ebt)
let[@inline] try_cancel x ebt = C.try_cancel x (fst ebt) (snd ebt)
let of_result x : _ t = { st = A.make (Done x) }
let[@inline] return x : _ t = of_result (Ok x)
let[@inline] fail e bt : _ t = of_result (Error (e, bt))
let[@inline] fail exn bt : _ t =
let fut = C.create () in
C.cancel fut exn bt;
fut
let[@inline] is_resolved self : bool =
match A.get self.st with
| Done _ -> true
| Waiting _ -> false
let[@inline] fail_exn_bt ebt = fail (Exn_bt.exn ebt) (Exn_bt.bt ebt)
let[@inline] peek self : _ option =
match A.get self.st with
| Done x -> Some x
| Waiting _ -> None
let[@inline] of_result = function
| Ok x -> return x
| Error ebt -> fail_exn_bt ebt
let[@inline] is_resolved self : bool = not (C.is_running self)
let is_done = is_resolved
let peek : 'a t -> _ option = C.peek
let raise_if_failed : _ t -> unit = C.check
let[@inline] is_success self =
match C.peek_exn self with
| _ -> true
| exception _ -> false
let is_failed : _ t -> bool = C.is_canceled
let[@inline] is_done self : bool =
match A.get self.st with
| Done _ -> true
| Waiting _ -> false
exception Not_ready
let[@inline] get_or_fail self =
match C.peek self with
| Some x -> x
| None -> raise Not_ready
match A.get self.st with
| Done x -> x
| Waiting _ -> raise Not_ready
let[@inline] get_or_fail_exn self =
match C.peek_exn self with
| x -> x
| exception C.Running -> raise Not_ready
let[@inline] peek_or_assert_ (self : 'a t) : 'a =
match C.peek_exn self with
| x -> x
| exception C.Running -> assert false
let on_result_cb_ _tr f self : unit =
match peek_or_assert_ self with
| x -> f (Ok x)
| exception exn ->
let ebt = Exn_bt.get exn in
f (Error ebt)
match A.get self.st with
| Done (Ok x) -> x
| Done (Error (exn, bt)) -> Printexc.raise_with_backtrace exn bt
| Waiting _ -> raise Not_ready
let on_result (self : _ t) (f : _ waiter) : unit =
let trigger =
(Trigger.from_action f self on_result_cb_ [@alert "-handler"])
in
if not (C.try_attach self trigger) then on_result_cb_ () f self
let on_result_ignore_cb_ _tr f (self : _ t) =
f (Picos.Computation.canceled self)
let on_result_ignore (self : _ t) f : unit =
if Picos.Computation.is_running self then (
let trigger =
(Trigger.from_action f self on_result_ignore_cb_ [@alert "-handler"])
in
if not (C.try_attach self trigger) then on_result_ignore_cb_ () f self
) else
on_result_ignore_cb_ () f self
let[@inline] fulfill_idempotent self r =
match r with
| Ok x -> C.return self x
| Error ebt -> C.cancel self (Exn_bt.exn ebt) (Exn_bt.bt ebt)
while
let st = A.get self.st in
match st with
| Done x ->
f x;
false
| Waiting l ->
let must_retry = not (A.compare_and_set self.st st (Waiting (f :: l))) in
must_retry
do
Domain_.relax ()
done
exception Already_fulfilled
let fulfill (self : _ t) (r : _ result) : unit =
let ok =
match r with
| Ok x -> C.try_return self x
| Error ebt -> C.try_cancel self (Exn_bt.exn ebt) (Exn_bt.bt ebt)
in
if not ok then raise Already_fulfilled
while
let st = A.get self.st in
match st with
| Done _ -> raise Already_fulfilled
| Waiting l ->
let did_swap = A.compare_and_set self.st st (Done r) in
if did_swap then (
(* success, now call all the waiters *)
List.iter (fun f -> try f r with _ -> ()) l;
false
) else
true
do
Domain_.relax ()
done
let[@inline] fulfill_idempotent self r =
try fulfill self r with Already_fulfilled -> ()
(* ### combinators ### *)
let spawn ~on f : _ t =
let fut = make_promise () in
let fut, promise = make () in
let task () =
try
let res = f () in
C.return fut res
with exn ->
let bt = Printexc.get_raw_backtrace () in
C.cancel fut exn bt
let res =
try Ok (f ())
with e ->
let bt = Printexc.get_raw_backtrace () in
Error (e, bt)
in
fulfill promise res
in
Runner.run_async on task;
@@ -124,11 +109,11 @@ let reify_error (f : 'a t) : 'a or_error t =
match peek f with
| Some res -> return res
| None ->
let fut = make_promise () in
on_result f (fun r -> fulfill fut (Ok r));
let fut, promise = make () in
on_result f (fun r -> fulfill promise (Ok r));
fut
let[@inline] get_runner_ ?on () : Runner.t option =
let get_runner_ ?on () : Runner.t option =
match on with
| Some _ as r -> r
| None -> Runner.get_current_runner ()
@@ -138,9 +123,9 @@ let map ?on ~f fut : _ t =
match r with
| Ok x ->
(try Ok (f x)
with exn ->
with e ->
let bt = Printexc.get_raw_backtrace () in
Error (Exn_bt.make exn bt))
Error (e, bt))
| Error e_bt -> Error e_bt
in
@@ -164,7 +149,7 @@ let map ?on ~f fut : _ t =
let join (fut : 'a t t) : 'a t =
match peek fut with
| Some (Ok f) -> f
| Some (Error ebt) -> fail_exn_bt ebt
| Some (Error (e, bt)) -> fail e bt
| None ->
let fut2, promise = make () in
on_result fut (function
@@ -180,7 +165,7 @@ let bind ?on ~f fut : _ t =
with e ->
let bt = Printexc.get_raw_backtrace () in
fail e bt)
| Error ebt -> fail_exn_bt ebt
| Error (e, bt) -> fail e bt
in
let bind_and_fulfill (r : _ result) promise () : unit =
@@ -223,7 +208,7 @@ let update_atomic_ (st : 'a A.t) f : 'a =
let both a b : _ t =
match peek a, peek b with
| Some (Ok x), Some (Ok y) -> return (x, y)
| Some (Error ebt), _ | _, Some (Error ebt) -> fail_exn_bt ebt
| Some (Error (e, bt)), _ | _, Some (Error (e, bt)) -> fail e bt
| _ ->
let fut, promise = make () in
@@ -256,7 +241,7 @@ let choose a b : _ t =
match peek a, peek b with
| Some (Ok x), _ -> return (Either.Left x)
| _, Some (Ok y) -> return (Either.Right y)
| Some (Error ebt), Some (Error _) -> fail_exn_bt ebt
| Some (Error (e, bt)), Some (Error _) -> fail e bt
| _ ->
let fut, promise = make () in
@@ -279,7 +264,7 @@ let choose_same a b : _ t =
match peek a, peek b with
| Some (Ok x), _ -> return x
| _, Some (Ok y) -> return y
| Some (Error ebt), Some (Error _) -> fail_exn_bt ebt
| Some (Error (e, bt)), Some (Error _) -> fail e bt
| _ ->
let fut, promise = make () in
@@ -296,12 +281,16 @@ let choose_same a b : _ t =
| Ok y -> fulfill_idempotent promise (Ok y));
fut
let barrier_on_abstract_container_of_futures ~iter ~len ~aggregate_results cont
: _ t =
let peek_ok_assert_ (self : 'a t) : 'a =
match A.get self.st with
| Done (Ok x) -> x
| _ -> assert false
let join_container_ ~iter ~map ~len cont : _ t =
let n_items = len cont in
if n_items = 0 then (
(* no items, return now. *)
let cont_empty = aggregate_results (fun _ -> assert false) cont in
let cont_empty = map (fun _ -> assert false) cont in
return cont_empty
) else (
let fut, promise = make () in
@@ -309,14 +298,14 @@ let barrier_on_abstract_container_of_futures ~iter ~len ~aggregate_results cont
(* callback called when a future in [a] is resolved *)
let on_res = function
| None ->
| Ok _ ->
let n = A.fetch_and_add missing (-1) in
if n = 1 then (
(* last future, we know they all succeeded, so resolve [fut] *)
let res = aggregate_results peek_or_assert_ cont in
let res = map peek_ok_assert_ cont in
fulfill promise (Ok res)
)
| Some e_bt ->
| Error e_bt ->
(* immediately cancel all other [on_res] *)
let n = A.exchange missing 0 in
if n > 0 then
@@ -325,49 +314,36 @@ let barrier_on_abstract_container_of_futures ~iter ~len ~aggregate_results cont
fulfill promise (Error e_bt)
in
iter (fun fut -> on_result_ignore fut on_res) cont;
iter (fun fut -> on_result fut on_res) cont;
fut
)
module Advanced = struct
let barrier_on_abstract_container_of_futures =
barrier_on_abstract_container_of_futures
end
let join_array (a : _ t array) : _ array t =
match Array.length a with
| 0 -> return [||]
| 1 -> map ?on:None a.(0) ~f:(fun x -> [| x |])
| _ ->
barrier_on_abstract_container_of_futures ~len:Array.length
~aggregate_results:Array.map ~iter:Array.iter a
| _ -> join_container_ ~len:Array.length ~map:Array.map ~iter:Array.iter a
let join_list (l : _ t list) : _ list t =
match l with
| [] -> return []
| [ x ] -> map ?on:None x ~f:(fun x -> [ x ])
| _ ->
barrier_on_abstract_container_of_futures ~len:List.length
~aggregate_results:List.map ~iter:List.iter l
let[@inline] map_list ~f l : _ list t = List.map f l |> join_list
| _ -> join_container_ ~len:List.length ~map:List.map ~iter:List.iter l
let wait_array (a : _ t array) : unit t =
barrier_on_abstract_container_of_futures a ~iter:Array.iter ~len:Array.length
~aggregate_results:(fun _f _ -> ())
join_container_ a ~iter:Array.iter ~len:Array.length ~map:(fun _f _ -> ())
let wait_list (a : _ t list) : unit t =
barrier_on_abstract_container_of_futures a ~iter:List.iter ~len:List.length
~aggregate_results:(fun _f _ -> ())
join_container_ a ~iter:List.iter ~len:List.length ~map:(fun _f _ -> ())
let for_ ~on n f : unit t =
barrier_on_abstract_container_of_futures
join_container_
~len:(fun () -> n)
~iter:(fun yield () ->
for i = 0 to n - 1 do
yield (spawn ~on (fun () -> f i))
done)
~aggregate_results:(fun _f () -> ())
~map:(fun _f () -> ())
()
let for_array ~on arr f : unit t =
@@ -379,64 +355,61 @@ let for_list ~on l f : unit t =
(* ### blocking ### *)
let push_queue_ _tr q () = Bb_queue.push q ()
let wait_block_exn (self : 'a t) : 'a =
match C.peek_exn self with
| x -> x (* fast path *)
| exception C.Running ->
let wait_block (self : 'a t) : 'a or_error =
match A.get self.st with
| Done x -> x (* fast path *)
| Waiting _ ->
let real_block () =
(* use queue only once *)
let q = Bb_queue.create () in
let trigger = Trigger.create () in
let attached =
(Trigger.on_signal trigger q () push_queue_ [@alert "-handler"])
in
assert attached;
(* blockingly wait for trigger if computation didn't complete in the mean time *)
if C.try_attach self trigger then Bb_queue.pop q;
(* trigger was signaled! computation must be done*)
peek_or_assert_ self
on_result self (fun r -> Bb_queue.push q r);
Bb_queue.pop q
in
(* TODO: use backoff? *)
(* a bit of spinning before we block *)
let rec loop i =
if i = 0 then
real_block ()
else (
match C.peek_exn self with
| x -> x
| exception C.Running ->
match A.get self.st with
| Done x -> x
| Waiting _ ->
Domain_.relax ();
(loop [@tailcall]) (i - 1)
)
in
loop 50
let wait_block self =
match wait_block_exn self with
| x -> Ok x
| exception exn ->
let bt = Printexc.get_raw_backtrace () in
Error (Exn_bt.make exn bt)
let wait_block_exn self =
match wait_block self with
| Ok x -> x
| Error (e, bt) -> Printexc.raise_with_backtrace e bt
let await (self : 'a t) : 'a =
(* fast path: peek *)
match C.peek_exn self with
| res -> res
| exception C.Running ->
let trigger = Trigger.create () in
[@@@ifge 5.0]
let await (fut : 'a t) : 'a =
match peek fut with
| Some res ->
(* fast path: peek *)
(match res with
| Ok x -> x
| Error (exn, bt) -> Printexc.raise_with_backtrace exn bt)
| None ->
(* suspend until the future is resolved *)
if C.try_attach self trigger then Trigger.await_exn trigger;
Suspend_.suspend
{
Suspend_.handle =
(fun ~run k ->
on_result fut (function
| Ok _ -> run (fun () -> k (Ok ()))
| Error (exn, bt) ->
(* fail continuation immediately *)
k (Error (exn, bt))));
};
(* un-suspended: we should have a result! *)
get_or_fail_exn self
get_or_fail_exn fut
let yield = Picos.Fiber.yield
[@@@endif]
module Infix = struct
let[@inline] ( >|= ) x f = map ~f x
@@ -449,8 +422,3 @@ end
include Infix
module Infix_local = Infix [@@deprecated "use Infix"]
module Private_ = struct
let[@inline] unsafe_promise_of_fut x = x
let[@inline] as_computation self = self
end

237
src/fut.mli Normal file
View file

@@ -0,0 +1,237 @@
(** Futures.
A future of type ['a t] represents the result of a computation
that will yield a value of type ['a].
Typically, the computation is running on a thread pool {!Runner.t}
and will proceed on some worker. Once set, a future cannot change.
It either succeeds (storing an [Ok x] with [x: 'a]) or fails
(storing an [Error (exn, bt)] with an exception and the corresponding
backtrace).
Combinators such as {!map} and {!join_array} can be used to produce
futures from other futures (in a monadic way). Some combinators take
an [on] argument to specify a runner on which the intermediate computation takes
place; for example [map ~on:pool ~f fut] maps the value in [fut]
using function [f], applicatively; the call to [f] happens on
the runner [pool] (once [fut] resolves successfully with a value).
*)
type 'a or_error = ('a, exn * Printexc.raw_backtrace) result
type 'a t
(** A future with a result of type ['a]. *)
type 'a promise
(** A promise, which can be fulfilled exactly once to set
the corresponding future *)
val make : unit -> 'a t * 'a promise
(** Make a new future with the associated promise *)
val on_result : 'a t -> ('a or_error -> unit) -> unit
(** [on_result fut f] registers [f] to be called in the future
when [fut] is set;
or calls [f] immediately if [fut] is already set. *)
exception Already_fulfilled
val fulfill : 'a promise -> 'a or_error -> unit
(** Fulfill the promise, setting the future at the same time.
@raise Already_fulfilled if the promise is already fulfilled. *)
val fulfill_idempotent : 'a promise -> 'a or_error -> unit
(** Fulfill the promise, setting the future at the same time.
Does nothing if the promise is already fulfilled. *)
val return : 'a -> 'a t
(** Already settled future, with a result *)
val fail : exn -> Printexc.raw_backtrace -> _ t
(** Already settled future, with a failure *)
val of_result : 'a or_error -> 'a t
val is_resolved : _ t -> bool
(** [is_resolved fut] is [true] iff [fut] is resolved. *)
val peek : 'a t -> 'a or_error option
(** [peek fut] returns [Some r] if [fut] is currently resolved with [r],
and [None] if [fut] is not resolved yet. *)
exception Not_ready
(** @since 0.2 *)
val get_or_fail : 'a t -> 'a or_error
(** [get_or_fail fut] obtains the result from [fut] if it's fulfilled
(i.e. if [peek fut] returns [Some res], [get_or_fail fut] returns [res]).
@raise Not_ready if the future is not ready.
@since 0.2 *)
val get_or_fail_exn : 'a t -> 'a
(** [get_or_fail_exn fut] obtains the result from [fut] if it's fulfilled,
like {!get_or_fail}. If the result is an [Error _], the exception inside
is re-raised.
@raise Not_ready if the future is not ready.
@since 0.2 *)
val is_done : _ t -> bool
(** Is the future resolved? This is the same as [peek fut |> Option.is_some].
@since 0.2 *)
(** {2 Combinators} *)
val spawn : on:Runner.t -> (unit -> 'a) -> 'a t
(** [spawn ~on f] runs [f()] on the given runner [on], and returns a future that will
hold its result. *)
val spawn_on_current_runner : (unit -> 'a) -> 'a t
(** This must be run from inside a runner, and schedules
the new task on it as well.
See {!Runner.get_current_runner} to see how the runner is found.
@since 0.5
@raise Failure if run from outside a runner. *)
val reify_error : 'a t -> 'a or_error t
(** [reify_error fut] turns a failing future into a non-failing
one that contains [Error (exn, bt)]. A non-failing future
returning [x] is turned into [Ok x].
@since 0.4 *)
val map : ?on:Runner.t -> f:('a -> 'b) -> 'a t -> 'b t
(** [map ?on ~f fut] returns a new future [fut2] that resolves
with [f x] if [fut] resolved with [x];
and fails with [e] if [fut] fails with [e] or [f x] raises [e].
@param on if provided, [f] runs on the given runner *)
val bind : ?on:Runner.t -> f:('a -> 'b t) -> 'a t -> 'b t
(** [bind ?on ~f fut] returns a new future [fut2] that resolves
like the future [f x] if [fut] resolved with [x];
and fails with [e] if [fut] fails with [e] or [f x] raises [e].
@param on if provided, [f] runs on the given runner *)
val bind_reify_error : ?on:Runner.t -> f:('a or_error -> 'b t) -> 'a t -> 'b t
(** [bind_reify_error ?on ~f fut] returns a new future [fut2] that resolves
like the future [f (Ok x)] if [fut] resolved with [x];
and resolves like the future [f (Error (exn, bt))]
if [fut] fails with [exn] and backtrace [bt].
@param on if provided, [f] runs on the given runner
@since 0.4 *)
val join : 'a t t -> 'a t
(** [join fut] is [fut >>= Fun.id]. It joins the inner layer of the future.
@since 0.2 *)
val both : 'a t -> 'b t -> ('a * 'b) t
(** [both a b] succeeds with [x, y] if [a] succeeds with [x] and
[b] succeeds with [y], or fails if any of them fails. *)
val choose : 'a t -> 'b t -> ('a, 'b) Either.t t
(** [choose a b] succeeds with [Left x] or [Right y] if [a] succeeds with [x] or
[b] succeeds with [y], or fails if both of them fail.
If they both succeed, it is not specified which result is used. *)
val choose_same : 'a t -> 'a t -> 'a t
(** [choose_same a b] succeeds with the value of one of [a] or [b] if
they succeed, or fails if both fail.
If they both succeed, it is not specified which result is used. *)
val join_array : 'a t array -> 'a array t
(** Wait for all the futures in the array. Fails if any future fails. *)
val join_list : 'a t list -> 'a list t
(** Wait for all the futures in the list. Fails if any future fails. *)
val wait_array : _ t array -> unit t
(** [wait_array arr] waits for all futures in [arr] to resolve. It discards
the individual results of futures in [arr]. It fails if any future fails. *)
val wait_list : _ t list -> unit t
(** [wait_list l] waits for all futures in [l] to resolve. It discards
the individual results of futures in [l]. It fails if any future fails. *)
val for_ : on:Runner.t -> int -> (int -> unit) -> unit t
(** [for_ ~on n f] runs [f 0], [f 1], …, [f (n-1)] on the runner, and returns
a future that resolves when all the tasks have resolved, or fails
as soon as one task has failed. *)
val for_array : on:Runner.t -> 'a array -> (int -> 'a -> unit) -> unit t
(** [for_array ~on arr f] runs [f 0 arr.(0)], …, [f (n-1) arr.(n-1)] in
the runner (where [n = Array.length arr]), and returns a future
that resolves when all the tasks are done,
or fails if any of them fails.
@since 0.2 *)
val for_list : on:Runner.t -> 'a list -> ('a -> unit) -> unit t
(** [for_list ~on l f] is like [for_array ~on (Array.of_list l) f].
@since 0.2 *)
(** {2 Await}
{b NOTE} This is only available on OCaml 5. *)
[@@@ifge 5.0]
val await : 'a t -> 'a
(** [await fut] suspends the current task until [fut] is fulfilled, then
resumes the task on this same runner.
@since 0.3
This must only be run from inside the runner itself. The runner must
support {!Suspend_}.
{b NOTE}: only on OCaml 5.x
*)
[@@@endif]
(** {2 Blocking} *)
val wait_block : 'a t -> 'a or_error
(** [wait_block fut] blocks the current thread until [fut] is resolved,
and returns its value.
{b NOTE}: A word of warning: this will monopolize the calling thread until the future
resolves. This can also easily cause deadlocks, if enough threads in a pool
call [wait_block] on futures running on the same pool or a pool depending on it.
A good rule to avoid deadlocks is to run this from outside of any pool,
or to have an acyclic order between pools where [wait_block]
is only called from a pool on futures evaluated in a pool that comes lower
in the hierarchy.
If this rule is broken, it is possible for all threads in a pool to wait
for futures that can only make progress on these same threads,
hence the deadlock.
*)
val wait_block_exn : 'a t -> 'a
(** Same as {!wait_block} but re-raises the exception if the future failed. *)
(** {2 Infix operators}
These combinators run on either the current pool (if present),
or on the same thread that just fulfilled the previous future
if not.
They were previously present as [module Infix_local] and [val infix],
but are now simplified.
@since 0.5 *)
(** @since 0.5 *)
module Infix : sig
val ( >|= ) : 'a t -> ('a -> 'b) -> 'b t
val ( >>= ) : 'a t -> ('a -> 'b t) -> 'b t
val ( let+ ) : 'a t -> ('a -> 'b) -> 'b t
val ( and+ ) : 'a t -> 'b t -> ('a * 'b) t
val ( let* ) : 'a t -> ('a -> 'b t) -> 'b t
val ( and* ) : 'a t -> 'b t -> ('a * 'b) t
end
include module type of Infix
module Infix_local = Infix
[@@deprecated "Use Infix"]
(** @deprecated use Infix instead *)
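(** For illustration, a short sketch combining the operations above
    ([pool] is a hypothetical runner):
    {[
      let demo pool =
        let a = spawn ~on:pool (fun () -> 1) in
        let b = spawn ~on:pool (fun () -> 2) in
        let sum = map ~on:pool ~f:(fun (x, y) -> x + y) (both a b) in
        (* block from outside the pool, per the warning on wait_block *)
        assert (wait_block_exn sum = 3)
    ]} *)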

9
src/immediate_runner.ml Normal file
View file

@@ -0,0 +1,9 @@
include Runner
let runner : t =
Runner.For_runner_implementors.create
~size:(fun () -> 0)
~num_tasks:(fun () -> 0)
~shutdown:(fun ~wait:_ () -> ())
~run_async:(fun f -> f ())
()

20
src/immediate_runner.mli Normal file
View file

@@ -0,0 +1,20 @@
(** Runner that runs tasks immediately in the caller thread.
Whenever a task is submitted to this runner via [Runner.run_async r task],
the task is run immediately in the caller thread as [task()].
There are no background threads, no resource, this is just a trivial
implementation of the interface.
This can be useful when an implementation needs a runner, but there isn't
enough work to justify starting an actual full thread pool.
Another situation is when threads cannot be used at all (e.g. because you
plan to call [Unix.fork] later).
@since 0.5
*)
include module type of Runner
val runner : t
(** The trivial runner that actually runs tasks at the calling point. *)
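(** For illustration: with this runner, [run_async] has already executed
    the task by the time it returns:
    {[
      let () =
        run_async runner (fun () -> print_endline "first");
        print_endline "second" (* always printed after "first" *)
    ]} *)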

View file

@@ -5,13 +5,13 @@ type 'a t = {
let create content : _ t = { mutex = Mutex.create (); content }
let[@inline never] with_ (self : _ t) f =
let with_ (self : _ t) f =
Mutex.lock self.mutex;
match f self.content with
| x ->
try
let x = f self.content in
Mutex.unlock self.mutex;
x
| exception e ->
with e ->
Mutex.unlock self.mutex;
raise e
@@ -24,13 +24,13 @@ let[@inline] update_map l f =
l.content <- x';
y)
let[@inline never] get l =
let get l =
Mutex.lock l.mutex;
let x = l.content in
Mutex.unlock l.mutex;
x
let[@inline never] set l x =
let set l x =
Mutex.lock l.mutex;
l.content <- x;
Mutex.unlock l.mutex

35
src/lock.mli Normal file
View file

@@ -0,0 +1,35 @@
(** Mutex-protected resource.
@since 0.3 *)
type 'a t
(** A value protected by a mutex *)
val create : 'a -> 'a t
(** Create a new protected value. *)
val with_ : 'a t -> ('a -> 'b) -> 'b
(** [with_ l f] runs [f x] where [x] is the value protected with
the lock [l], in a critical section. If [f x] fails, [with_ l f]
fails too but the lock is released. *)
val update : 'a t -> ('a -> 'a) -> unit
(** [update l f] replaces the content [x] of [l] with [f x], while protected
by the mutex. *)
val update_map : 'a t -> ('a -> 'a * 'b) -> 'b
(** [update_map l f] computes [x', y = f (get l)], then puts [x'] in [l]
and returns [y], while protected by the mutex. *)
val mutex : _ t -> Mutex.t
(** Underlying mutex. *)
val get : 'a t -> 'a
(** Atomically get the value in the lock. The value that is returned
isn't protected! *)
val set : 'a t -> 'a -> unit
(** Atomically set the value.
{b NOTE} caution: using {!get} and {!set} as if this were a {!ref}
is an anti-pattern and will not protect data against some race conditions. *)
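(** For illustration, a small sketch guarding a counter:
    {[
      let () =
        let counter = create 0 in
        update counter (fun n -> n + 1);
        let doubled = with_ counter (fun n -> 2 * n) in
        assert (doubled = 2)
    ]} *)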

View file

@@ -1,10 +0,0 @@
(library
(name moonpool_lwt)
(public_name moonpool-lwt)
(enabled_if
(>= %{ocaml_version} 5.0))
(libraries
(re_export moonpool)
picos
(re_export lwt)
lwt.unix))

View file

@@ -1,310 +0,0 @@
module Exn_bt = Moonpool.Exn_bt
open struct
module WL = Moonpool.Private.Worker_loop_
module M = Moonpool
end
module Fut = Moonpool.Fut
let on_uncaught_exn : (Moonpool.Exn_bt.t -> unit) ref =
ref (fun ebt ->
Printf.eprintf "uncaught exception in moonpool-lwt:\n%s" (Exn_bt.show ebt))
module Scheduler_state = struct
type st = {
tasks: WL.task_full Queue.t;
actions_from_other_threads: (unit -> unit) Queue.t;
(** Other threads ask us to run closures in the lwt thread *)
mutex: Mutex.t;
mutable thread: int;
closed: bool Atomic.t;
cleanup_done: bool Atomic.t;
mutable as_runner: Moonpool.Runner.t;
mutable enter_hook: Lwt_main.Enter_iter_hooks.hook option;
mutable leave_hook: Lwt_main.Leave_iter_hooks.hook option;
mutable notification: int;
(** A lwt_unix notification to wake up the event loop *)
has_notified: bool Atomic.t;
}
(** Main state *)
let cur_st : st option Atomic.t = Atomic.make None
let create_new () : st =
{
tasks = Queue.create ();
actions_from_other_threads = Queue.create ();
mutex = Mutex.create ();
thread = Thread.id (Thread.self ());
closed = Atomic.make false;
cleanup_done = Atomic.make false;
as_runner = Moonpool.Runner.dummy;
enter_hook = None;
leave_hook = None;
notification = 0;
has_notified = Atomic.make false;
}
let[@inline] notify_ (self : st) : unit =
if not (Atomic.exchange self.has_notified true) then
Lwt_unix.send_notification self.notification
let[@inline never] add_action_from_another_thread_ (self : st) f : unit =
Mutex.lock self.mutex;
Queue.push f self.actions_from_other_threads;
Mutex.unlock self.mutex;
notify_ self
let[@inline] on_lwt_thread_ (self : st) : bool =
Thread.id (Thread.self ()) = self.thread
let[@inline] run_on_lwt_thread_ (self : st) (f : unit -> unit) : unit =
if on_lwt_thread_ self then
f ()
else
add_action_from_another_thread_ self f
let cleanup (st : st) =
match Atomic.get cur_st with
| Some st' ->
if st != st' then
failwith
"moonpool-lwt: cleanup failed (state is not the currently active \
one!)";
if not (on_lwt_thread_ st) then
failwith "moonpool-lwt: cleanup from the wrong thread";
Atomic.set st.closed true;
if not (Atomic.exchange st.cleanup_done true) then (
Option.iter Lwt_main.Enter_iter_hooks.remove st.enter_hook;
Option.iter Lwt_main.Leave_iter_hooks.remove st.leave_hook;
Lwt_unix.stop_notification st.notification
);
Atomic.set cur_st None
| None -> failwith "moonpool-lwt: cleanup failed (no current active state)"
end
module Ops = struct
type st = Scheduler_state.st
let schedule (self : st) t =
if Atomic.get self.closed then
failwith "moonpool-lwt.schedule: scheduler is closed";
Scheduler_state.run_on_lwt_thread_ self (fun () -> Queue.push t self.tasks)
let get_next_task (self : st) =
if Atomic.get self.closed then raise WL.No_more_tasks;
try Queue.pop self.tasks with Queue.Empty -> raise WL.No_more_tasks
let on_exn _ ebt = !on_uncaught_exn ebt
let runner (self : st) = self.as_runner
let cleanup = Scheduler_state.cleanup
let as_runner (self : st) : Moonpool.Runner.t =
Moonpool.Runner.For_runner_implementors.create
~size:(fun () -> 1)
~num_tasks:(fun () ->
Mutex.lock self.mutex;
let n = Queue.length self.tasks in
Mutex.unlock self.mutex;
n)
~run_async:(fun ~fiber f -> schedule self @@ WL.T_start { fiber; f })
~shutdown:(fun ~wait:_ () -> Atomic.set self.closed true)
()
let before_start (self : st) : unit =
self.as_runner <- as_runner self;
()
let ops : st WL.ops =
{ schedule; get_next_task; on_exn; runner; before_start; cleanup }
let setup st =
if Atomic.compare_and_set Scheduler_state.cur_st None (Some st) then
before_start st
else
failwith "moonpool-lwt: setup failed (state already in place)"
end
(** Resolve [prom] with the result of [lwt_fut] *)
let transfer_lwt_to_fut (lwt_fut : 'a Lwt.t) (prom : 'a Fut.promise) : unit =
Lwt.on_any lwt_fut
(fun x -> M.Fut.fulfill_idempotent prom (Ok x))
(fun exn ->
let bt = Printexc.get_callstack 10 in
M.Fut.fulfill_idempotent prom (Error (Exn_bt.make exn bt)))
let[@inline] register_trigger_on_lwt_termination (lwt_fut : _ Lwt.t)
(tr : M.Trigger.t) : unit =
Lwt.on_termination lwt_fut (fun _ -> M.Trigger.signal tr)
let[@inline] await_lwt_terminated (fut : _ Lwt.t) =
match Lwt.state fut with
| Return x -> x
| Fail exn -> raise exn
| Sleep -> assert false
module Main_state = struct
let[@inline] get_st () : Scheduler_state.st =
match Atomic.get Scheduler_state.cur_st with
| Some st ->
if Atomic.get st.closed then failwith "moonpool-lwt: scheduler is closed";
st
| None -> failwith "moonpool-lwt: scheduler is not setup"
let[@inline] run_on_lwt_thread f =
Scheduler_state.run_on_lwt_thread_ (get_st ()) f
let[@inline] on_lwt_thread () : bool =
Scheduler_state.on_lwt_thread_ (get_st ())
let[@inline] add_action_from_another_thread f : unit =
Scheduler_state.add_action_from_another_thread_ (get_st ()) f
end
let await_lwt_from_another_thread fut =
let tr = M.Trigger.create () in
Main_state.add_action_from_another_thread (fun () ->
register_trigger_on_lwt_termination fut tr);
M.Trigger.await_exn tr;
await_lwt_terminated fut
let await_lwt (fut : _ Lwt.t) =
if Scheduler_state.on_lwt_thread_ (Main_state.get_st ()) then (
(* can directly access the future *)
match Lwt.state fut with
| Return x -> x
| Fail exn -> raise exn
| Sleep ->
let tr = M.Trigger.create () in
register_trigger_on_lwt_termination fut tr;
M.Trigger.await_exn tr;
await_lwt_terminated fut
) else
await_lwt_from_another_thread fut
let lwt_of_fut (fut : 'a M.Fut.t) : 'a Lwt.t =
if not (Main_state.on_lwt_thread ()) then
failwith "lwt_of_fut: not on the lwt thread";
let lwt_fut, lwt_prom = Lwt.wait () in
(* in lwt thread, resolve [lwt_fut] *)
let wakeup_using_res = function
| Ok x -> Lwt.wakeup lwt_prom x
| Error ebt ->
let exn = Exn_bt.exn ebt in
Lwt.wakeup_exn lwt_prom exn
in
M.Fut.on_result fut (fun res ->
Main_state.run_on_lwt_thread (fun () ->
(* can safely wakeup from the lwt thread *)
wakeup_using_res res));
lwt_fut
let fut_of_lwt (lwt_fut : _ Lwt.t) : _ M.Fut.t =
if Main_state.on_lwt_thread () then (
match Lwt.state lwt_fut with
| Return x -> M.Fut.return x
| _ ->
let fut, prom = M.Fut.make () in
transfer_lwt_to_fut lwt_fut prom;
fut
) else (
let fut, prom = M.Fut.make () in
Main_state.add_action_from_another_thread (fun () ->
transfer_lwt_to_fut lwt_fut prom);
fut
)
module Setup_lwt_hooks (ARG : sig
val st : Scheduler_state.st
end) =
struct
open ARG
module FG =
WL.Fine_grained
(struct
include Scheduler_state
let st = st
let ops = Ops.ops
end)
()
let run_in_hook () =
(* execute actions sent from other threads; first transfer them
all atomically to a local queue to reduce contention *)
let local_acts = Queue.create () in
Mutex.lock st.mutex;
Queue.transfer st.actions_from_other_threads local_acts;
Atomic.set st.has_notified false;
Mutex.unlock st.mutex;
Queue.iter (fun f -> f ()) local_acts;
(* run tasks *)
FG.run ~max_tasks:1000 ();
if not (Queue.is_empty st.tasks) then ignore (Lwt.pause () : unit Lwt.t);
()
let setup () =
(* only one thread does this *)
FG.setup ~block_signals:false ();
st.thread <- Thread.self () |> Thread.id;
st.enter_hook <- Some (Lwt_main.Enter_iter_hooks.add_last run_in_hook);
st.leave_hook <- Some (Lwt_main.Leave_iter_hooks.add_last run_in_hook);
(* notification used to wake lwt up *)
st.notification <- Lwt_unix.make_notification ~once:false run_in_hook
end
let setup () : Scheduler_state.st =
let st = Scheduler_state.create_new () in
Ops.setup st;
let module Setup_lwt_hooks' = Setup_lwt_hooks (struct
let st = st
end) in
Setup_lwt_hooks'.setup ();
st
let[@inline] is_setup () = Option.is_some @@ Atomic.get Scheduler_state.cur_st
let spawn_lwt f : _ Lwt.t =
let st = Main_state.get_st () in
let lwt_fut, lwt_prom = Lwt.wait () in
M.run_async st.as_runner (fun () ->
try
let x = f () in
Lwt.wakeup lwt_prom x
with exn -> Lwt.wakeup_exn lwt_prom exn);
lwt_fut
let spawn_lwt_ignore f = ignore (spawn_lwt f : unit Lwt.t)
let on_lwt_thread = Main_state.on_lwt_thread
let run_in_lwt_and_await (f : unit -> 'a) : 'a =
let st = Main_state.get_st () in
if Scheduler_state.on_lwt_thread_ st then
(* run immediately *)
f ()
else
await_lwt_from_another_thread @@ spawn_lwt f
let lwt_main (f : _ -> 'a) : 'a =
let st = setup () in
(* make sure to cleanup *)
let finally () = Scheduler_state.cleanup st in
Fun.protect ~finally @@ fun () ->
let fut = spawn_lwt (fun () -> f st.as_runner) in
(* make sure the scheduler isn't already sleeping *)
Scheduler_state.notify_ st;
Lwt_main.run fut
let[@inline] lwt_main_runner () =
let st = Main_state.get_st () in
st.as_runner

View file

@@ -1,73 +0,0 @@
(** Lwt_engine-based event loop for Moonpool.
In what follows, we mean by "lwt thread" the thread running {!lwt_main}
(which wraps [Lwt_main.run]; so, the thread where the Lwt event loop and all
Lwt callbacks execute).
{b NOTE}: this is experimental and might change in future versions.
@since 0.6
The API has entirely changed since 0.9; see
https://github.com/c-cube/moonpool/pull/37 *)
module Fut = Moonpool.Fut
(** {2 Basic conversions} *)
val fut_of_lwt : 'a Lwt.t -> 'a Moonpool.Fut.t
(** [fut_of_lwt lwt_fut] makes a thread-safe moonpool future that completes when
[lwt_fut] does. This can be run from any thread. *)
val lwt_of_fut : 'a Moonpool.Fut.t -> 'a Lwt.t
(** [lwt_of_fut fut] makes a lwt future that completes when [fut] does. This
must be called from the Lwt thread, and the result must always be used only
from inside the Lwt thread.
@raise Failure if not run from the lwt thread. *)
(** {2 Helpers on the moonpool side} *)
val spawn_lwt : (unit -> 'a) -> 'a Lwt.t
(** This spawns a task that runs in the Lwt scheduler. This function is thread
safe.
@raise Failure if {!lwt_main} was not called. *)
val spawn_lwt_ignore : (unit -> unit) -> unit
(** Like {!spawn_lwt} but ignores the result, like [Lwt.async]. This function is
thread safe. *)
val await_lwt : 'a Lwt.t -> 'a
(** [await_lwt fut] awaits a Lwt future from inside a task running on a moonpool
runner. This must be run from within a Moonpool runner so that the await-ing
effect is handled, but it doesn't have to run from inside the Lwt thread. *)
val run_in_lwt_and_await : (unit -> 'a) -> 'a
(** [run_in_lwt_and_await f] runs [f()] in the lwt thread, just like
[spawn_lwt f], and then calls {!await_lwt} on the result. This means [f()]
can use Lwt functions and libraries, use {!await_lwt} on them freely, etc.
This function must run from within a task running on a moonpool runner so
that it can [await_lwt]. *)
(** {2 Wrappers around Lwt_main} *)
val on_uncaught_exn : (Moonpool.Exn_bt.t -> unit) ref
(** Exception handler for tasks that raise an uncaught exception. *)
val lwt_main : (Moonpool.Runner.t -> 'a) -> 'a
(** [lwt_main f] sets the moonpool-lwt bridge up, runs lwt main, calls [f],
destroys the bridge, and returns the result of [f()]. Only one thread should
call this at a time. *)
val on_lwt_thread : unit -> bool
(** [on_lwt_thread ()] is true if the current thread is the one currently
running {!lwt_main}. This is thread safe.
@raise Failure if {!lwt_main} was not called. *)
val lwt_main_runner : unit -> Moonpool.Runner.t
(** The runner from {!lwt_main}. The runner is only going to work if {!lwt_main}
is currently running in some thread. This is thread safe.
@raise Failure if {!lwt_main} was not called. *)
val is_setup : unit -> bool
(** Is the moonpool-lwt bridge setup? This is thread safe. *)
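(** For illustration, a minimal sketch of the bridge ([Lwt_unix.sleep]
    stands in for any Lwt-based operation):
    {[
      let () =
        lwt_main (fun _runner ->
            (* this callback runs as a moonpool task on the lwt thread,
               so awaiting Lwt futures here is allowed *)
            await_lwt (Lwt_unix.sleep 0.1);
            print_endline "done")
    ]} *)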

33
src/moonpool.ml Normal file
View file

@@ -0,0 +1,33 @@
let start_thread_on_some_domain f x =
let did = Random.int (D_pool_.n_domains ()) in
D_pool_.run_on_and_wait did (fun () -> Thread.create f x)
let run_async = Runner.run_async
let recommended_thread_count () = Domain_.recommended_number ()
let spawn = Fut.spawn
let spawn_on_current_runner = Fut.spawn_on_current_runner
[@@@ifge 5.0]
let await = Fut.await
[@@@endif]
module Atomic = Atomic_
module Blocking_queue = Bb_queue
module Bounded_queue = Bounded_queue
module Chan = Chan
module Fifo_pool = Fifo_pool
module Fork_join = Fork_join
module Fut = Fut
module Lock = Lock
module Immediate_runner = Immediate_runner
module Pool = Fifo_pool
module Runner = Runner
module Thread_local_storage = Thread_local_storage_
module Ws_pool = Ws_pool
module Private = struct
module Ws_deque_ = Ws_deque_
module Suspend_ = Suspend_
end

201
src/moonpool.mli Normal file
View file

@@ -0,0 +1,201 @@
(** Moonpool
A pool within a bigger pool (ie the ocean). Here, we're talking about
pools of [Thread.t] that are dispatched over several [Domain.t] to
enable parallelism.
We provide several implementations of pools
with distinct scheduling strategies, alongside some concurrency
primitives such as guarding locks ({!Lock.t}) and futures ({!Fut.t}).
*)
module Ws_pool = Ws_pool
module Fifo_pool = Fifo_pool
module Runner = Runner
module Immediate_runner = Immediate_runner
module Pool = Fifo_pool
[@@deprecated "use Fifo_pool or Ws_pool to be more explicit"]
(** Default pool. Please explicitly pick an implementation instead. *)
val start_thread_on_some_domain : ('a -> unit) -> 'a -> Thread.t
(** Similar to {!Thread.create}, but it picks a background domain at random
to run the thread. This ensures that we don't always pick the same domain
to run all the various threads needed in an application (timers, event loops, etc.) *)
val run_async : Runner.t -> (unit -> unit) -> unit
(** [run_async runner task] schedules the task to run
on the given runner. This means [task()] will be executed
at some point in the future, possibly in another thread.
@since 0.5 *)
val recommended_thread_count : unit -> int
(** Number of threads recommended to saturate the CPU.
For IO pools this makes little sense (you might want more threads than
this because many of them will be blocked most of the time).
@since 0.5 *)
val spawn : on:Runner.t -> (unit -> 'a) -> 'a Fut.t
(** [spawn ~on f] runs [f()] on the runner (a thread pool typically)
and returns a future result for it. See {!Fut.spawn}.
@since 0.5 *)
val spawn_on_current_runner : (unit -> 'a) -> 'a Fut.t
(** See {!Fut.spawn_on_current_runner}.
@since 0.5 *)
[@@@ifge 5.0]
val await : 'a Fut.t -> 'a
(** Await a future. See {!Fut.await}.
Only on OCaml >= 5.0.
@since 0.5 *)
[@@@endif]
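(** Illustration (a sketch, not part of the original interface): spawning
    tasks on a pool and awaiting them from within another task. Requires
    OCaml >= 5.0 for [await]; the pool size is arbitrary.
    {[
      let () =
        let pool = Ws_pool.create ~num_threads:4 () in
        let a = spawn ~on:pool (fun () -> 20) in
        let b = spawn ~on:pool (fun () -> 22) in
        let sum = spawn ~on:pool (fun () -> await a + await b) in
        Printf.printf "%d\n" (Fut.wait_block_exn sum);
        Runner.shutdown pool
    ]} *)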
module Lock = Lock
module Fut = Fut
module Chan = Chan
module Fork_join = Fork_join
module Thread_local_storage = Thread_local_storage_
(** A simple blocking queue.
This queue is quite basic and will not behave well under heavy
contention. However, it can be sufficient for many practical use cases.
{b NOTE}: this queue will typically block the caller thread
when the operation (push/pop) cannot proceed.
Be wary of deadlocks when using the queue {i from} a pool,
if the other end is also pushed to/popped from within
the same pool.
See the discussion on {!Fut.wait_block} for more details on deadlocks
and how to mitigate the risk of running into them.
More scalable queues can be found in
Lockfree (https://github.com/ocaml-multicore/lockfree/)
*)
module Blocking_queue : sig
type 'a t
(** Unbounded blocking queue.
This queue is thread-safe; calling {!pop} on it will block
while it is empty. *)
val create : unit -> _ t
(** Create a new unbounded queue. *)
val size : _ t -> int
(** Number of items currently in the queue. Note that [pop]
might still block if this returns a non-zero number, since another
thread might have consumed the items in the meantime.
@since 0.2 *)
exception Closed
val push : 'a t -> 'a -> unit
(** [push q x] pushes [x] into [q], and returns [()].
In the current implementation, [push q] will never block for
a long time: it only blocks while waiting for the internal lock
so that it can push the element.
@raise Closed if the queue is closed (by a previous call to [close q]) *)
val pop : 'a t -> 'a
(** [pop q] pops the next element in [q]. It might block until an element comes.
@raise Closed if the queue was closed before a new element was available. *)
val close : _ t -> unit
(** Close the queue, meaning no further [push] is allowed,
i.e. [push] will raise {!Closed}.
[pop] will keep working and will return the elements present in the
queue, until it's entirely drained; then [pop] will
also raise {!Closed}. *)
val try_pop : force_lock:bool -> 'a t -> 'a option
(** [try_pop q] immediately pops the first element of [q], if any,
or returns [None] without blocking.
@param force_lock if true, use {!Mutex.lock} (which can block under contention);
if false, use {!Mutex.try_lock}, which might return [None] even in
the presence of an element if there's contention *)
val try_push : 'a t -> 'a -> bool
(** [try_push q x] attempts to push [x] into [q] without blocking.
It returns [true] on success, and [false] if it could not
acquire the lock.
@raise Closed if the locking succeeded but the queue is closed.
*)
val transfer : 'a t -> 'a Queue.t -> unit
(** [transfer bq q2] transfers all items presently
in [bq] into [q2] in one atomic section, and clears [bq].
It blocks if no element is in [bq].
This is useful to consume elements from the queue in batch.
Create a [Queue.t] locally:
{[
let dowork (work_queue: job Blocking_queue.t) =
  (* local queue, not thread safe *)
  let local_q = Queue.create () in
  try
    while true do
      (* work on local events, already on this thread *)
      while not (Queue.is_empty local_q) do
        let job = Queue.pop local_q in
        process_job job
      done;
      (* get all the events in the incoming blocking queue, in
         one single critical section. *)
      Blocking_queue.transfer work_queue local_q
    done
  with Blocking_queue.Closed -> ()
]}
@since 0.4 *)
type 'a gen = unit -> 'a option
type 'a iter = ('a -> unit) -> unit
val to_iter : 'a t -> 'a iter
(** [to_iter q] returns an iterator over all items in the queue.
This might not terminate if [q] is never closed.
@since 0.4 *)
val to_gen : 'a t -> 'a gen
(** [to_gen q] returns a generator from the queue.
@since 0.4 *)
val to_seq : 'a t -> 'a Seq.t
(** [to_seq q] returns a (transient) sequence from the queue.
@since 0.4 *)
end
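(** Illustration (a sketch, not part of the original interface): a simple
    producer/consumer setup over a {!Blocking_queue}, using plain threads.
    [process] is a placeholder for real work.
    {[
      let process x = Printf.printf "got %d\n%!" x

      let () =
        let q : int Blocking_queue.t = Blocking_queue.create () in
        let consumer =
          Thread.create
            (fun () ->
              try
                while true do
                  process (Blocking_queue.pop q)
                done
              with Blocking_queue.Closed -> ())
            ()
        in
        for i = 1 to 5 do
          Blocking_queue.push q i
        done;
        (* after [close], the consumer drains what remains, then
           [pop] raises [Closed] and the loop exits *)
        Blocking_queue.close q;
        Thread.join consumer
    ]} *)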
module Bounded_queue = Bounded_queue
module Atomic = Atomic_
(** Atomic values.
This is either a shim using [ref], on pre-OCaml 5, or the
standard [Atomic] module on OCaml 5. *)
(**/**)
module Private : sig
module Ws_deque_ = Ws_deque_
(** {2 Suspensions} *)
module Suspend_ = Suspend_
[@@alert
unstable "this module is an implementation detail of moonpool for now"]
(** Suspensions.
This is only going to work on OCaml 5.x.
{b NOTE}: this is not stable for now. *)
end


@ -1,12 +0,0 @@
(library
(name moonpool_private)
(public_name moonpool.private)
(synopsis "Private internal utils for Moonpool (do not rely on)")
(libraries
threads
either
(select
tracing_.ml
from
(trace.core -> tracing_.real.ml)
(-> tracing_.dummy.ml))))


@ -1,15 +0,0 @@
let ignore_signals_ () =
try
Thread.sigmask SIG_BLOCK
[
Sys.sigpipe;
Sys.sigbus;
Sys.sigterm;
Sys.sigchld;
Sys.sigalrm;
Sys.sigint;
Sys.sigusr1;
Sys.sigusr2;
]
|> ignore
with _ -> ()


@ -1,6 +0,0 @@
let enabled () = false
let dummy_span = 0L
let enter_span _name = dummy_span
let exit_span = ignore
let set_thread_name = ignore
let with_span _ f = f dummy_span


@ -1,6 +0,0 @@
val dummy_span : int64
val enter_span : string -> int64
val exit_span : int64 -> unit
val with_span : string -> (int64 -> 'a) -> 'a
val enabled : unit -> bool
val set_thread_name : string -> unit


@ -1,25 +0,0 @@
module Trace = Trace_core
let enabled = Trace.enabled
let dummy_span = Int64.min_int
let dummy_file_ = "<unknown file>"
let set_thread_name = Trace.set_thread_name
let[@inline] enter_span name : int64 =
if name = "" then
dummy_span
else
Trace.enter_span ~__FILE__:dummy_file_ ~__LINE__:0 name
let[@inline] exit_span sp = if sp <> dummy_span then Trace.exit_span sp
let with_span name f =
let sp = enter_span name in
try
let x = f sp in
exit_span sp;
x
with exn ->
let bt = Printexc.get_raw_backtrace () in
exit_span sp;
Printexc.raise_with_backtrace exn bt


@ -1,30 +0,0 @@
(** Work-stealing deque.
Adapted from "Dynamic circular work stealing deque", Chase & Lev.
However note that this one is not dynamic in the sense that there is no
resizing. Instead we return [false] when [push] fails, which keeps the
implementation fairly lightweight. *)
type 'a t
(** Deque containing values of type ['a] *)
val create : dummy:'a -> unit -> 'a t
(** Create a new deque. *)
val push : 'a t -> 'a -> bool
(** Push value at the bottom of deque. Returns [true] if it succeeds. This must
be called only by the owner thread. *)
val pop : 'a t -> 'a option
(** Pop value from the bottom of deque. This must be called only by the owner
thread. *)
exception Empty
val pop_exn : 'a t -> 'a
val steal : 'a t -> 'a option
(** Try to steal from the top of deque. This is thread-safe. *)
val size : _ t -> int


@ -1,10 +1,9 @@
open Types_
module TLS = Thread_local_storage_
type fiber = Picos.Fiber.t
type task = unit -> unit
type t = runner = {
run_async: fiber:fiber -> task -> unit;
type t = {
run_async: (unit -> unit) -> unit;
shutdown: wait:bool -> unit -> unit;
size: unit -> int;
num_tasks: unit -> int;
@ -12,16 +11,7 @@ type t = runner = {
exception Shutdown
let[@inline] run_async ?fiber (self : t) f : unit =
let fiber =
match fiber with
| Some f -> f
| None ->
let comp = Picos.Computation.create () in
Picos.Fiber.create ~forbid:false comp
in
self.run_async ~fiber f
let[@inline] run_async (self : t) f : unit = self.run_async f
let[@inline] shutdown (self : t) : unit = self.shutdown ~wait:true ()
let[@inline] shutdown_without_waiting (self : t) : unit =
@ -30,9 +20,9 @@ let[@inline] shutdown_without_waiting (self : t) : unit =
let[@inline] num_tasks (self : t) : int = self.num_tasks ()
let[@inline] size (self : t) : int = self.size ()
let run_wait_block ?fiber self (f : unit -> 'a) : 'a =
let run_wait_block self (f : unit -> 'a) : 'a =
let q = Bb_queue.create () in
run_async ?fiber self (fun () ->
run_async self (fun () ->
try
let x = f () in
Bb_queue.push q (Ok x)
@ -47,17 +37,8 @@ module For_runner_implementors = struct
let create ~size ~num_tasks ~shutdown ~run_async () : t =
{ size; num_tasks; shutdown; run_async }
let k_cur_runner : t TLS.t = Types_.k_cur_runner
let k_cur_runner : t option ref TLS.key = TLS.new_key (fun () -> ref None)
end
let dummy : t =
For_runner_implementors.create
~size:(fun () -> 0)
~num_tasks:(fun () -> 0)
~shutdown:(fun ~wait:_ () -> ())
~run_async:(fun ~fiber:_ _ ->
failwith "Runner.dummy: cannot actually run tasks")
()
let get_current_runner = get_current_runner
let get_current_fiber = get_current_fiber
let[@inline] get_current_runner () : _ option =
!(TLS.get For_runner_implementors.k_cur_runner)

src/runner.mli Normal file

@ -0,0 +1,73 @@
(** Interface for runners.
This provides an abstraction for running tasks in the background,
which is implemented by various thread pools.
@since 0.3
*)
type task = unit -> unit
type t
(** A runner.
If a runner is no longer needed, {!shutdown} can be used to signal all
worker threads
in it to stop (after they finish their work), and wait for them to stop.
The threads are distributed across a fixed domain pool
(whose size is determined by {!Domain.recommended_domain_count} on OCaml 5, and
simply the single runtime on OCaml 4). *)
val size : t -> int
(** Number of threads/workers. *)
val num_tasks : t -> int
(** Current number of tasks. This is at best a snapshot, useful for metrics
and debugging. *)
val shutdown : t -> unit
(** Shutdown the runner and wait for it to terminate. Idempotent. *)
val shutdown_without_waiting : t -> unit
(** Shutdown the pool, and do not wait for it to terminate. Idempotent. *)
exception Shutdown
val run_async : t -> task -> unit
(** [run_async pool f] schedules [f] for later execution on the runner
in one of the threads. [f()] will run on one of the runner's
worker threads/domains.
@raise Shutdown if the runner was shut down before [run_async] was called. *)
val run_wait_block : t -> (unit -> 'a) -> 'a
(** [run_wait_block pool f] schedules [f] for later execution
on the pool, like {!run_async}.
It then blocks the current thread until [f()] is done executing,
and returns its result. If [f()] raises an exception, then [run_wait_block pool f]
will raise it as well.
{b NOTE} be careful with deadlocks (see notes in {!Fut.wait_block}
about the required discipline to avoid deadlocks). *)
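(** Illustration (a sketch, not part of the original interface):
    {[
      (* submit a closure and block the calling thread for its result;
         call this from a thread that is not one of the pool's workers *)
      let compute_on (pool : t) : int = run_wait_block pool (fun () -> 6 * 7)
    ]} *)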
(** This module is specifically intended for users who implement their
own runners. Regular users of Moonpool should not need to look at it. *)
module For_runner_implementors : sig
val create :
size:(unit -> int) ->
num_tasks:(unit -> int) ->
shutdown:(wait:bool -> unit -> unit) ->
run_async:(task -> unit) ->
unit ->
t
(** Create a new runner.
{b NOTE}: the runner should support DLA and {!Suspend_} on OCaml 5.x,
so that {!Fork_join} and other 5.x features work properly. *)
val k_cur_runner : t option ref Thread_local_storage_.key
end
val get_current_runner : unit -> t option
(** Access the current runner. This returns [Some r] if the call
happens on a thread that belongs in a runner.
@since 0.5 *)
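(** Illustration (a toy sketch, not part of the original interface): a
    trivial runner built with {!For_runner_implementors.create} that runs
    each task immediately in the calling thread. A real implementation
    would dispatch to worker threads, and on OCaml 5 should also support
    DLA/{!Suspend_} as noted above.
    {[
      let immediate_runner () : t =
        let active = Atomic.make true in
        For_runner_implementors.create
          ~size:(fun () -> 0)
          ~num_tasks:(fun () -> 0)
          ~shutdown:(fun ~wait:_ () -> Atomic.set active false)
          ~run_async:(fun task ->
            if Atomic.get active then task () else raise Shutdown)
          ()
    ]} *)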

src/suspend_.ml Normal file

@ -0,0 +1,56 @@
type suspension = (unit, exn * Printexc.raw_backtrace) result -> unit
type task = unit -> unit
type suspension_handler = { handle: run:(task -> unit) -> suspension -> unit }
[@@unboxed]
[@@@ifge 5.0]
[@@@ocaml.alert "-unstable"]
module A = Atomic_
type _ Effect.t += Suspend : suspension_handler -> unit Effect.t
let[@inline] suspend h = Effect.perform (Suspend h)
let with_suspend ~(run : task -> unit) (f : unit -> unit) : unit =
let module E = Effect.Deep in
(* effect handler *)
let effc : type e. e Effect.t -> ((e, _) E.continuation -> _) option =
function
| Suspend h ->
Some
(fun k ->
let k' : suspension = function
| Ok () -> E.continue k ()
| Error (exn, bt) -> E.discontinue_with_backtrace k exn bt
in
h.handle ~run k')
| _ -> None
in
E.try_with f () { E.effc }
(* DLA interop *)
let prepare_for_await () : Dla_.t =
(* current state *)
let st : ((task -> unit) * suspension) option A.t = A.make None in
let release () : unit =
match A.exchange st None with
| None -> ()
| Some (run, k) -> run (fun () -> k (Ok ()))
and await () : unit =
suspend { handle = (fun ~run k -> A.set st (Some (run, k))) }
in
let t = { Dla_.release; await } in
t
[@@@ocaml.alert "+unstable"]
[@@@else_]
let[@inline] with_suspend ~run:_ f = f ()
let[@inline] prepare_for_await () = { Dla_.release = ignore; await = ignore }
[@@@endif]

src/suspend_.mli Normal file

@ -0,0 +1,60 @@
(** (Private) suspending tasks using Effects.
This module is an implementation detail of Moonpool and should
not be used outside of it, except by experts to implement {!Runner}. *)
type suspension = (unit, exn * Printexc.raw_backtrace) result -> unit
(** A suspended computation *)
type task = unit -> unit
type suspension_handler = { handle: run:(task -> unit) -> suspension -> unit }
[@@unboxed]
(** The handler that knows what to do with the suspended computation.
The handler is given two things:
- the suspended computation (which can be resumed with a result
eventually);
- a [run] function that can be used to start tasks to perform some
computation.
This means that a fork-join primitive, for example, can use a single call
to {!suspend} to:
- suspend the caller until the fork-join is done
- use [run] to start all the tasks. Typically [run] is called multiple times,
which is where the "fork" part comes from. Each call to [run] potentially
runs in parallel with the other calls. The calls must coordinate so
that, once they are all done, the suspended caller is resumed with the
aggregated result of the computation.
*)
[@@@ifge 5.0]
[@@@ocaml.alert "-unstable"]
type _ Effect.t +=
| Suspend : suspension_handler -> unit Effect.t
(** The effect used to suspend the current thread and pass it, suspended,
to the handler. The handler will ensure that the suspension is resumed later
once some computation has been done. *)
[@@@ocaml.alert "+unstable"]
val suspend : suspension_handler -> unit
(** [suspend h] jumps back to the nearest {!with_suspend}
and calls [h.handle] with the current continuation [k]
and a task runner function.
*)
[@@@endif]
val prepare_for_await : unit -> Dla_.t
(** Our stub for DLA. Unstable. *)
val with_suspend : run:(task -> unit) -> (unit -> unit) -> unit
(** [with_suspend ~run f] runs [f()] in an environment where [suspend]
will work. If [f()] suspends with suspension handler [h],
this calls [h.handle ~run k] where [k] is the suspension.
On OCaml 4.x this simply runs [f()] directly: suspension is not available.
*)
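(** Illustration (a sketch, not part of the original interface; OCaml 5.x
    only): a cooperative [yield] built on {!suspend}, driven by a trivial
    FIFO scheduler. The handler reschedules the continuation through [run].
    {[
      let yield () : unit =
        suspend { handle = (fun ~run k -> run (fun () -> k (Ok ()))) }

      let run_all (f : unit -> unit) : unit =
        let q : (unit -> unit) Queue.t = Queue.create () in
        Queue.push (fun () -> with_suspend f ~run:(fun t -> Queue.push t q)) q;
        while not (Queue.is_empty q) do
          (Queue.pop q) ()
        done
    ]} *)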


@ -0,0 +1,21 @@
(** Thread local storage *)
(* TODO: alias this to the library if present *)
type 'a key
(** A TLS key for values of type ['a]. This allows the
storage of a single value of type ['a] per thread. *)
val new_key : (unit -> 'a) -> 'a key
(** Allocate a new, generative key.
When the key is used for the first time on a thread,
the function is called to produce the initial value.
This should only ever be called at toplevel to produce
constants; do not use it in a loop. *)
val get : 'a key -> 'a
(** Get the value for the current thread. *)
val set : 'a key -> 'a -> unit
(** Set the value for the current thread. *)
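(** Illustration (a sketch, not part of the original interface): a
    per-thread counter, allocated lazily on first use in each thread.
    {[
      let counter_key : int ref key = new_key (fun () -> ref 0)

      let incr_counter () : int =
        let r = get counter_key in
        incr r;
        !r
    ]} *)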


@ -0,0 +1,82 @@
(* see: https://discuss.ocaml.org/t/a-hack-to-implement-efficient-tls-thread-local-storage/13264 *)
module A = Atomic_
(* sanity check *)
let () = assert (Obj.field (Obj.repr (Thread.self ())) 1 = Obj.repr ())
type 'a key = {
index: int; (** Unique index for this key. *)
compute: unit -> 'a;
(** Initializer for values for this key. Called at most
once per thread. *)
}
(** Counter used to allocate new keys *)
let counter = A.make 0
(** Value used to detect a TLS slot that was not initialized yet *)
let[@inline] sentinel_value_for_uninit_tls_ () : Obj.t = Obj.repr counter
let new_key compute : _ key =
let index = A.fetch_and_add counter 1 in
{ index; compute }
type thread_internal_state = {
_id: int; (** Thread ID (here for padding reasons) *)
mutable tls: Obj.t; (** Our data, stowed away in this unused field *)
}
(** A partial representation of the internal type [Thread.t], allowing
us to access the second field (unused after the thread
has started) and stash TLS data in it. *)
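(** [ceil_pow_2_minus_1 n] computes the smallest integer of the form
    [2^k - 1] that is [>= n], by smearing the highest set bit of [n]
    into every lower bit position. *)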
let ceil_pow_2_minus_1 (n : int) : int =
let n = n lor (n lsr 1) in
let n = n lor (n lsr 2) in
let n = n lor (n lsr 4) in
let n = n lor (n lsr 8) in
let n = n lor (n lsr 16) in
if Sys.int_size > 32 then
n lor (n lsr 32)
else
n
(** Grow the array so that [index] is valid. *)
let[@inline never] grow_tls (old : Obj.t array) (index : int) : Obj.t array =
let new_length = ceil_pow_2_minus_1 (index + 1) in
let new_ = Array.make new_length (sentinel_value_for_uninit_tls_ ()) in
Array.blit old 0 new_ 0 (Array.length old);
new_
let[@inline] get_tls_ (index : int) : Obj.t array =
let thread : thread_internal_state = Obj.magic (Thread.self ()) in
let tls = thread.tls in
if Obj.is_int tls then (
let new_tls = grow_tls [||] index in
thread.tls <- Obj.magic new_tls;
new_tls
) else (
let tls = (Obj.magic tls : Obj.t array) in
if index < Array.length tls then
tls
else (
let new_tls = grow_tls tls index in
thread.tls <- Obj.magic new_tls;
new_tls
)
)
let get key =
let tls = get_tls_ key.index in
let value = Array.unsafe_get tls key.index in
if value != sentinel_value_for_uninit_tls_ () then
Obj.magic value
else (
let value = key.compute () in
Array.unsafe_set tls key.index (Obj.repr (Sys.opaque_identity value));
value
)
let set key value =
let tls = get_tls_ key.index in
Array.unsafe_set tls key.index (Obj.repr (Sys.opaque_identity value))


@ -0,0 +1,3 @@
(* just defer to library *)
include Thread_local_storage


@ -1,5 +1,5 @@
let num_threads ?num_threads () : int =
let n_domains = Moonpool_dpool.max_number_of_domains () in
let n_domains = D_pool_.n_domains () in
(* number of threads to run *)
let num_threads =


@ -1,4 +1,4 @@
module A = Atomic
module A = Atomic_
(* terminology:
@ -72,9 +72,7 @@ let push (self : 'a t) (x : 'a) : bool =
true
with Full -> false
exception Empty
let pop_exn (self : 'a t) : 'a =
let pop (self : 'a t) : 'a option =
let b = A.get self.bottom in
let b = b - 1 in
A.set self.bottom b;
@ -86,11 +84,11 @@ let pop_exn (self : 'a t) : 'a =
if size < 0 then (
(* reset to basic empty state *)
A.set self.bottom t;
raise_notrace Empty
None
) else if size > 0 then (
(* can pop without modifying [top] *)
let x = CA.get self.arr b in
x
Some x
) else (
assert (size = 0);
(* there was exactly one slot, so we might be racing against stealers
@ -98,18 +96,13 @@ let pop_exn (self : 'a t) : 'a =
if A.compare_and_set self.top t (t + 1) then (
let x = CA.get self.arr b in
A.set self.bottom (t + 1);
x
Some x
) else (
A.set self.bottom (t + 1);
raise_notrace Empty
None
)
)
let[@inline] pop self : _ option =
match pop_exn self with
| exception Empty -> None
| t -> Some t
let steal (self : 'a t) : 'a option =
(* read [top], but do not update [top_cached]
as we're in another thread *)

src/ws_deque_.mli Normal file

@ -0,0 +1,27 @@
(** Work-stealing deque.
Adapted from "Dynamic circular work stealing deque", Chase & Lev.
However note that this one is not dynamic in the sense that there
is no resizing. Instead we return [false] when [push] fails, which
keeps the implementation fairly lightweight.
*)
type 'a t
(** Deque containing values of type ['a] *)
val create : dummy:'a -> unit -> 'a t
(** Create a new deque. *)
val push : 'a t -> 'a -> bool
(** Push value at the bottom of deque. Returns [true] if it succeeds.
This must be called only by the owner thread. *)
val pop : 'a t -> 'a option
(** Pop value from the bottom of deque.
This must be called only by the owner thread. *)
val steal : 'a t -> 'a option
(** Try to steal from the top of deque. This is thread-safe. *)
val size : _ t -> int
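(** Illustration (a sketch, not part of the original interface; this module
    is internal to Moonpool): the owner pushes and pops at the bottom, other
    threads steal from the top. [0] is the required dummy element.
    {[
      let () =
        let d : int t = create ~dummy:0 () in
        assert (push d 1);
        assert (push d 2);
        assert (pop d = Some 2); (* LIFO on the owner's side *)
        let thief = Thread.create (fun () -> ignore (steal d)) () in
        Thread.join thief
    ]} *)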

src/ws_pool.ml Normal file

@ -0,0 +1,337 @@
module WSQ = Ws_deque_
module A = Atomic_
module TLS = Thread_local_storage_
include Runner
let ( let@ ) = ( @@ )
module Id = struct
type t = unit ref
(** Unique identifier for a pool *)
let create () : t = Sys.opaque_identity (ref ())
let equal : t -> t -> bool = ( == )
end
type worker_state = {
pool_id_: Id.t; (** Unique per pool *)
mutable thread: Thread.t;
q: task WSQ.t; (** Work stealing queue *)
rng: Random.State.t;
}
(** State for a given worker. Only this worker is
allowed to push into the queue, but other workers
can come and steal from it if they're idle. *)
type around_task = AT_pair : (t -> 'a) * (t -> 'a -> unit) -> around_task
type state = {
id_: Id.t;
active: bool A.t; (** Becomes [false] when the pool is shutdown. *)
workers: worker_state array; (** Fixed set of workers. *)
main_q: task Queue.t; (** Main queue for tasks coming from the outside *)
mutable n_waiting: int; (* protected by mutex *)
mutable n_waiting_nonzero: bool; (** [n_waiting > 0] *)
mutex: Mutex.t;
cond: Condition.t;
on_exn: exn -> Printexc.raw_backtrace -> unit;
around_task: around_task;
}
(** internal state *)
let[@inline] size_ (self : state) = Array.length self.workers
let num_tasks_ (self : state) : int =
let n = ref 0 in
n := Queue.length self.main_q;
Array.iter (fun w -> n := !n + WSQ.size w.q) self.workers;
!n
(** TLS, used by worker to store their specific state
and be able to retrieve it from tasks when we schedule new
sub-tasks. *)
let k_worker_state : worker_state option ref TLS.key =
TLS.new_key (fun () -> ref None)
let[@inline] find_current_worker_ () : worker_state option =
!(TLS.get k_worker_state)
(** Try to wake up a waiter, if there's any. *)
let[@inline] try_wake_someone_ (self : state) : unit =
if self.n_waiting_nonzero then (
Mutex.lock self.mutex;
Condition.signal self.cond;
Mutex.unlock self.mutex
)
(** Run [task] as is, on the pool. *)
let schedule_task_ (self : state) (w : worker_state option) (task : task) : unit
=
(* Printf.printf "schedule task now (%d)\n%!" (Thread.id @@ Thread.self ()); *)
match w with
| Some w when Id.equal self.id_ w.pool_id_ ->
(* we're on a worker of this very pool: push into that worker's
own queue. We must compare pool identifiers, since we might be
on a worker of pool A while asked to schedule on pool B. *)
let pushed = WSQ.push w.q task in
if pushed then
try_wake_someone_ self
else (
(* overflow into main queue *)
Mutex.lock self.mutex;
Queue.push task self.main_q;
if self.n_waiting_nonzero then Condition.signal self.cond;
Mutex.unlock self.mutex
)
| _ ->
if A.get self.active then (
(* push into the main queue *)
Mutex.lock self.mutex;
Queue.push task self.main_q;
if self.n_waiting_nonzero then Condition.signal self.cond;
Mutex.unlock self.mutex
) else
(* notify the caller that scheduling tasks is no
longer permitted *)
raise Shutdown
(** Run this task, now. Must be called from a worker. *)
let run_task_now_ (self : state) ~runner task : unit =
(* Printf.printf "run task now (%d)\n%!" (Thread.id @@ Thread.self ()); *)
let (AT_pair (before_task, after_task)) = self.around_task in
let _ctx = before_task runner in
(* run the task now, catching errors *)
(try
(* run [task()] and handle [suspend] in it *)
Suspend_.with_suspend task ~run:(fun task' ->
let w = find_current_worker_ () in
schedule_task_ self w task')
with e ->
let bt = Printexc.get_raw_backtrace () in
self.on_exn e bt);
after_task runner _ctx
let[@inline] run_async_ (self : state) (task : task) : unit =
let w = find_current_worker_ () in
schedule_task_ self w task
(* TODO: function to schedule many tasks from the outside.
- build a queue
- lock
- queue transfer
- wakeup all (broadcast)
- unlock *)
let run = run_async
(** Wait on condition. Precondition: we hold the mutex. *)
let[@inline] wait_ (self : state) : unit =
self.n_waiting <- self.n_waiting + 1;
if self.n_waiting = 1 then self.n_waiting_nonzero <- true;
Condition.wait self.cond self.mutex;
self.n_waiting <- self.n_waiting - 1;
if self.n_waiting = 0 then self.n_waiting_nonzero <- false
exception Got_task of task
(** Try to steal a task *)
let try_to_steal_work_once_ (self : state) (w : worker_state) : task option =
let init = Random.State.int w.rng (Array.length self.workers) in
try
for i = 0 to Array.length self.workers - 1 do
let w' =
Array.unsafe_get self.workers ((i + init) mod Array.length self.workers)
in
if w != w' then (
match WSQ.steal w'.q with
| Some t -> raise_notrace (Got_task t)
| None -> ()
)
done;
None
with Got_task t -> Some t
(** Worker runs tasks from its queue until none remains *)
let worker_run_self_tasks_ (self : state) ~runner w : unit =
let continue = ref true in
while !continue && A.get self.active do
match WSQ.pop w.q with
| Some task ->
try_wake_someone_ self;
run_task_now_ self ~runner task
| None -> continue := false
done
(** Main loop for a worker thread. *)
let worker_thread_ (self : state) ~(runner : t) (w : worker_state) : unit =
TLS.get Runner.For_runner_implementors.k_cur_runner := Some runner;
TLS.get k_worker_state := Some w;
let rec main () : unit =
if A.get self.active then (
worker_run_self_tasks_ self ~runner w;
try_steal ()
)
and run_task task : unit =
run_task_now_ self ~runner task;
main ()
and try_steal () =
if A.get self.active then (
match try_to_steal_work_once_ self w with
| Some task -> run_task task
| None -> wait ()
)
and wait () =
Mutex.lock self.mutex;
match Queue.pop self.main_q with
| task ->
Mutex.unlock self.mutex;
run_task task
| exception Queue.Empty ->
(* wait here *)
if A.get self.active then wait_ self;
(* see if a task became available *)
let task = try Some (Queue.pop self.main_q) with Queue.Empty -> None in
Mutex.unlock self.mutex;
(match task with
| Some t -> run_task t
| None -> try_steal ())
in
(* handle domain-local await *)
Dla_.using ~prepare_for_await:Suspend_.prepare_for_await ~while_running:main
let default_thread_init_exit_ ~dom_id:_ ~t_id:_ () = ()
let shutdown_ ~wait (self : state) : unit =
if A.exchange self.active false then (
Mutex.lock self.mutex;
Condition.broadcast self.cond;
Mutex.unlock self.mutex;
if wait then Array.iter (fun w -> Thread.join w.thread) self.workers
)
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?around_task:(t -> 'b) * (t -> 'b -> unit) ->
?num_threads:int ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
let dummy_task_ () = assert false
let create ?(on_init_thread = default_thread_init_exit_)
?(on_exit_thread = default_thread_init_exit_) ?(on_exn = fun _ _ -> ())
?around_task ?num_threads () : t =
let pool_id_ = Id.create () in
(* wrapper *)
let around_task =
match around_task with
| Some (f, g) -> AT_pair (f, g)
| None -> AT_pair (ignore, fun _ _ -> ())
in
let num_domains = D_pool_.n_domains () in
let num_threads = Util_pool_.num_threads ?num_threads () in
(* make sure we don't bias towards the first domain(s) in {!D_pool_} *)
let offset = Random.int num_domains in
let workers : worker_state array =
let dummy = Thread.self () in
Array.init num_threads (fun i ->
{
pool_id_;
thread = dummy;
q = WSQ.create ~dummy:dummy_task_ ();
rng = Random.State.make [| i |];
})
in
let pool =
{
id_ = pool_id_;
active = A.make true;
workers;
main_q = Queue.create ();
n_waiting = 0;
n_waiting_nonzero = false; (* consistent with [n_waiting = 0] *)
mutex = Mutex.create ();
cond = Condition.create ();
around_task;
on_exn;
}
in
let runner =
Runner.For_runner_implementors.create
~shutdown:(fun ~wait () -> shutdown_ pool ~wait)
~run_async:(fun f -> run_async_ pool f)
~size:(fun () -> size_ pool)
~num_tasks:(fun () -> num_tasks_ pool)
()
in
(* temporary queue used to obtain thread handles from domains
on which the thread are started. *)
let receive_threads = Bb_queue.create () in
(* start the thread with index [i] *)
let start_thread_with_idx i =
let w = pool.workers.(i) in
let dom_idx = (offset + i) mod num_domains in
(* function run in the thread itself *)
let main_thread_fun () : unit =
let thread = Thread.self () in
let t_id = Thread.id thread in
on_init_thread ~dom_id:dom_idx ~t_id ();
let run () = worker_thread_ pool ~runner w in
(* now run the main loop *)
Fun.protect run ~finally:(fun () ->
(* on termination, decrease refcount of underlying domain *)
D_pool_.decr_on dom_idx);
on_exit_thread ~dom_id:dom_idx ~t_id ()
in
(* function called in domain with index [i], to
create the thread and push it into [receive_threads] *)
let create_thread_in_domain () =
let thread = Thread.create main_thread_fun () in
(* send the thread from the domain back to us *)
Bb_queue.push receive_threads (i, thread)
in
D_pool_.run_on dom_idx create_thread_in_domain
in
(* start all threads, placing them on the domains
according to their index and [offset] in a round-robin fashion. *)
for i = 0 to num_threads - 1 do
start_thread_with_idx i
done;
(* receive the newly created threads back from domains *)
for _j = 1 to num_threads do
let i, th = Bb_queue.pop receive_threads in
let worker_state = pool.workers.(i) in
worker_state.thread <- th
done;
runner
let with_ ?on_init_thread ?on_exit_thread ?on_exn ?around_task ?num_threads () f
=
let pool =
create ?on_init_thread ?on_exit_thread ?on_exn ?around_task ?num_threads ()
in
let@ () = Fun.protect ~finally:(fun () -> shutdown pool) in
f pool

src/ws_pool.mli Normal file

@ -0,0 +1,59 @@
(** Work-stealing thread pool.
A pool of threads with a work-stealing scheduler.
The pool contains a fixed number of threads that wait for work
items to come, process them, and loop.
This is a good fit for CPU-intensive workloads made of many small tasks.
Note that tasks will not always be processed in the order they are
scheduled, so this is not great for workloads where the latency
of individual tasks matters (for that, see {!Fifo_pool}).
This implements {!Runner.t} since 0.3.
If a pool is no longer needed, {!shutdown} can be used to signal all threads
in it to stop (after they finish their work), and wait for them to stop.
The threads are distributed across a fixed domain pool
(whose size is determined by {!Domain.recommended_domain_count} on OCaml 5,
and simply the single runtime on OCaml 4).
*)
include module type of Runner
type ('a, 'b) create_args =
?on_init_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exit_thread:(dom_id:int -> t_id:int -> unit -> unit) ->
?on_exn:(exn -> Printexc.raw_backtrace -> unit) ->
?around_task:(t -> 'b) * (t -> 'b -> unit) ->
?num_threads:int ->
'a
(** Arguments used in {!create}. See {!create} for explanations. *)
val create : (unit -> t, _) create_args
(** [create ()] makes a new thread pool.
@param on_init_thread called at the beginning of each new thread
in the pool.
@param num_threads size of the pool, i.e. the number of worker threads.
It will be at least [1] internally, so [0] or negative values make no sense.
The default is [Domain.recommended_domain_count()], i.e. one worker
thread per CPU core.
On OCaml 4 the default is [4] (since there is only one domain).
@param on_exit_thread called at the end of each thread in the pool
@param around_task a pair of [before, after], where [before pool] is called
before a task is processed,
on the worker thread about to run it, and returns [x]; and [after pool x] is called by
the same thread after the task is over. (since 0.2)
*)
val with_ : (unit -> (t -> 'a) -> 'a, _) create_args
(** [with_ () f] calls [f pool], where [pool] is obtained via {!create}.
When [f pool] returns or fails, [pool] is shutdown and its resources
are released.
Most parameters are the same as in {!create}.
@since 0.3 *)
val run : t -> (unit -> unit) -> unit
[@@deprecated "use run_async"]
(** deprecated alias to {!run_async} *)
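(** Illustration (a sketch, not part of the original interface): a scoped
    pool running a batch of small tasks in parallel.
    {[
      let () =
        with_ ~num_threads:4 () (fun pool ->
            let futs =
              List.init 10 (fun i -> Fut.spawn ~on:pool (fun () -> i * i))
            in
            List.iter (fun f -> Printf.printf "%d\n" (Fut.wait_block_exn f)) futs)
    ]} *)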

Some files were not shown because too many files have changed in this diff.