src/corosio/src/detail/timer_service.cpp

86.2% Lines (301/349) 92.9% Functions (39/42)
src/corosio/src/detail/timer_service.cpp
Line Hits Source Code
1 //
2 // Copyright (c) 2026 Steve Gerbino
3 //
4 // Distributed under the Boost Software License, Version 1.0. (See accompanying
5 // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // Official repository: https://github.com/cppalliance/corosio
8 //
9
10 #include "src/detail/timer_service.hpp"
11 #include "src/detail/scheduler_impl.hpp"
12
13 #include <boost/corosio/basic_io_context.hpp>
14 #include <boost/corosio/detail/thread_local_ptr.hpp>
15 #include "src/detail/scheduler_op.hpp"
16 #include "src/detail/intrusive.hpp"
17 #include <boost/capy/error.hpp>
18 #include <boost/capy/ex/executor_ref.hpp>
19 #include <system_error>
20
21 #include <atomic>
22 #include <coroutine>
23 #include <limits>
24 #include <mutex>
25 #include <optional>
26 #include <stop_token>
27 #include <vector>
28
29 /*
30 Timer Service
31 =============
32
33 The public timer class holds an opaque implementation* and forwards
34 all operations through extern free functions defined at the bottom
35 of this file.
36
37 Data Structures
38 ---------------
39 waiter_node holds per-waiter state: coroutine handle, executor,
40 error output, stop_token, embedded completion_op. Each concurrent
41 co_await t.wait() allocates one waiter_node.
42
43 implementation holds per-timer state: expiry, heap index, and an
44 intrusive_list of waiter_nodes. Multiple coroutines can wait on
45 the same timer simultaneously.
46
47 timer_service_impl owns a min-heap of active timers, a free list
48 of recycled impls, and a free list of recycled waiter_nodes. The
49 heap is ordered by expiry time; the scheduler queries
50 nearest_expiry() to set the epoll/timerfd timeout.
51
52 Optimization Strategy
53 ---------------------
54 The common timer lifecycle is: construct, set expiry, cancel or
55 wait, destroy. Several optimizations target this path:
56
57 1. Deferred heap insertion — expires_after() stores the expiry
58 but does not insert into the heap. Insertion happens in
59 wait(). If the timer is cancelled or destroyed before wait(),
60 the heap is never touched and no mutex is taken. This also
61 enables the already-expired fast path: when wait() sees
62 expiry <= now before inserting, it posts the coroutine
63 handle to the executor and returns noop_coroutine — no
64 heap, no mutex, no epoll. This is only possible because
65 the coroutine API guarantees wait() always follows
66 expires_after(); callback APIs cannot assume this call
67 order.
68
69 2. Thread-local impl cache — A single-slot per-thread cache of
70 implementation avoids the mutex on create/destroy for the common
71 create-then-destroy-on-same-thread pattern. On pop, if the
72 cached impl's svc_ doesn't match the current service, the
73 stale impl is deleted eagerly rather than reused.
74
75 3. Embedded completion_op — Each waiter_node embeds a
76 scheduler_op subclass, eliminating heap allocation per
77 fire/cancel. Its destroy() is a no-op since the waiter_node
78 owns the lifetime.
79
80 4. Cached nearest expiry — An atomic<int64_t> mirrors the heap
81 root's time, updated under the lock. nearest_expiry() and
82 empty() read the atomic without locking.
83
84 5. might_have_pending_waits_ flag — Set on wait(), cleared on
85 cancel. Lets cancel_timer() return without locking when no
86 wait was ever issued.
87
88 6. Thread-local waiter cache — Single-slot per-thread cache of
89 waiter_node avoids the free-list mutex for the common
90 wait-then-complete-on-same-thread pattern.
91
92 With all fast paths hit (idle timer, same thread), the
93 schedule/cancel cycle takes zero mutex locks.
94
95 Concurrency
96 -----------
97 stop_token callbacks can fire from any thread. The impl_
98 pointer on waiter_node is used as a "still in list" marker:
99 set to nullptr under the mutex when a waiter is removed by
100 cancel_timer() or process_expired(). cancel_waiter() checks
101 this under the mutex to avoid double-removal races.
102
103 Multiple io_contexts in the same program are safe. The
104 service pointer is obtained directly from the scheduler,
105 and TL-cached impls are validated by comparing svc_ against
106 the current service pointer. Waiter nodes have no service
107 affinity and can safely migrate between contexts.
108 */
109
110 namespace boost::corosio::detail {
111
112 class timer_service_impl;
113 struct implementation;
114 struct waiter_node;
115
116 void timer_service_invalidate_cache() noexcept;
117
// Per-wait state for one co_await t.wait(). Lives on an intrusive
// list owned by the timer's implementation; recycled through a
// thread-local slot or the service free list after completion.
struct waiter_node : intrusive_list<waiter_node>::node
{
    // Embedded completion op — avoids heap allocation per fire/cancel
    struct completion_op final : scheduler_op
    {
        waiter_node* waiter_ = nullptr;

        static void do_complete(
            void* owner, scheduler_op* base, std::uint32_t, std::uint32_t);

        completion_op() noexcept : scheduler_op(&do_complete) {}

        void operator()() override;
        // No-op — lifetime owned by waiter_node, not the scheduler queue
        void destroy() override {}
    };

    // Per-waiter stop_token cancellation
    struct canceller
    {
        waiter_node* waiter_;
        void operator()() const;
    };

    // nullptr once removed from timer's waiter list (concurrency marker)
    implementation* impl_ = nullptr;
    timer_service_impl* svc_ = nullptr;           // owning service
    std::coroutine_handle<> h_;                   // coroutine to resume
    capy::executor_ref d_;                        // executor to resume on
    std::error_code* ec_out_ = nullptr;           // caller's error slot
    std::stop_token token_;
    std::optional<std::stop_callback<canceller>> stop_cb_;
    completion_op op_;                            // embedded scheduler op
    std::error_code ec_value_;                    // result delivered to ec_out_
    waiter_node* next_free_ = nullptr;            // free-list linkage

    waiter_node() noexcept
    {
        // Back-pointer lets do_complete recover the node from the op.
        op_.waiter_ = this;
    }
};
159
// Per-timer state behind the public timer's opaque pointer.
// Inherits expiry_, heap_index_, and might_have_pending_waits_
// from timer::implementation (declared elsewhere).
struct implementation final : timer::implementation
{
    using clock_type = std::chrono::steady_clock;
    using time_point = clock_type::time_point;
    using duration = clock_type::duration;

    timer_service_impl* svc_ = nullptr;   // owning service
    intrusive_list<waiter_node> waiters_; // all coroutines awaiting this timer

    // Free list linkage (reused when impl is on free_list)
    implementation* next_free_ = nullptr;

    explicit implementation(timer_service_impl& svc) noexcept;

    // Suspends the calling coroutine until expiry/cancel/stop.
    // Defined below; returns noop_coroutine in every path.
    std::coroutine_handle<> wait(
        std::coroutine_handle<>,
        capy::executor_ref,
        std::stop_token,
        std::error_code*) override;
};
180
181 implementation* try_pop_tl_cache(timer_service_impl*) noexcept;
182 bool try_push_tl_cache(implementation*) noexcept;
183 waiter_node* try_pop_waiter_tl_cache() noexcept;
184 bool try_push_waiter_tl_cache(waiter_node*) noexcept;
185
// Owns the min-heap of active timers and both free lists. All
// shared mutable state is guarded by mutex_ except the flags and
// atomics documented in the file-header comment.
class timer_service_impl final : public timer_service
{
public:
    using clock_type = std::chrono::steady_clock;
    using time_point = clock_type::time_point;
    using key_type = timer_service;

private:
    // One slot per heap-resident timer; impl.heap_index_ mirrors the
    // entry's position so removal is O(log n).
    struct heap_entry
    {
        time_point time_;
        implementation* timer_;
    };

    scheduler* sched_ = nullptr;
    mutable std::mutex mutex_;      // guards heap_ and both free lists
    std::vector<heap_entry> heap_;  // min-heap ordered by time_
    implementation* free_list_ = nullptr;      // recycled timer impls
    waiter_node* waiter_free_list_ = nullptr;  // recycled waiter nodes
    callback on_earliest_changed_;  // invoked (outside the lock) when root changes
    // Avoids mutex in nearest_expiry() and empty()
    mutable std::atomic<std::int64_t> cached_nearest_ns_{
        (std::numeric_limits<std::int64_t>::max)()};

public:
    timer_service_impl(capy::execution_context&, scheduler& sched)
        : timer_service()
        , sched_(&sched)
    {
    }

    scheduler& get_scheduler() noexcept
    {
        return *sched_;
    }

    ~timer_service_impl() override = default;

    timer_service_impl(timer_service_impl const&) = delete;
    timer_service_impl& operator=(timer_service_impl const&) = delete;

    // Installs the callback fired when the nearest expiry changes,
    // so the scheduler can re-arm its timeout.
    void set_on_earliest_changed(callback cb) override
    {
        on_earliest_changed_ = cb;
    }

    // Service teardown: destroys pending coroutine frames and frees
    // all pooled memory. No lock is taken here — presumably runs
    // single-threaded during context destruction; confirm caller.
    void shutdown() override
    {
        timer_service_invalidate_cache();

        // Cancel waiting timers still in the heap
        for (auto& entry : heap_)
        {
            auto* impl = entry.timer_;
            while (auto* w = impl->waiters_.pop_front())
            {
                w->stop_cb_.reset();
                w->h_.destroy();          // free the suspended frame
                sched_->work_finished();  // balance work_started() in wait()
                delete w;
            }
            impl->heap_index_ = (std::numeric_limits<std::size_t>::max)();
            delete impl;
        }
        // NOTE(review): waiter_nodes whose completion_op was already
        // posted to the scheduler queue are not in any waiter list and
        // are not reclaimed here (completion_op::destroy() is a no-op)
        // — confirm the scheduler drains those ops before shutdown.
        heap_.clear();
        cached_nearest_ns_.store(
            (std::numeric_limits<std::int64_t>::max)(),
            std::memory_order_release);

        // Delete free-listed impls
        while (free_list_)
        {
            auto* next = free_list_->next_free_;
            delete free_list_;
            free_list_ = next;
        }

        // Delete free-listed waiters
        while (waiter_free_list_)
        {
            auto* next = waiter_free_list_->next_free_;
            delete waiter_free_list_;
            waiter_free_list_ = next;
        }
    }

    // Allocates a timer impl: TL cache first (no lock), then the
    // locked free list, then operator new. Always returns it reset
    // (not in heap, no pending-wait flag).
    io_object::implementation* construct() override
    {
        implementation* impl = try_pop_tl_cache(this);
        if (impl)
        {
            impl->svc_ = this;
            impl->heap_index_ = (std::numeric_limits<std::size_t>::max)();
            impl->might_have_pending_waits_ = false;
            return impl;
        }

        std::lock_guard lock(mutex_);
        if (free_list_)
        {
            impl = free_list_;
            free_list_ = impl->next_free_;
            impl->next_free_ = nullptr;
            impl->svc_ = this;
            impl->heap_index_ = (std::numeric_limits<std::size_t>::max)();
            impl->might_have_pending_waits_ = false;
        }
        else
        {
            impl = new implementation(*this);
        }
        return impl;
    }

    void destroy(io_object::implementation* p) override
    {
        destroy_impl(static_cast<implementation&>(*p));
    }

    // Cancels outstanding waits, unlinks from the heap if needed,
    // then recycles the impl (TL cache first, free list otherwise).
    void destroy_impl(implementation& impl)
    {
        cancel_timer(impl);

        if (impl.heap_index_ != (std::numeric_limits<std::size_t>::max)())
        {
            std::lock_guard lock(mutex_);
            remove_timer_impl(impl);
            refresh_cached_nearest();
        }

        if (try_push_tl_cache(&impl))
            return;

        std::lock_guard lock(mutex_);
        impl.next_free_ = free_list_;
        free_list_ = &impl;
    }

    // Waiter allocation mirrors construct(): TL cache, locked free
    // list, then operator new.
    waiter_node* create_waiter()
    {
        if (auto* w = try_pop_waiter_tl_cache())
            return w;

        std::lock_guard lock(mutex_);
        if (waiter_free_list_)
        {
            auto* w = waiter_free_list_;
            waiter_free_list_ = w->next_free_;
            w->next_free_ = nullptr;
            return w;
        }

        return new waiter_node();
    }

    void destroy_waiter(waiter_node* w)
    {
        if (try_push_waiter_tl_cache(w))
            return;

        std::lock_guard lock(mutex_);
        w->next_free_ = waiter_free_list_;
        waiter_free_list_ = w;
    }

    // Heap insertion deferred to wait() — avoids lock when timer is idle.
    // Re-keys an in-heap timer to new_time and cancels every current
    // waiter. Returns the number of waiters cancelled.
    std::size_t update_timer(implementation& impl, time_point new_time)
    {
        bool in_heap =
            (impl.heap_index_ != (std::numeric_limits<std::size_t>::max)());
        if (!in_heap && impl.waiters_.empty())
            return 0;

        bool notify = false;
        intrusive_list<waiter_node> canceled;

        {
            std::lock_guard lock(mutex_);

            // Detach all waiters; posting happens after the lock drops.
            while (auto* w = impl.waiters_.pop_front())
            {
                w->impl_ = nullptr;  // mark "no longer in list"
                canceled.push_back(w);
            }

            if (impl.heap_index_ < heap_.size())
            {
                time_point old_time = heap_[impl.heap_index_].time_;
                heap_[impl.heap_index_].time_ = new_time;

                if (new_time < old_time)
                    up_heap(impl.heap_index_);
                else
                    down_heap(impl.heap_index_);

                // Only the root position affects the scheduler timeout.
                notify = (impl.heap_index_ == 0);
            }

            refresh_cached_nearest();
        }

        // Post cancellations outside the lock to avoid re-entrancy.
        std::size_t count = 0;
        while (auto* w = canceled.pop_front())
        {
            w->ec_value_ = make_error_code(capy::error::canceled);
            sched_->post(&w->op_);
            ++count;
        }

        if (notify)
            on_earliest_changed_();

        return count;
    }

    // Inserts timer into heap if needed and pushes waiter, all under
    // one lock to prevent races with cancel_waiter/process_expired
    void insert_waiter(implementation& impl, waiter_node* w)
    {
        bool notify = false;
        {
            std::lock_guard lock(mutex_);
            if (impl.heap_index_ == (std::numeric_limits<std::size_t>::max)())
            {
                impl.heap_index_ = heap_.size();
                heap_.push_back({impl.expiry_, &impl});
                up_heap(heap_.size() - 1);
                notify = (impl.heap_index_ == 0);
                refresh_cached_nearest();
            }
            impl.waiters_.push_back(w);
        }
        if (notify)
            on_earliest_changed_();
    }

    // Cancels every pending wait on this timer. Returns the number
    // of waiters that were cancelled.
    std::size_t cancel_timer(implementation& impl)
    {
        // Lock-free fast path: no wait() was ever issued.
        if (!impl.might_have_pending_waits_)
            return 0;

        // Not in heap and no waiters — just clear the flag
        if (impl.heap_index_ == (std::numeric_limits<std::size_t>::max)() &&
            impl.waiters_.empty())
        {
            impl.might_have_pending_waits_ = false;
            return 0;
        }

        intrusive_list<waiter_node> canceled;

        {
            std::lock_guard lock(mutex_);
            remove_timer_impl(impl);
            while (auto* w = impl.waiters_.pop_front())
            {
                w->impl_ = nullptr;  // mark "no longer in list"
                canceled.push_back(w);
            }
            refresh_cached_nearest();
        }

        impl.might_have_pending_waits_ = false;

        // Post completions outside the lock.
        std::size_t count = 0;
        while (auto* w = canceled.pop_front())
        {
            w->ec_value_ = make_error_code(capy::error::canceled);
            sched_->post(&w->op_);
            ++count;
        }

        return count;
    }

    // Cancel a single waiter (called from stop_token callback, any thread)
    void cancel_waiter(waiter_node* w)
    {
        {
            std::lock_guard lock(mutex_);
            // Already removed by cancel_timer or process_expired
            if (!w->impl_)
                return;
            auto* impl = w->impl_;
            w->impl_ = nullptr;
            impl->waiters_.remove(w);
            // Last waiter gone — take the timer out of the heap too.
            if (impl->waiters_.empty())
            {
                remove_timer_impl(*impl);
                impl->might_have_pending_waits_ = false;
            }
            refresh_cached_nearest();
        }

        w->ec_value_ = make_error_code(capy::error::canceled);
        sched_->post(&w->op_);
    }

    // Cancel front waiter only (FIFO), return 0 or 1
    std::size_t cancel_one_waiter(implementation& impl)
    {
        if (!impl.might_have_pending_waits_)
            return 0;

        waiter_node* w = nullptr;

        {
            std::lock_guard lock(mutex_);
            w = impl.waiters_.pop_front();
            if (!w)
                return 0;
            w->impl_ = nullptr;
            if (impl.waiters_.empty())
            {
                remove_timer_impl(impl);
                impl.might_have_pending_waits_ = false;
            }
            refresh_cached_nearest();
        }

        w->ec_value_ = make_error_code(capy::error::canceled);
        sched_->post(&w->op_);
        return 1;
    }

    // Lock-free: max() sentinel in the atomic means "no active timers".
    bool empty() const noexcept override
    {
        return cached_nearest_ns_.load(std::memory_order_acquire) ==
            (std::numeric_limits<std::int64_t>::max)();
    }

    // Lock-free read of the heap root's expiry (sentinel when empty).
    time_point nearest_expiry() const noexcept override
    {
        auto ns = cached_nearest_ns_.load(std::memory_order_acquire);
        return time_point(time_point::duration(ns));
    }

    // Pops every timer whose expiry has passed, detaches its waiters
    // under the lock, then posts their completions outside the lock.
    // Returns the number of completions posted.
    std::size_t process_expired() override
    {
        intrusive_list<waiter_node> expired;

        {
            std::lock_guard lock(mutex_);
            auto now = clock_type::now();

            while (!heap_.empty() && heap_[0].time_ <= now)
            {
                implementation* t = heap_[0].timer_;
                remove_timer_impl(*t);
                while (auto* w = t->waiters_.pop_front())
                {
                    w->impl_ = nullptr;
                    w->ec_value_ = {};  // success — timer fired
                    expired.push_back(w);
                }
                t->might_have_pending_waits_ = false;
            }

            refresh_cached_nearest();
        }

        std::size_t count = 0;
        while (auto* w = expired.pop_front())
        {
            sched_->post(&w->op_);
            ++count;
        }

        return count;
    }

private:
    // Caller must hold mutex_. Mirrors the heap root into the atomic.
    void refresh_cached_nearest() noexcept
    {
        auto ns = heap_.empty() ? (std::numeric_limits<std::int64_t>::max)()
                                : heap_[0].time_.time_since_epoch().count();
        cached_nearest_ns_.store(ns, std::memory_order_release);
    }

    // Caller must hold mutex_. Removes impl from the heap (no-op when
    // not resident) and marks heap_index_ with the sentinel.
    void remove_timer_impl(implementation& impl)
    {
        std::size_t index = impl.heap_index_;
        if (index >= heap_.size())
            return; // Not in heap

        if (index == heap_.size() - 1)
        {
            // Last element, just pop
            impl.heap_index_ = (std::numeric_limits<std::size_t>::max)();
            heap_.pop_back();
        }
        else
        {
            // Swap with last and reheapify
            swap_heap(index, heap_.size() - 1);
            impl.heap_index_ = (std::numeric_limits<std::size_t>::max)();
            heap_.pop_back();

            // The moved element may need to go either direction.
            if (index > 0 && heap_[index].time_ < heap_[(index - 1) / 2].time_)
                up_heap(index);
            else
                down_heap(index);
        }
    }

    // Caller must hold mutex_. Sift toward the root while smaller
    // than the parent.
    void up_heap(std::size_t index)
    {
        while (index > 0)
        {
            std::size_t parent = (index - 1) / 2;
            if (!(heap_[index].time_ < heap_[parent].time_))
                break;
            swap_heap(index, parent);
            index = parent;
        }
    }

    // Caller must hold mutex_. Sift toward the leaves while larger
    // than the smaller child.
    void down_heap(std::size_t index)
    {
        std::size_t child = index * 2 + 1;
        while (child < heap_.size())
        {
            std::size_t min_child =
                (child + 1 == heap_.size() ||
                 heap_[child].time_ < heap_[child + 1].time_)
                ? child
                : child + 1;

            if (heap_[index].time_ < heap_[min_child].time_)
                break;

            swap_heap(index, min_child);
            index = min_child;
            child = index * 2 + 1;
        }
    }

    // Caller must hold mutex_. Swaps two entries and fixes both
    // timers' back-pointing heap indices.
    void swap_heap(std::size_t i1, std::size_t i2)
    {
        heap_entry tmp = heap_[i1];
        heap_[i1] = heap_[i2];
        heap_[i2] = tmp;
        heap_[i1].timer_->heap_index_ = i1;
        heap_[i2].timer_->heap_index_ = i2;
    }
};
632
// Out-of-line so timer_service_impl is a complete type at the point
// of definition.
implementation::implementation(timer_service_impl& svc) noexcept : svc_(&svc) {}
634
635 void
636 4 waiter_node::canceller::operator()() const
637 {
638 4 waiter_->svc_->cancel_waiter(waiter_);
639 4 }
640
641 void
642 waiter_node::completion_op::do_complete(
643 void* owner, scheduler_op* base, std::uint32_t, std::uint32_t)
644 {
645 if (!owner)
646 return;
647 static_cast<completion_op*>(base)->operator()();
648 }
649
// Runs on a scheduler thread after the waiter fired, was cancelled,
// or was stopped: delivers the error code, recycles the node, then
// resumes the coroutine on its executor.
void
waiter_node::completion_op::operator()()
{
    auto* w = waiter_;
    w->stop_cb_.reset();  // detach stop callback before the node is reused
    if (w->ec_out_)
        *w->ec_out_ = w->ec_value_;

    // Copy out everything needed below: destroy_waiter() may recycle
    // the node (TL cache / free list) for immediate reuse.
    auto h = w->h_;
    auto d = w->d_;
    auto* svc = w->svc_;
    auto& sched = svc->get_scheduler();

    svc->destroy_waiter(w);

    d.post(h);
    sched.work_finished();  // balance work_started() in wait()
}
668
// Suspends the calling coroutine until the timer fires or is
// cancelled/stopped. Always returns noop_coroutine(); resumption
// happens via a post to the caller's executor.
std::coroutine_handle<>
implementation::wait(
    std::coroutine_handle<> h,
    capy::executor_ref d,
    std::stop_token token,
    std::error_code* ec)
{
    // Already-expired fast path — no waiter_node, no mutex.
    // Post instead of dispatch so the coroutine yields to the
    // scheduler, allowing other queued work to run.
    if (heap_index_ == (std::numeric_limits<std::size_t>::max)())
    {
        if (expiry_ == (time_point::min)() || expiry_ <= clock_type::now())
        {
            if (ec)
                *ec = {};
            d.post(h);
            return std::noop_coroutine();
        }
    }

    auto* w = svc_->create_waiter();
    w->impl_ = this;
    w->svc_ = svc_;
    w->h_ = h;
    w->d_ = d;
    w->token_ = std::move(token);
    w->ec_out_ = ec;

    svc_->insert_waiter(*this, w);
    might_have_pending_waits_ = true;
    svc_->get_scheduler().work_started();

    // Register the stop callback only after the waiter is fully
    // initialized and linked: if stop was already requested, the
    // canceller fires immediately from emplace.
    if (w->token_.stop_possible())
        w->stop_cb_.emplace(w->token_, waiter_node::canceller{w});

    return std::noop_coroutine();
}
707
// Extern free functions called from timer.cpp
//
// Two thread-local caches avoid hot-path mutex acquisitions:
//
// 1. Impl cache — single-slot, validated by comparing svc_ on the
//    impl against the current service pointer.
//
// 2. Waiter cache — single-slot, no service affinity.
//
// The service pointer is obtained from the scheduler_impl's
// timer_svc_ member, avoiding find_service() on the hot path.
// All caches are cleared by timer_service_invalidate_cache()
// during shutdown (for the shutting-down thread only; stale
// entries on other threads are handled on next pop).

thread_local_ptr<implementation> tl_cached_impl;
thread_local_ptr<waiter_node> tl_cached_waiter;
724
725 implementation*
726 9122 try_pop_tl_cache(timer_service_impl* svc) noexcept
727 {
728 9122 auto* impl = tl_cached_impl.get();
729 9122 if (impl)
730 {
731 8949 tl_cached_impl.set(nullptr);
732 8949 if (impl->svc_ == svc)
733 8949 return impl;
734 // Stale impl from a destroyed service
735 delete impl;
736 }
737 173 return nullptr;
738 }
739
740 bool
741 9122 try_push_tl_cache(implementation* impl) noexcept
742 {
743 9122 if (!tl_cached_impl.get())
744 {
745 9074 tl_cached_impl.set(impl);
746 9074 return true;
747 }
748 48 return false;
749 }
750
751 waiter_node*
752 8836 try_pop_waiter_tl_cache() noexcept
753 {
754 8836 auto* w = tl_cached_waiter.get();
755 8836 if (w)
756 {
757 8694 tl_cached_waiter.set(nullptr);
758 8694 return w;
759 }
760 142 return nullptr;
761 }
762
763 bool
764 8836 try_push_waiter_tl_cache(waiter_node* w) noexcept
765 {
766 8836 if (!tl_cached_waiter.get())
767 {
768 8778 tl_cached_waiter.set(w);
769 8778 return true;
770 }
771 58 return false;
772 }
773
774 void
775 336 timer_service_invalidate_cache() noexcept
776 {
777 336 delete tl_cached_impl.get();
778 336 tl_cached_impl.set(nullptr);
779
780 336 delete tl_cached_waiter.get();
781 336 tl_cached_waiter.set(nullptr);
782 336 }
783
// Access shim for basic_io_context::sched_ (presumably a friend of
// basic_io_context — confirm in the header). Keeps the member out
// of the public interface while letting this TU reach it.
struct timer_service_access
{
    static scheduler_impl& get_scheduler(basic_io_context& ctx) noexcept
    {
        return static_cast<scheduler_impl&>(*ctx.sched_);
    }
};
791
792 // Bypass find_service() mutex by reading the scheduler's cached pointer
793 io_object::io_service&
794 9122 timer_service_direct(capy::execution_context& ctx) noexcept
795 {
796 9122 return *timer_service_access::get_scheduler(
797 9122 static_cast<basic_io_context&>(ctx)).timer_svc_;
798 }
799
800 std::size_t
801 6 timer_service_update_expiry(timer::implementation& base)
802 {
803 6 auto& impl = static_cast<implementation&>(base);
804 6 return impl.svc_->update_timer(impl, impl.expiry_);
805 }
806
807 std::size_t
808 8 timer_service_cancel(timer::implementation& base) noexcept
809 {
810 8 auto& impl = static_cast<implementation&>(base);
811 8 return impl.svc_->cancel_timer(impl);
812 }
813
814 std::size_t
815 2 timer_service_cancel_one(timer::implementation& base) noexcept
816 {
817 2 auto& impl = static_cast<implementation&>(base);
818 2 return impl.svc_->cancel_one_waiter(impl);
819 }
820
// Obtains the context's timer service via make_service (which
// presumably constructs on first call and returns the existing
// instance thereafter — confirm in capy::execution_context).
timer_service&
get_timer_service(capy::execution_context& ctx, scheduler& sched)
{
    return ctx.make_service<timer_service_impl>(sched);
}
826
827 } // namespace boost::corosio::detail
828