src/corosio/src/detail/select/sockets.cpp

Coverage: 75.3% of lines (295/392), 94.3% of functions (33/35)

//
// Copyright (c) 2026 Steve Gerbino
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/cppalliance/corosio
//

#include <boost/corosio/detail/platform.hpp>

#if BOOST_COROSIO_HAS_SELECT

#include "src/detail/select/sockets.hpp"
#include "src/detail/endpoint_convert.hpp"
#include "src/detail/dispatch_coro.hpp"
#include "src/detail/make_err.hpp"

#include <boost/corosio/detail/except.hpp>

#include <boost/capy/buffers.hpp>

#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

namespace boost::corosio::detail {

void
select_op::canceller::operator()() const noexcept
{
    op->cancel();
}

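// Stop-token cancellation flows through these cancel() overrides. Once
// start() has recorded the owning impl, the request is routed through it
// so the fd can be deregistered and the completion posted exactly once;
// before that, only the cancel request is recorded.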
void
select_connect_op::cancel() noexcept
{
    if (socket_impl_)
        socket_impl_->cancel_single_op(*this);
    else
        request_cancel();
}

void
select_read_op::cancel() noexcept
{
    if (socket_impl_)
        socket_impl_->cancel_single_op(*this);
    else
        request_cancel();
}

void
select_write_op::cancel() noexcept
{
    if (socket_impl_)
        socket_impl_->cancel_single_op(*this);
    else
        request_cancel();
}

void
select_connect_op::operator()()
{
    stop_cb.reset();

    bool success = (errn == 0 && !cancelled.load(std::memory_order_acquire));

    // Cache endpoints on successful connect
    if (success && socket_impl_)
    {
        // Query local endpoint via getsockname (may fail, but remote is always known)
        endpoint local_ep;
        sockaddr_in local_addr{};
        socklen_t local_len = sizeof(local_addr);
        if (::getsockname(
                fd, reinterpret_cast<sockaddr*>(&local_addr), &local_len) == 0)
            local_ep = from_sockaddr_in(local_addr);
        // Always cache remote endpoint; local may be default if getsockname failed
        static_cast<select_socket_impl*>(socket_impl_)
            ->set_endpoints(local_ep, target_endpoint);
    }

    if (ec_out)
    {
        if (cancelled.load(std::memory_order_acquire))
            *ec_out = capy::error::canceled;
        else if (errn != 0)
            *ec_out = make_err(errn);
        else
            *ec_out = {};
    }

    if (bytes_out)
        *bytes_out = bytes_transferred;

    // Move to stack before destroying the frame
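    // (impl_ptr may hold the last reference to the impl that owns this op,
    // so `this` must not be touched after the reset below.)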
    capy::executor_ref saved_ex(ex);
    std::coroutine_handle<> saved_h(h);
    impl_ptr.reset();
    dispatch_coro(saved_ex, saved_h).resume();
}

select_socket_impl::select_socket_impl(select_socket_service& svc) noexcept
    : svc_(svc)
{
}

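// connect/read_some/write_some all follow the same shape: attempt the
// syscall immediately, post the completion if it finished, and otherwise
// register the op with the reactor. Registration advances
// registering -> registered; whichever party (reactor, cancel, or close)
// swaps the state to unregistered owns deregistering the fd and posting
// the completion.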
std::coroutine_handle<>
select_socket_impl::connect(
    std::coroutine_handle<> h,
    capy::executor_ref ex,
    endpoint ep,
    std::stop_token token,
    std::error_code* ec)
{
    auto& op = conn_;
    op.reset();
    op.h = h;
    op.ex = ex;
    op.ec_out = ec;
    op.fd = fd_;
    op.target_endpoint = ep; // Store target for endpoint caching
    op.start(token, this);

    sockaddr_in addr = detail::to_sockaddr_in(ep);
    int result =
        ::connect(fd_, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));

    if (result == 0)
    {
        // Sync success - cache endpoints immediately
        sockaddr_in local_addr{};
        socklen_t local_len = sizeof(local_addr);
        if (::getsockname(
                fd_, reinterpret_cast<sockaddr*>(&local_addr), &local_len) == 0)
            local_endpoint_ = detail::from_sockaddr_in(local_addr);
        remote_endpoint_ = ep;

        op.complete(0, 0);
        op.impl_ptr = shared_from_this();
        svc_.post(&op);
        // completion is always posted to scheduler queue, never inline.
        return std::noop_coroutine();
    }

    if (errno == EINPROGRESS)
    {
        svc_.work_started();
        op.impl_ptr = shared_from_this();

        // Set registering BEFORE register_fd to close the race window where
        // reactor sees an event before we set registered. The reactor treats
        // registering the same as registered when claiming the op.
        op.registered.store(
            select_registration_state::registering, std::memory_order_release);
        svc_.scheduler().register_fd(fd_, &op, select_scheduler::event_write);

        // Transition to registered. If this fails, reactor or cancel already
        // claimed the op (state is now unregistered), so we're done. However,
        // we must still deregister the fd because cancel's deregister_fd may
        // have run before our register_fd, leaving the fd orphaned.
        auto expected = select_registration_state::registering;
        if (!op.registered.compare_exchange_strong(
                expected, select_registration_state::registered,
                std::memory_order_acq_rel))
        {
            svc_.scheduler().deregister_fd(fd_, select_scheduler::event_write);
            // completion is always posted to scheduler queue, never inline.
            return std::noop_coroutine();
        }

        // If cancelled was set before we registered, handle it now.
        if (op.cancelled.load(std::memory_order_acquire))
        {
            auto prev = op.registered.exchange(
                select_registration_state::unregistered,
                std::memory_order_acq_rel);
            if (prev != select_registration_state::unregistered)
            {
                svc_.scheduler().deregister_fd(
                    fd_, select_scheduler::event_write);
                op.impl_ptr = shared_from_this();
                svc_.post(&op);
                svc_.work_finished();
            }
        }
        // completion is always posted to scheduler queue, never inline.
        return std::noop_coroutine();
    }

    op.complete(errno, 0);
    op.impl_ptr = shared_from_this();
    svc_.post(&op);
    // completion is always posted to scheduler queue, never inline.
    return std::noop_coroutine();
}

std::coroutine_handle<>
select_socket_impl::read_some(
    std::coroutine_handle<> h,
    capy::executor_ref ex,
    io_buffer_param param,
    std::stop_token token,
    std::error_code* ec,
    std::size_t* bytes_out)
{
    auto& op = rd_;
    op.reset();
    op.h = h;
    op.ex = ex;
    op.ec_out = ec;
    op.bytes_out = bytes_out;
    op.fd = fd_;
    op.start(token, this);

    capy::mutable_buffer bufs[select_read_op::max_buffers];
    op.iovec_count =
        static_cast<int>(param.copy_to(bufs, select_read_op::max_buffers));

    if (op.iovec_count == 0 || (op.iovec_count == 1 && bufs[0].size() == 0))
    {
        op.empty_buffer_read = true;
        op.complete(0, 0);
        op.impl_ptr = shared_from_this();
        svc_.post(&op);
        return std::noop_coroutine();
    }

    for (int i = 0; i < op.iovec_count; ++i)
    {
        op.iovecs[i].iov_base = bufs[i].data();
        op.iovecs[i].iov_len = bufs[i].size();
    }

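    // Optimistic read: if data is already available the op completes
    // without touching the reactor; only EAGAIN falls through to
    // registration below.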
    ssize_t n = ::readv(fd_, op.iovecs, op.iovec_count);

    if (n > 0)
    {
        op.complete(0, static_cast<std::size_t>(n));
        op.impl_ptr = shared_from_this();
        svc_.post(&op);
        return std::noop_coroutine();
    }

    if (n == 0)
    {
        op.complete(0, 0);
        op.impl_ptr = shared_from_this();
        svc_.post(&op);
        return std::noop_coroutine();
    }

    if (errno == EAGAIN || errno == EWOULDBLOCK)
    {
        svc_.work_started();
        op.impl_ptr = shared_from_this();

        // Set registering BEFORE register_fd to close the race window where
        // reactor sees an event before we set registered.
        op.registered.store(
            select_registration_state::registering, std::memory_order_release);
        svc_.scheduler().register_fd(fd_, &op, select_scheduler::event_read);

        // Transition to registered. If this fails, reactor or cancel already
        // claimed the op (state is now unregistered), so we're done. However,
        // we must still deregister the fd because cancel's deregister_fd may
        // have run before our register_fd, leaving the fd orphaned.
        auto expected = select_registration_state::registering;
        if (!op.registered.compare_exchange_strong(
                expected, select_registration_state::registered,
                std::memory_order_acq_rel))
        {
            svc_.scheduler().deregister_fd(fd_, select_scheduler::event_read);
            return std::noop_coroutine();
        }

        // If cancelled was set before we registered, handle it now.
        if (op.cancelled.load(std::memory_order_acquire))
        {
            auto prev = op.registered.exchange(
                select_registration_state::unregistered,
                std::memory_order_acq_rel);
            if (prev != select_registration_state::unregistered)
            {
                svc_.scheduler().deregister_fd(
                    fd_, select_scheduler::event_read);
                op.impl_ptr = shared_from_this();
                svc_.post(&op);
                svc_.work_finished();
            }
        }
        return std::noop_coroutine();
    }

    op.complete(errno, 0);
    op.impl_ptr = shared_from_this();
    svc_.post(&op);
    return std::noop_coroutine();
}

std::coroutine_handle<>
select_socket_impl::write_some(
    std::coroutine_handle<> h,
    capy::executor_ref ex,
    io_buffer_param param,
    std::stop_token token,
    std::error_code* ec,
    std::size_t* bytes_out)
{
    auto& op = wr_;
    op.reset();
    op.h = h;
    op.ex = ex;
    op.ec_out = ec;
    op.bytes_out = bytes_out;
    op.fd = fd_;
    op.start(token, this);

    capy::mutable_buffer bufs[select_write_op::max_buffers];
    op.iovec_count =
        static_cast<int>(param.copy_to(bufs, select_write_op::max_buffers));

    if (op.iovec_count == 0 || (op.iovec_count == 1 && bufs[0].size() == 0))
    {
        op.complete(0, 0);
        op.impl_ptr = shared_from_this();
        svc_.post(&op);
        return std::noop_coroutine();
    }

    for (int i = 0; i < op.iovec_count; ++i)
    {
        op.iovecs[i].iov_base = bufs[i].data();
        op.iovecs[i].iov_len = bufs[i].size();
    }

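    // sendmsg with MSG_NOSIGNAL so a write to a peer-closed socket fails
    // with EPIPE rather than raising SIGPIPE.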
    msghdr msg{};
    msg.msg_iov = op.iovecs;
    msg.msg_iovlen = static_cast<std::size_t>(op.iovec_count);

    ssize_t n = ::sendmsg(fd_, &msg, MSG_NOSIGNAL);

    if (n > 0)
    {
        op.complete(0, static_cast<std::size_t>(n));
        op.impl_ptr = shared_from_this();
        svc_.post(&op);
        return std::noop_coroutine();
    }

    if (errno == EAGAIN || errno == EWOULDBLOCK)
    {
        svc_.work_started();
        op.impl_ptr = shared_from_this();

        // Set registering BEFORE register_fd to close the race window where
        // reactor sees an event before we set registered.
        op.registered.store(
            select_registration_state::registering, std::memory_order_release);
        svc_.scheduler().register_fd(fd_, &op, select_scheduler::event_write);

        // Transition to registered. If this fails, reactor or cancel already
        // claimed the op (state is now unregistered), so we're done. However,
        // we must still deregister the fd because cancel's deregister_fd may
        // have run before our register_fd, leaving the fd orphaned.
        auto expected = select_registration_state::registering;
        if (!op.registered.compare_exchange_strong(
                expected, select_registration_state::registered,
                std::memory_order_acq_rel))
        {
            svc_.scheduler().deregister_fd(fd_, select_scheduler::event_write);
            return std::noop_coroutine();
        }

        // If cancelled was set before we registered, handle it now.
        if (op.cancelled.load(std::memory_order_acquire))
        {
            auto prev = op.registered.exchange(
                select_registration_state::unregistered,
                std::memory_order_acq_rel);
            if (prev != select_registration_state::unregistered)
            {
                svc_.scheduler().deregister_fd(
                    fd_, select_scheduler::event_write);
                op.impl_ptr = shared_from_this();
                svc_.post(&op);
                svc_.work_finished();
            }
        }
        return std::noop_coroutine();
    }

    op.complete(errno ? errno : EIO, 0);
    op.impl_ptr = shared_from_this();
    svc_.post(&op);
    return std::noop_coroutine();
}

std::error_code
select_socket_impl::shutdown(tcp_socket::shutdown_type what) noexcept
{
    int how;
    switch (what)
    {
    case tcp_socket::shutdown_receive:
        how = SHUT_RD;
        break;
    case tcp_socket::shutdown_send:
        how = SHUT_WR;
        break;
    case tcp_socket::shutdown_both:
        how = SHUT_RDWR;
        break;
    default:
        return make_err(EINVAL);
    }
    if (::shutdown(fd_, how) != 0)
        return make_err(errno);
    return {};
}

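// Socket option accessors: thin wrappers over setsockopt/getsockopt that
// map errno into std::error_code.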
std::error_code
select_socket_impl::set_no_delay(bool value) noexcept
{
    int flag = value ? 1 : 0;
    if (::setsockopt(fd_, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag)) != 0)
        return make_err(errno);
    return {};
}

bool
select_socket_impl::no_delay(std::error_code& ec) const noexcept
{
    int flag = 0;
    socklen_t len = sizeof(flag);
    if (::getsockopt(fd_, IPPROTO_TCP, TCP_NODELAY, &flag, &len) != 0)
    {
        ec = make_err(errno);
        return false;
    }
    ec = {};
    return flag != 0;
}

std::error_code
select_socket_impl::set_keep_alive(bool value) noexcept
{
    int flag = value ? 1 : 0;
    if (::setsockopt(fd_, SOL_SOCKET, SO_KEEPALIVE, &flag, sizeof(flag)) != 0)
        return make_err(errno);
    return {};
}

bool
select_socket_impl::keep_alive(std::error_code& ec) const noexcept
{
    int flag = 0;
    socklen_t len = sizeof(flag);
    if (::getsockopt(fd_, SOL_SOCKET, SO_KEEPALIVE, &flag, &len) != 0)
    {
        ec = make_err(errno);
        return false;
    }
    ec = {};
    return flag != 0;
}

std::error_code
select_socket_impl::set_receive_buffer_size(int size) noexcept
{
    if (::setsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size)) != 0)
        return make_err(errno);
    return {};
}

int
select_socket_impl::receive_buffer_size(std::error_code& ec) const noexcept
{
    int size = 0;
    socklen_t len = sizeof(size);
    if (::getsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &size, &len) != 0)
    {
        ec = make_err(errno);
        return 0;
    }
    ec = {};
    return size;
}

std::error_code
select_socket_impl::set_send_buffer_size(int size) noexcept
{
    if (::setsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) != 0)
        return make_err(errno);
    return {};
}

int
select_socket_impl::send_buffer_size(std::error_code& ec) const noexcept
{
    int size = 0;
    socklen_t len = sizeof(size);
    if (::getsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &size, &len) != 0)
    {
        ec = make_err(errno);
        return 0;
    }
    ec = {};
    return size;
}

std::error_code
select_socket_impl::set_linger(bool enabled, int timeout) noexcept
{
    if (timeout < 0)
        return make_err(EINVAL);
    struct ::linger lg;
    lg.l_onoff = enabled ? 1 : 0;
    lg.l_linger = timeout;
    if (::setsockopt(fd_, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) != 0)
        return make_err(errno);
    return {};
}

tcp_socket::linger_options
select_socket_impl::linger(std::error_code& ec) const noexcept
{
    struct ::linger lg{};
    socklen_t len = sizeof(lg);
    if (::getsockopt(fd_, SOL_SOCKET, SO_LINGER, &lg, &len) != 0)
    {
        ec = make_err(errno);
        return {};
    }
    ec = {};
    return {.enabled = lg.l_onoff != 0, .timeout = lg.l_linger};
}

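// Cancellation claims each pending op by atomically swapping its
// registration state to unregistered; only the winner of that exchange
// deregisters the fd, posts the completion, and balances work_started().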
void
select_socket_impl::cancel() noexcept
{
    auto self = weak_from_this().lock();
    if (!self)
        return;

    auto cancel_op = [this, &self](select_op& op, int events) {
        auto prev = op.registered.exchange(
            select_registration_state::unregistered, std::memory_order_acq_rel);
        op.request_cancel();
        if (prev != select_registration_state::unregistered)
        {
            svc_.scheduler().deregister_fd(fd_, events);
            op.impl_ptr = self;
            svc_.post(&op);
            svc_.work_finished();
        }
    };

    cancel_op(conn_, select_scheduler::event_write);
    cancel_op(rd_, select_scheduler::event_read);
    cancel_op(wr_, select_scheduler::event_write);
}

void
select_socket_impl::cancel_single_op(select_op& op) noexcept
{
    auto self = weak_from_this().lock();
    if (!self)
        return;

    // Called from stop_token callback to cancel a specific pending operation.
    auto prev = op.registered.exchange(
        select_registration_state::unregistered, std::memory_order_acq_rel);
    op.request_cancel();

    if (prev != select_registration_state::unregistered)
    {
        // Determine which event type to deregister
        int events = 0;
        if (&op == &conn_ || &op == &wr_)
            events = select_scheduler::event_write;
        else if (&op == &rd_)
            events = select_scheduler::event_read;

        svc_.scheduler().deregister_fd(fd_, events);

        op.impl_ptr = self;
        svc_.post(&op);
        svc_.work_finished();
    }
}

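// Closing performs the same claim-and-post dance as cancel() for all
// three ops, then releases the fd and clears the cached endpoints.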
void
select_socket_impl::close_socket() noexcept
{
    auto self = weak_from_this().lock();
    if (self)
    {
        auto cancel_op = [this, &self](select_op& op, int events) {
            auto prev = op.registered.exchange(
                select_registration_state::unregistered,
                std::memory_order_acq_rel);
            op.request_cancel();
            if (prev != select_registration_state::unregistered)
            {
                svc_.scheduler().deregister_fd(fd_, events);
                op.impl_ptr = self;
                svc_.post(&op);
                svc_.work_finished();
            }
        };

        cancel_op(conn_, select_scheduler::event_write);
        cancel_op(rd_, select_scheduler::event_read);
        cancel_op(wr_, select_scheduler::event_write);
    }

    if (fd_ >= 0)
    {
        svc_.scheduler().deregister_fd(
            fd_, select_scheduler::event_read | select_scheduler::event_write);
        ::close(fd_);
        fd_ = -1;
    }

    local_endpoint_ = endpoint{};
    remote_endpoint_ = endpoint{};
}

select_socket_service::select_socket_service(capy::execution_context& ctx)
    : state_(
          std::make_unique<select_socket_state>(
              ctx.use_service<select_scheduler>()))
{
}

select_socket_service::~select_socket_service() {}

void
select_socket_service::shutdown()
{
    std::lock_guard lock(state_->mutex_);

    while (auto* impl = state_->socket_list_.pop_front())
        impl->close_socket();

    // Don't clear socket_ptrs_ here. The scheduler shuts down after us and
    // drains completed_ops_, calling destroy() on each queued op. Letting
    // ~state_ release the ptrs (during service destruction, after scheduler
    // shutdown) keeps every impl alive until all ops have been drained.
}

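// The service owns each impl through a shared_ptr in socket_ptrs_;
// in-flight ops additionally pin the impl via op.impl_ptr
// (shared_from_this), so a queued completion outlives destroy().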
io_object::implementation*
select_socket_service::construct()
{
    auto impl = std::make_shared<select_socket_impl>(*this);
    auto* raw = impl.get();

    {
        std::lock_guard lock(state_->mutex_);
        state_->socket_list_.push_back(raw);
        state_->socket_ptrs_.emplace(raw, std::move(impl));
    }

    return raw;
}

void
select_socket_service::destroy(io_object::implementation* impl)
{
    auto* select_impl = static_cast<select_socket_impl*>(impl);
    select_impl->close_socket();
    std::lock_guard lock(state_->mutex_);
    state_->socket_list_.remove(select_impl);
    state_->socket_ptrs_.erase(select_impl);
}

std::error_code
select_socket_service::open_socket(tcp_socket::implementation& impl)
{
    auto* select_impl = static_cast<select_socket_impl*>(&impl);
    select_impl->close_socket();

    int fd = ::socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0)
        return make_err(errno);

    // Set non-blocking and close-on-exec
    int flags = ::fcntl(fd, F_GETFL, 0);
    if (flags == -1)
    {
        int errn = errno;
        ::close(fd);
        return make_err(errn);
    }
    if (::fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
    {
        int errn = errno;
        ::close(fd);
        return make_err(errn);
    }
    if (::fcntl(fd, F_SETFD, FD_CLOEXEC) == -1)
    {
        int errn = errno;
        ::close(fd);
        return make_err(errn);
    }

    // Check fd is within select() limits
    if (fd >= FD_SETSIZE)
    {
        ::close(fd);
        return make_err(EMFILE); // Too many open files
    }

    select_impl->fd_ = fd;
    return {};
}

void
select_socket_service::close(io_object::handle& h)
{
    static_cast<select_socket_impl*>(h.get())->close_socket();
}

void
select_socket_service::post(select_op* op)
{
    state_->sched_.post(op);
}

void
select_socket_service::work_started() noexcept
{
    state_->sched_.work_started();
}

void
select_socket_service::work_finished() noexcept
{
    state_->sched_.work_finished();
}

} // namespace boost::corosio::detail

#endif // BOOST_COROSIO_HAS_SELECT