src/corosio/src/detail/select/sockets.cpp

Coverage: Lines 73.4% (273/372) | Functions 94.1% (32/34) | Branches 57.4% (113/197)
Line  Branch (taken/total)  Hits  Source Code
1 //
2 // Copyright (c) 2026 Steve Gerbino
3 //
4 // Distributed under the Boost Software License, Version 1.0. (See accompanying
5 // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // Official repository: https://github.com/cppalliance/corosio
8 //
9
10 #include <boost/corosio/detail/platform.hpp>
11
12 #if BOOST_COROSIO_HAS_SELECT
13
14 #include "src/detail/select/sockets.hpp"
15 #include "src/detail/endpoint_convert.hpp"
16 #include "src/detail/dispatch_coro.hpp"
17 #include "src/detail/make_err.hpp"
18
19 #include <boost/corosio/detail/except.hpp>
20
21 #include <boost/capy/buffers.hpp>
22
23 #include <errno.h>
24 #include <fcntl.h>
25 #include <netinet/in.h>
26 #include <netinet/tcp.h>
27 #include <sys/socket.h>
28 #include <unistd.h>
29
30 namespace boost::corosio::detail {
31
32 void
33 98 select_op::canceller::
34 operator()() const noexcept
35 {
36 98 op->cancel();
37 98 }
38
39 void
40 select_connect_op::
41 cancel() noexcept
42 {
43 if (socket_impl_)
44 socket_impl_->cancel_single_op(*this);
45 else
46 request_cancel();
47 }
48
49 void
50 98 select_read_op::
51 cancel() noexcept
52 {
53  1/2  98  if (socket_impl_)
54 98 socket_impl_->cancel_single_op(*this);
55 else
56 request_cancel();
57 98 }
58
59 void
60 select_write_op::
61 cancel() noexcept
62 {
63 if (socket_impl_)
64 socket_impl_->cancel_single_op(*this);
65 else
66 request_cancel();
67 }
68
69 void
70 3436 select_connect_op::
71 operator()()
72 {
73 3436 stop_cb.reset();
74
75  3/4  3436  bool success = (errn == 0 && !cancelled.load(std::memory_order_acquire));
76
77 // Cache endpoints on successful connect
78  3/4  3436  if (success && socket_impl_)
79 {
80 // Query local endpoint via getsockname (may fail, but remote is always known)
81 3434 endpoint local_ep;
82 3434 sockaddr_in local_addr{};
83 3434 socklen_t local_len = sizeof(local_addr);
84  1/2  3434  if (::getsockname(fd, reinterpret_cast<sockaddr*>(&local_addr), &local_len) == 0)
85 3434 local_ep = from_sockaddr_in(local_addr);
86 // Always cache remote endpoint; local may be default if getsockname failed
87 3434 static_cast<select_socket_impl*>(socket_impl_)->set_endpoints(local_ep, target_endpoint);
88 }
89
90  1/2  3436  if (ec_out)
91 {
92  1/2  3436  if (cancelled.load(std::memory_order_acquire))
93 *ec_out = capy::error::canceled;
94  2/2  3436  else if (errn != 0)
95 2 *ec_out = make_err(errn);
96 else
97 3434 *ec_out = {};
98 }
99
100  1/2  3436  if (bytes_out)
101 *bytes_out = bytes_transferred;
102
103 // Move to stack before destroying the frame
104 3436 capy::executor_ref saved_ex( std::move( ex ) );
105 3436 std::coroutine_handle<> saved_h( std::move( h ) );
106 3436 impl_ptr.reset();
107  2/2  3436  dispatch_coro(saved_ex, saved_h).resume();
108 3436 }
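
Note: the end of this handler follows a deliberate pattern: everything needed after the operation may be recycled is first moved into locals, the owning reference is dropped, and only then is the coroutine resumed. A minimal sketch of the same idea, assuming a hypothetical op_frame type (the real code additionally routes the resume through dispatch_coro so it lands on the right executor):

    // Sketch only; op_frame is illustrative, not a corosio type.
    #include <coroutine>
    #include <memory>

    struct op_frame
    {
        std::coroutine_handle<> h;        // awaiting coroutine
        std::shared_ptr<void>   impl_ptr; // keeps the socket impl (and this frame's owner) alive

        void finish()
        {
            auto saved_h = h;             // copy to the stack first
            impl_ptr.reset();             // may release the last reference to the owner
            saved_h.resume();             // from here on, touch only stack-local state
        }
    };
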
109
110 10322 select_socket_impl::
111 10322 select_socket_impl(select_socket_service& svc) noexcept
112 10322 : svc_(svc)
113 {
114 10322 }
115
116 std::coroutine_handle<>
117 3436 select_socket_impl::
118 connect(
119 std::coroutine_handle<> h,
120 capy::executor_ref ex,
121 endpoint ep,
122 std::stop_token token,
123 std::error_code* ec)
124 {
125 3436 auto& op = conn_;
126 3436 op.reset();
127 3436 op.h = h;
128 3436 op.ex = ex;
129 3436 op.ec_out = ec;
130 3436 op.fd = fd_;
131 3436 op.target_endpoint = ep; // Store target for endpoint caching
132 3436 op.start(token, this);
133
134 3436 sockaddr_in addr = detail::to_sockaddr_in(ep);
135  1/1  3436  int result = ::connect(fd_, reinterpret_cast<sockaddr*>(&addr), sizeof(addr));
136
137  1/2  3436  if (result == 0)
138 {
139 // Sync success - cache endpoints immediately
140 sockaddr_in local_addr{};
141 socklen_t local_len = sizeof(local_addr);
142 if (::getsockname(fd_, reinterpret_cast<sockaddr*>(&local_addr), &local_len) == 0)
143 local_endpoint_ = detail::from_sockaddr_in(local_addr);
144 remote_endpoint_ = ep;
145
146 op.complete(0, 0);
147 op.impl_ptr = shared_from_this();
148 svc_.post(&op);
149 // completion is always posted to scheduler queue, never inline.
150 return std::noop_coroutine();
151 }
152
153  1/2  3436  if (errno == EINPROGRESS)
154 {
155 3436 svc_.work_started();
156  1/1  3436  op.impl_ptr = shared_from_this();
157
158 // Set registering BEFORE register_fd to close the race window where
159 // reactor sees an event before we set registered. The reactor treats
160 // registering the same as registered when claiming the op.
161 3436 op.registered.store(select_registration_state::registering, std::memory_order_release);
162  1/1  3436  svc_.scheduler().register_fd(fd_, &op, select_scheduler::event_write);
163
164 // Transition to registered. If this fails, reactor or cancel already
165 // claimed the op (state is now unregistered), so we're done. However,
166 // we must still deregister the fd because cancel's deregister_fd may
167 // have run before our register_fd, leaving the fd orphaned.
168 3436 auto expected = select_registration_state::registering;
169  1/2  3436  if (!op.registered.compare_exchange_strong(
170 expected, select_registration_state::registered, std::memory_order_acq_rel))
171 {
172 svc_.scheduler().deregister_fd(fd_, select_scheduler::event_write);
173 // completion is always posted to scheduler queue, never inline.
174 return std::noop_coroutine();
175 }
176
177 // If cancelled was set before we registered, handle it now.
178  1/2  3436  if (op.cancelled.load(std::memory_order_acquire))
179 {
180 auto prev = op.registered.exchange(
181 select_registration_state::unregistered, std::memory_order_acq_rel);
182 if (prev != select_registration_state::unregistered)
183 {
184 svc_.scheduler().deregister_fd(fd_, select_scheduler::event_write);
185 op.impl_ptr = shared_from_this();
186 svc_.post(&op);
187 svc_.work_finished();
188 }
189 }
190 // completion is always posted to scheduler queue, never inline.
191 3436 return std::noop_coroutine();
192 }
193
194 op.complete(errno, 0);
195 op.impl_ptr = shared_from_this();
196 svc_.post(&op);
197 // completion is always posted to scheduler queue, never inline.
198 return std::noop_coroutine();
199 }
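
Note: the comments in connect() describe a small claim protocol shared by the initiator, the reactor, and cancellation. A condensed sketch of the intended transitions, assuming select_registration_state is a three-value enum (its real declaration lives in the backend headers, not in this file):

    // Sketch of the claim protocol, not the backend's actual code.
    #include <atomic>

    enum class reg_state { unregistered, registering, registered };

    // Initiator:  store(registering) -> register_fd() -> CAS registering -> registered.
    //             A failed CAS means the reactor or cancel already claimed the op.
    // Claimant (reactor or cancel): exchange(unregistered); whoever observes a
    //             non-unregistered previous value owns deregistration and completion.
    bool claim(std::atomic<reg_state>& s)
    {
        return s.exchange(reg_state::unregistered, std::memory_order_acq_rel)
               != reg_state::unregistered;
    }
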
200
201 std::coroutine_handle<>
202 83617 select_socket_impl::
203 read_some(
204 std::coroutine_handle<> h,
205 capy::executor_ref ex,
206 io_buffer_param param,
207 std::stop_token token,
208 std::error_code* ec,
209 std::size_t* bytes_out)
210 {
211 83617 auto& op = rd_;
212 83617 op.reset();
213 83617 op.h = h;
214 83617 op.ex = ex;
215 83617 op.ec_out = ec;
216 83617 op.bytes_out = bytes_out;
217 83617 op.fd = fd_;
218 83617 op.start(token, this);
219
220 83617 capy::mutable_buffer bufs[select_read_op::max_buffers];
221 83617 op.iovec_count = static_cast<int>(param.copy_to(bufs, select_read_op::max_buffers));
222
223  6/8  83617  if (op.iovec_count == 0 || (op.iovec_count == 1 && bufs[0].size() == 0))
224 {
225 1 op.empty_buffer_read = true;
226 1 op.complete(0, 0);
227  1/1  1  op.impl_ptr = shared_from_this();
228  1/1  1  svc_.post(&op);
229 1 return std::noop_coroutine();
230 }
231
232  2/2  167232  for (int i = 0; i < op.iovec_count; ++i)
233 {
234 83616 op.iovecs[i].iov_base = bufs[i].data();
235 83616 op.iovecs[i].iov_len = bufs[i].size();
236 }
237
238  1/1  83616  ssize_t n = ::readv(fd_, op.iovecs, op.iovec_count);
239
240  2/2  83616  if (n > 0)
241 {
242 83329 op.complete(0, static_cast<std::size_t>(n));
243  1/1  83329  op.impl_ptr = shared_from_this();
244  1/1  83329  svc_.post(&op);
245 83329 return std::noop_coroutine();
246 }
247
248  2/2  287  if (n == 0)
249 {
250 5 op.complete(0, 0);
251  1/1  5  op.impl_ptr = shared_from_this();
252  1/1  5  svc_.post(&op);
253 5 return std::noop_coroutine();
254 }
255
256  1/4  282  if (errno == EAGAIN || errno == EWOULDBLOCK)
257 {
258 282 svc_.work_started();
259  1/1  282  op.impl_ptr = shared_from_this();
260
261 // Set registering BEFORE register_fd to close the race window where
262 // reactor sees an event before we set registered.
263 282 op.registered.store(select_registration_state::registering, std::memory_order_release);
264  1/1  282  svc_.scheduler().register_fd(fd_, &op, select_scheduler::event_read);
265
266 // Transition to registered. If this fails, reactor or cancel already
267 // claimed the op (state is now unregistered), so we're done. However,
268 // we must still deregister the fd because cancel's deregister_fd may
269 // have run before our register_fd, leaving the fd orphaned.
270 282 auto expected = select_registration_state::registering;
271  1/2  282  if (!op.registered.compare_exchange_strong(
272 expected, select_registration_state::registered, std::memory_order_acq_rel))
273 {
274 svc_.scheduler().deregister_fd(fd_, select_scheduler::event_read);
275 return std::noop_coroutine();
276 }
277
278 // If cancelled was set before we registered, handle it now.
279  1/2  282  if (op.cancelled.load(std::memory_order_acquire))
280 {
281 auto prev = op.registered.exchange(
282 select_registration_state::unregistered, std::memory_order_acq_rel);
283 if (prev != select_registration_state::unregistered)
284 {
285 svc_.scheduler().deregister_fd(fd_, select_scheduler::event_read);
286 op.impl_ptr = shared_from_this();
287 svc_.post(&op);
288 svc_.work_finished();
289 }
290 }
291 282 return std::noop_coroutine();
292 }
293
294 op.complete(errno, 0);
295 op.impl_ptr = shared_from_this();
296 svc_.post(&op);
297 return std::noop_coroutine();
298 }
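
Note: read_some() attempts the I/O speculatively and only registers with the reactor when the socket is not ready. A standalone sketch of the readv() outcome handling it relies on (POSIX, non-blocking descriptor; try_readv and read_result are illustrative names):

    #include <errno.h>
    #include <stddef.h>
    #include <sys/uio.h>

    enum class read_result { data, eof, would_block, error };

    read_result try_readv(int fd, const iovec* iov, int iovcnt, size_t& n_out, int& err_out)
    {
        ssize_t n = ::readv(fd, iov, iovcnt);
        if (n > 0)  { n_out = static_cast<size_t>(n); return read_result::data; }
        if (n == 0) { return read_result::eof; }            // peer closed the connection
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            return read_result::would_block;                // register the fd and wait
        err_out = errno;
        return read_result::error;
    }
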
299
300 std::coroutine_handle<>
301 83454 select_socket_impl::
302 write_some(
303 std::coroutine_handle<> h,
304 capy::executor_ref ex,
305 io_buffer_param param,
306 std::stop_token token,
307 std::error_code* ec,
308 std::size_t* bytes_out)
309 {
310 83454 auto& op = wr_;
311 83454 op.reset();
312 83454 op.h = h;
313 83454 op.ex = ex;
314 83454 op.ec_out = ec;
315 83454 op.bytes_out = bytes_out;
316 83454 op.fd = fd_;
317 83454 op.start(token, this);
318
319 83454 capy::mutable_buffer bufs[select_write_op::max_buffers];
320 83454 op.iovec_count = static_cast<int>(param.copy_to(bufs, select_write_op::max_buffers));
321
322  6/8  83454  if (op.iovec_count == 0 || (op.iovec_count == 1 && bufs[0].size() == 0))
323 {
324 1 op.complete(0, 0);
325  1/1  1  op.impl_ptr = shared_from_this();
326  1/1  1  svc_.post(&op);
327 1 return std::noop_coroutine();
328 }
329
330  2/2  166906  for (int i = 0; i < op.iovec_count; ++i)
331 {
332 83453 op.iovecs[i].iov_base = bufs[i].data();
333 83453 op.iovecs[i].iov_len = bufs[i].size();
334 }
335
336 83453 msghdr msg{};
337 83453 msg.msg_iov = op.iovecs;
338 83453 msg.msg_iovlen = static_cast<std::size_t>(op.iovec_count);
339
340  1/1  83453  ssize_t n = ::sendmsg(fd_, &msg, MSG_NOSIGNAL);
341
342  2/2  83453  if (n > 0)
343 {
344 83452 op.complete(0, static_cast<std::size_t>(n));
345  1/1  83452  op.impl_ptr = shared_from_this();
346  1/1  83452  svc_.post(&op);
347 83452 return std::noop_coroutine();
348 }
349
350  2/4  1  if (errno == EAGAIN || errno == EWOULDBLOCK)
351 {
352 svc_.work_started();
353 op.impl_ptr = shared_from_this();
354
355 // Set registering BEFORE register_fd to close the race window where
356 // reactor sees an event before we set registered.
357 op.registered.store(select_registration_state::registering, std::memory_order_release);
358 svc_.scheduler().register_fd(fd_, &op, select_scheduler::event_write);
359
360 // Transition to registered. If this fails, reactor or cancel already
361 // claimed the op (state is now unregistered), so we're done. However,
362 // we must still deregister the fd because cancel's deregister_fd may
363 // have run before our register_fd, leaving the fd orphaned.
364 auto expected = select_registration_state::registering;
365 if (!op.registered.compare_exchange_strong(
366 expected, select_registration_state::registered, std::memory_order_acq_rel))
367 {
368 svc_.scheduler().deregister_fd(fd_, select_scheduler::event_write);
369 return std::noop_coroutine();
370 }
371
372 // If cancelled was set before we registered, handle it now.
373 if (op.cancelled.load(std::memory_order_acquire))
374 {
375 auto prev = op.registered.exchange(
376 select_registration_state::unregistered, std::memory_order_acq_rel);
377 if (prev != select_registration_state::unregistered)
378 {
379 svc_.scheduler().deregister_fd(fd_, select_scheduler::event_write);
380 op.impl_ptr = shared_from_this();
381 svc_.post(&op);
382 svc_.work_finished();
383 }
384 }
385 return std::noop_coroutine();
386 }
387
388  1/2  1  op.complete(errno ? errno : EIO, 0);
389  1/1  1  op.impl_ptr = shared_from_this();
390  1/1  1  svc_.post(&op);
391 1 return std::noop_coroutine();
392 }
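
Note: write_some() passes MSG_NOSIGNAL to sendmsg() so that writing to a peer that has already closed the connection fails with EPIPE instead of raising SIGPIPE, whose default action terminates the process. A minimal sketch of that behavior (Linux flag; other platforms typically use the SO_NOSIGPIPE socket option or ignore SIGPIPE instead):

    #include <errno.h>
    #include <sys/socket.h>

    ssize_t send_no_sigpipe(int fd, const msghdr& msg, int& err_out)
    {
        ssize_t n = ::sendmsg(fd, &msg, MSG_NOSIGNAL); // suppress SIGPIPE for this call
        if (n < 0)
            err_out = errno;                           // EPIPE is reported as an error instead
        return n;
    }
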
393
394 std::error_code
395 3 select_socket_impl::
396 shutdown(tcp_socket::shutdown_type what) noexcept
397 {
398 int how;
399  3/4  3  switch (what)
400 {
401 1 case tcp_socket::shutdown_receive: how = SHUT_RD; break;
402 1 case tcp_socket::shutdown_send: how = SHUT_WR; break;
403 1 case tcp_socket::shutdown_both: how = SHUT_RDWR; break;
404 default:
405 return make_err(EINVAL);
406 }
407  1/2  3  if (::shutdown(fd_, how) != 0)
408 return make_err(errno);
409 3 return {};
410 }
411
412 std::error_code
413 5 select_socket_impl::
414 set_no_delay(bool value) noexcept
415 {
416  2/2  5  int flag = value ? 1 : 0;
417  1/2  5  if (::setsockopt(fd_, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag)) != 0)
418 return make_err(errno);
419 5 return {};
420 }
421
422 bool
423 5 select_socket_impl::
424 no_delay(std::error_code& ec) const noexcept
425 {
426 5 int flag = 0;
427 5 socklen_t len = sizeof(flag);
428  1/2  5  if (::getsockopt(fd_, IPPROTO_TCP, TCP_NODELAY, &flag, &len) != 0)
429 {
430 ec = make_err(errno);
431 return false;
432 }
433 5 ec = {};
434 5 return flag != 0;
435 }
436
437 std::error_code
438 4 select_socket_impl::
439 set_keep_alive(bool value) noexcept
440 {
441  2/2  4  int flag = value ? 1 : 0;
442  1/2  4  if (::setsockopt(fd_, SOL_SOCKET, SO_KEEPALIVE, &flag, sizeof(flag)) != 0)
443 return make_err(errno);
444 4 return {};
445 }
446
447 bool
448 4 select_socket_impl::
449 keep_alive(std::error_code& ec) const noexcept
450 {
451 4 int flag = 0;
452 4 socklen_t len = sizeof(flag);
453  1/2  4  if (::getsockopt(fd_, SOL_SOCKET, SO_KEEPALIVE, &flag, &len) != 0)
454 {
455 ec = make_err(errno);
456 return false;
457 }
458 4 ec = {};
459 4 return flag != 0;
460 }
461
462 std::error_code
463 1 select_socket_impl::
464 set_receive_buffer_size(int size) noexcept
465 {
466  1/2  1  if (::setsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size)) != 0)
467 return make_err(errno);
468 1 return {};
469 }
470
471 int
472 3 select_socket_impl::
473 receive_buffer_size(std::error_code& ec) const noexcept
474 {
475 3 int size = 0;
476 3 socklen_t len = sizeof(size);
477  1/2  3  if (::getsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &size, &len) != 0)
478 {
479 ec = make_err(errno);
480 return 0;
481 }
482 3 ec = {};
483 3 return size;
484 }
485
486 std::error_code
487 1 select_socket_impl::
488 set_send_buffer_size(int size) noexcept
489 {
490  1/2  1  if (::setsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) != 0)
491 return make_err(errno);
492 1 return {};
493 }
494
495 int
496 3 select_socket_impl::
497 send_buffer_size(std::error_code& ec) const noexcept
498 {
499 3 int size = 0;
500 3 socklen_t len = sizeof(size);
501  1/2  3  if (::getsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &size, &len) != 0)
502 {
503 ec = make_err(errno);
504 return 0;
505 }
506 3 ec = {};
507 3 return size;
508 }
509
510 std::error_code
511 4 select_socket_impl::
512 set_linger(bool enabled, int timeout) noexcept
513 {
514  2/2  4  if (timeout < 0)
515 1 return make_err(EINVAL);
516 struct ::linger lg;
517  2/2  3  lg.l_onoff = enabled ? 1 : 0;
518 3 lg.l_linger = timeout;
519  1/2  3  if (::setsockopt(fd_, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) != 0)
520 return make_err(errno);
521 3 return {};
522 }
523
524 tcp_socket::linger_options
525 3 select_socket_impl::
526 linger(std::error_code& ec) const noexcept
527 {
528 3 struct ::linger lg{};
529 3 socklen_t len = sizeof(lg);
530  1/2  3  if (::getsockopt(fd_, SOL_SOCKET, SO_LINGER, &lg, &len) != 0)
531 {
532 ec = make_err(errno);
533 return {};
534 }
535 3 ec = {};
536 3 return {.enabled = lg.l_onoff != 0, .timeout = lg.l_linger};
537 }
538
539 void
540 31149 select_socket_impl::
541 cancel() noexcept
542 {
543 31149 std::shared_ptr<select_socket_impl> self;
544 try {
545  1/1  31149  self = shared_from_this();
546 } catch (const std::bad_weak_ptr&) {
547 return;
548 }
549
550 93447 auto cancel_op = [this, &self](select_op& op, int events) {
551 93447 auto prev = op.registered.exchange(
552 select_registration_state::unregistered, std::memory_order_acq_rel);
553 93447 op.request_cancel();
554  2/2  93447  if (prev != select_registration_state::unregistered)
555 {
556 93 svc_.scheduler().deregister_fd(fd_, events);
557 93 op.impl_ptr = self;
558 93 svc_.post(&op);
559 93 svc_.work_finished();
560 }
561 124596 };
562
563 31149 cancel_op(conn_, select_scheduler::event_write);
564 31149 cancel_op(rd_, select_scheduler::event_read);
565 31149 cancel_op(wr_, select_scheduler::event_write);
566 31149 }
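
Note: cancel() obtains a shared_ptr to itself inside a try block because shared_from_this() throws std::bad_weak_ptr when no shared_ptr owns the object any more, i.e. when cancellation races with destruction. A minimal sketch of that guard (impl is a hypothetical stand-in):

    #include <memory>

    struct impl : std::enable_shared_from_this<impl>
    {
        void cancel_all() noexcept
        {
            std::shared_ptr<impl> self;
            try { self = shared_from_this(); }
            catch (const std::bad_weak_ptr&) { return; } // owner is already releasing us
            // ... proceed, handing 'self' to each posted op so the impl outlives it ...
        }
    };
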
567
568 void
569 98 select_socket_impl::
570 cancel_single_op(select_op& op) noexcept
571 {
572 // Called from stop_token callback to cancel a specific pending operation.
573 98 auto prev = op.registered.exchange(
574 select_registration_state::unregistered, std::memory_order_acq_rel);
575  98  op.request_cancel();
576
577  2/2  98  if (prev != select_registration_state::unregistered)
578 {
579 // Determine which event type to deregister
580 66 int events = 0;
581  2/4  66  if (&op == &conn_ || &op == &wr_)
582 events = select_scheduler::event_write;
583  1/2  66  else if (&op == &rd_)
584 66 events = select_scheduler::event_read;
585
586 66 svc_.scheduler().deregister_fd(fd_, events);
587
588 // Keep impl alive until op completes
589 try {
590  1/1  66  op.impl_ptr = shared_from_this();
591 } catch (const std::bad_weak_ptr&) {
592 // Impl is being destroyed, op will be orphaned but that's ok
593 }
594
595 66 svc_.post(&op);
596 66 svc_.work_finished();
597 }
598 98 }
599
600 void
601 30972 select_socket_impl::
602 close_socket() noexcept
603 {
604 30972 cancel();
605
606  2/2  30972  if (fd_ >= 0)
607 {
608 // Unconditionally remove from registered_fds_ to handle edge cases
609 // where the fd might be registered but cancel() didn't clean it up
610 // due to race conditions.
611 6881 svc_.scheduler().deregister_fd(fd_,
612 select_scheduler::event_read | select_scheduler::event_write);
613 6881 ::close(fd_);
614 6881 fd_ = -1;
615 }
616
617 // Clear cached endpoints
618 30972 local_endpoint_ = endpoint{};
619 30972 remote_endpoint_ = endpoint{};
620 30972 }
621
622 133 select_socket_service::
623 133 select_socket_service(capy::execution_context& ctx)
624  2/2  133  : state_(std::make_unique<select_socket_state>(ctx.use_service<select_scheduler>()))
625 {
626 133 }
627
628 266 select_socket_service::
629 133 ~select_socket_service()
630 {
631 266 }
632
633 void
634 133 select_socket_service::
635 shutdown()
636 {
637  1/1  133  std::lock_guard lock(state_->mutex_);
638
639  1/2  133  while (auto* impl = state_->socket_list_.pop_front())
640 impl->close_socket();
641
642 // Don't clear socket_ptrs_ here. The scheduler shuts down after us and
643 // drains completed_ops_, calling destroy() on each queued op. Letting
644 // ~state_ release the ptrs (during service destruction, after scheduler
645 // shutdown) keeps every impl alive until all ops have been drained.
646 133 }
647
648 io_object::implementation*
649 10322 select_socket_service::
650 construct()
651 {
652  1/1  10322  auto impl = std::make_shared<select_socket_impl>(*this);
653 10322 auto* raw = impl.get();
654
655 {
656  1/1  10322  std::lock_guard lock(state_->mutex_);
657 10322 state_->socket_list_.push_back(raw);
658  1/1  10322  state_->socket_ptrs_.emplace(raw, std::move(impl));
659 10322 }
660
661 10322 return raw;
662 10322 }
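
Note: construct() records each impl twice: a raw pointer in socket_list_ (walked by shutdown() to close live sockets) and the owning shared_ptr in socket_ptrs_, keyed by that raw pointer, so destroy() can release ownership later. A simplified, hypothetical sketch of that scheme (the real select_socket_state is defined in the backend headers):

    #include <map>
    #include <memory>
    #include <mutex>

    struct registry
    {
        std::mutex mutex_;
        std::map<void*, std::shared_ptr<void>> owned_; // raw pointer -> owning reference

        void* construct(std::shared_ptr<void> impl)
        {
            void* raw = impl.get();
            std::lock_guard lock(mutex_);
            owned_.emplace(raw, std::move(impl));      // the service keeps the impl alive
            return raw;                                // callers hold only the raw handle
        }

        void destroy(void* raw)
        {
            std::lock_guard lock(mutex_);
            owned_.erase(raw);                         // possibly the last reference
        }
    };
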
663
664 void
665 10322 select_socket_service::
666 destroy(io_object::implementation* impl)
667 {
668 10322 auto* select_impl = static_cast<select_socket_impl*>(impl);
669 10322 select_impl->close_socket();
670  1/1  10322  std::lock_guard lock(state_->mutex_);
671 10322 state_->socket_list_.remove(select_impl);
672  1/1  10322  state_->socket_ptrs_.erase(select_impl);
673 10322 }
674
675 std::error_code
676 3447 select_socket_service::
677 open_socket(tcp_socket::implementation& impl)
678 {
679 3447 auto* select_impl = static_cast<select_socket_impl*>(&impl);
680 3447 select_impl->close_socket();
681
682 3447 int fd = ::socket(AF_INET, SOCK_STREAM, 0);
683  1/2  3447  if (fd < 0)
684 return make_err(errno);
685
686 // Set non-blocking and close-on-exec
687 3447 int flags = ::fcntl(fd, F_GETFL, 0);
688  1/2  3447  if (flags == -1)
689 {
690 int errn = errno;
691 ::close(fd);
692 return make_err(errn);
693 }
694  1/2  3447  if (::fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
695 {
696 int errn = errno;
697 ::close(fd);
698 return make_err(errn);
699 }
700  1/2  3447  if (::fcntl(fd, F_SETFD, FD_CLOEXEC) == -1)
701 {
702 int errn = errno;
703 ::close(fd);
704 return make_err(errn);
705 }
706
707 // Check fd is within select() limits
708  1/2  3447  if (fd >= FD_SETSIZE)
709 {
710 ::close(fd);
711 return make_err(EMFILE); // Too many open files
712 }
713
714 3447 select_impl->fd_ = fd;
715 3447 return {};
716 }
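
Note: the fd >= FD_SETSIZE rejection above exists because fd_set is a fixed-size bitmap; passing a larger descriptor to FD_SET()/FD_ISSET() is undefined behavior, so a select()-based backend must refuse such descriptors up front. A small sketch of the guard:

    #include <sys/select.h>

    bool can_watch(int fd)
    {
        return fd >= 0 && fd < FD_SETSIZE;  // FD_SETSIZE is commonly 1024
    }

    // usage:
    //   fd_set readfds;
    //   FD_ZERO(&readfds);
    //   if (can_watch(fd))
    //       FD_SET(fd, &readfds);          // only safe within the FD_SETSIZE range
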
717
718 void
719 17203 select_socket_service::
720 close(io_object::handle& h)
721 {
722 17203 static_cast<select_socket_impl*>(h.get())->close_socket();
723 17203 }
724
725 void
726 166948 select_socket_service::
727 post(select_op* op)
728 {
729 166948 state_->sched_.post(op);
730 166948 }
731
732 void
733 3718 select_socket_service::
734 work_started() noexcept
735 {
736 3718 state_->sched_.work_started();
737 3718 }
738
739 void
740 159 select_socket_service::
741 work_finished() noexcept
742 {
743 159 state_->sched_.work_finished();
744 159 }
745
746 } // namespace boost::corosio::detail
747
748 #endif // BOOST_COROSIO_HAS_SELECT
749