diff options
| author | Kacper <kacper@mail.openlinux.dev> | 2025-12-15 02:01:33 +0100 |
|---|---|---|
| committer | Kacper <kacper@mail.openlinux.dev> | 2025-12-15 02:01:59 +0100 |
| commit | 3b3325f761b09ebbfef04c44eed546cc4fdeb329 (patch) | |
| tree | aa19ea259bcda2410c2b3dd4512f19fb85aeaf8f /lib/libc/sys/io_uring_setup.c | |
| parent | 15d2df7811ef3cb79cc3e501d0d5f9b993d42bea (diff) | |
Added aio and eventfd support, along with sleep and yes utilities
Diffstat (limited to 'lib/libc/sys/io_uring_setup.c')
| -rw-r--r-- | lib/libc/sys/io_uring_setup.c | 81 |
1 file changed, 81 insertions, 0 deletions
diff --git a/lib/libc/sys/io_uring_setup.c b/lib/libc/sys/io_uring_setup.c new file mode 100644 index 00000000..0a368a81 --- /dev/null +++ b/lib/libc/sys/io_uring_setup.c @@ -0,0 +1,81 @@ +#include <sys/eventfd.h> +#include <io_uring.h> +#include <string.h> +#include <sys/mman.h> +#include <syscall.h> + +struct io_uring __io_uring; + +int io_uring_setup(unsigned int entries, struct io_uring_params *params) +{ + return syscall(io_uring_setup, entries, params); +} + +int __io_uring_setup(void) +{ + struct io_uring_params p; + memset(&p, 0, sizeof(p)); + + __io_uring.fd = io_uring_setup(IO_URING_ENTRIES, &p); + + if (__io_uring.fd < 0) + return -1; + + __io_uring.sq.ring_size = + p.sq_off.array + p.sq_entries * sizeof(unsigned int); + __io_uring.sq.ring = mmap(NULL, __io_uring.sq.ring_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, __io_uring.fd, + IORING_OFF_SQ_RING); + + if (__io_uring.sq.ring == MAP_FAILED) + return -1; + + __io_uring.sq.head = __io_uring.sq.ring + p.sq_off.head; + __io_uring.sq.tail = __io_uring.sq.ring + p.sq_off.tail; + __io_uring.sq.ring_mask = __io_uring.sq.ring + p.sq_off.ring_mask; + __io_uring.sq.ring_entries = __io_uring.sq.ring + p.sq_off.ring_entries; + __io_uring.sq.flags = __io_uring.sq.ring + p.sq_off.flags; + __io_uring.sq.dropped = __io_uring.sq.ring + p.sq_off.dropped; + __io_uring.sq.array = __io_uring.sq.ring + p.sq_off.array; + __io_uring.sq.sqes = + mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe), + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, + __io_uring.fd, IORING_OFF_SQES); + + if (__io_uring.sq.sqes == MAP_FAILED) { + munmap(__io_uring.sq.ring, __io_uring.sq.ring_size); + return -1; + } + + __io_uring.cq.ring_size = + p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe); + __io_uring.cq.ring = mmap(NULL, __io_uring.cq.ring_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, __io_uring.fd, + IORING_OFF_CQ_RING); + + if (__io_uring.cq.ring == MAP_FAILED) { + munmap(__io_uring.sq.ring, 
__io_uring.sq.ring_size); + munmap(__io_uring.sq.sqes, + p.sq_entries * sizeof(struct io_uring_sqe)); + return -1; + } + + __io_uring.eventfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); + if (__io_uring.eventfd < 0) + return -1; + + io_uring_register(__io_uring.fd, IORING_REGISTER_EVENTFD, + &__io_uring.eventfd, 1); + + __io_uring.cq.head = __io_uring.cq.ring + p.cq_off.head; + __io_uring.cq.tail = __io_uring.cq.ring + p.cq_off.tail; + __io_uring.cq.ring_mask = __io_uring.cq.ring + p.cq_off.ring_mask; + __io_uring.cq.ring_entries = __io_uring.cq.ring + p.cq_off.ring_entries; + __io_uring.cq.overflow = __io_uring.cq.ring + p.cq_off.overflow; + __io_uring.cq.cqes = __io_uring.cq.ring + p.cq_off.cqes; + __io_uring.cq.flags = __io_uring.cq.ring + p.cq_off.flags; + + return 0; +} |
