summaryrefslogtreecommitdiff
path: root/include/arch/x86_64
diff options
context:
space:
mode:
authorKacper <kacper@mail.openlinux.dev>2025-12-22 23:27:56 +0100
committerKacper <kacper@mail.openlinux.dev>2025-12-22 23:30:32 +0100
commit0f30d227497418c6d3bef7d52244407e30454504 (patch)
tree0e1ac19623d3268380cf74328cdf643648a2f43c /include/arch/x86_64
parent90dad97fc07f049383903a166631e2c257f9b8c1 (diff)
Added C11 threads, fixed some locks, and added *_unlocked functions
Diffstat (limited to 'include/arch/x86_64')
-rw-r--r--include/arch/x86_64/asm/syscall.h56
-rw-r--r--include/arch/x86_64/asm/vdso.h29
2 files changed, 48 insertions, 37 deletions
diff --git a/include/arch/x86_64/asm/syscall.h b/include/arch/x86_64/asm/syscall.h
index b3d1ae48..181120ec 100644
--- a/include/arch/x86_64/asm/syscall.h
+++ b/include/arch/x86_64/asm/syscall.h
@@ -1,81 +1,79 @@
#ifndef __ASM_SYSCALL_H
#define __ASM_SYSCALL_H
-static __inline long __syscall0(long n)
+__attribute__((__always_inline__)) inline unsigned long __syscall0(long n)
{
unsigned long ret;
- __asm__ volatile("syscall"
- : "=a"(ret)
- : "a"(n)
- : "rcx", "r11", "memory");
+ __asm__ volatile("syscall" : "=a"(ret) : "a"(n) : "rcx", "r11", "memory");
return ret;
}
-static __inline long __syscall1(long n, long a1)
+__attribute__((__always_inline__)) inline unsigned long __syscall1(long n, long a1)
{
unsigned long ret;
- __asm__ __volatile__("syscall"
- : "=a"(ret)
- : "a"(n), "D"(a1)
- : "rcx", "r11", "memory");
+ __asm__ __volatile__("syscall" : "=a"(ret) : "a"(n), "D"(a1) : "rcx", "r11", "memory");
return ret;
}
-static inline long __syscall2(long n, long a1, long a2)
+__attribute__((__always_inline__)) inline unsigned long __syscall2(long n, long a1, long a2)
{
unsigned long ret;
- __asm__ volatile("syscall"
- : "=a"(ret)
- : "a"(n), "D"(a1), "S"(a2)
- : "rcx", "r11", "memory");
+ __asm__ volatile("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2) : "rcx", "r11", "memory");
return ret;
}
-static inline long __syscall3(long n, long a1, long a2, long a3)
+__attribute__((__always_inline__)) inline unsigned long __syscall3(long n, long a1, long a2, long a3)
{
unsigned long ret;
- __asm__ volatile("syscall"
- : "=a"(ret)
- : "a"(n), "D"(a1), "S"(a2), "d"(a3)
- : "rcx", "r11", "memory");
+ __asm__ volatile("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2), "d"(a3) : "rcx", "r11", "memory");
return ret;
}
-static inline long __syscall4(long n, long a1, long a2, long a3, long a4)
+__attribute__((__always_inline__)) inline unsigned long __syscall4(long n, long a1, long a2, long a3, long a4)
{
unsigned long ret;
register long r10 __asm__("r10") = a4;
+ __asm__ volatile("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10) : "rcx", "r11", "memory");
+ return ret;
+}
+
+__attribute__((__always_inline__)) inline unsigned long __syscall5(long n, long a1, long a2, long a3, long a4, long a5)
+{
+ unsigned long ret;
+ register long r10 __asm__("r10") = a4;
+ register long r8 __asm__("r8") = a5;
__asm__ volatile("syscall"
: "=a"(ret)
- : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10)
+ : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10), "r"(r8)
: "rcx", "r11", "memory");
return ret;
}
-static inline long __syscall5(long n, long a1, long a2, long a3, long a4,
- long a5)
+__attribute__((__always_inline__)) inline unsigned long __syscall6(long n, long a1, long a2, long a3, long a4, long a5,
+ long a6)
{
unsigned long ret;
register long r10 __asm__("r10") = a4;
register long r8 __asm__("r8") = a5;
+ register long r9 __asm__("r9") = a6;
__asm__ volatile("syscall"
: "=a"(ret)
- : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10), "r"(r8)
+ : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10), "r"(r8), "r"(r9)
: "rcx", "r11", "memory");
return ret;
}
-static inline long __syscall6(long n, long a1, long a2, long a3, long a4,
- long a5, long a6)
+__attribute__((__always_inline__)) inline unsigned long __syscall7(long n, long a1, long a2, long a3, long a4, long a5,
+ long a6, long a7)
{
unsigned long ret;
register long r10 __asm__("r10") = a4;
register long r8 __asm__("r8") = a5;
register long r9 __asm__("r9") = a6;
+ register long r12 __asm__("r12") = a7;
__asm__ volatile("syscall"
: "=a"(ret)
- : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10), "r"(r8),
- "r"(r9)
+ : "a"(n), "D"(a1), "S"(a2), "d"(a3), "r"(r10), "r"(r8), "r"(r9), "r"(r12)
: "rcx", "r11", "memory");
return ret;
}
diff --git a/include/arch/x86_64/asm/vdso.h b/include/arch/x86_64/asm/vdso.h
index 7f0ea2aa..2fb34473 100644
--- a/include/arch/x86_64/asm/vdso.h
+++ b/include/arch/x86_64/asm/vdso.h
@@ -1,22 +1,35 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
+/*
+ * vDSO support interface.
+ *
+ * IMPORTANT:
+ * Do not define storage in this header. This header is included by multiple
+ * translation units; defining `static` variables here causes each TU to get its
+ * own copy, which makes initialization inconsistent and prevents dead-stripping.
+ *
+ * The actual storage/definitions must live in a single .c file (e.g.
+ * `lib/libc/internal/init/vdso.c`), and this header should only declare them.
+ */
+
struct timespec;
#define __VDSO_CLOCK_GETTIME
#define __VDSO_GETCPU
#define __VDSO_TIME
-static int (*__vdso_clock_gettime)(int, struct timespec *) = 0;
-static int (*__vdso_getcpu)(unsigned *, unsigned *, void *) = 0;
-static int (*__vdso_time)(long *) = 0;
+/* Resolved vDSO entry points (set by __init_vdso). */
+extern int (*__vdso_clock_gettime)(int, struct timespec *);
+extern int (*__vdso_getcpu)(unsigned *, unsigned *, void *);
+extern int (*__vdso_time)(long *);
-struct {
+/* Symbol table used by __init_vdso to locate vDSO functions. */
+struct __vdso_sym {
const char *name;
void *func;
-} __vdso_symtab[] = { { "__vdso_clock_gettime", (void *)&__vdso_clock_gettime },
- { "__vdso_getcpu", (void *)&__vdso_getcpu },
- { "__vdso_time", (void *)&__vdso_time },
- { 0, 0 } };
+};
+
+extern struct __vdso_sym __vdso_symtab[];
#endif