infix
A JIT-Powered FFI Library for C
executor.c
#include "common/utility.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Platform-Specific Includes
#if defined(INFIX_OS_WINDOWS)
#include <windows.h>
#else
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#if defined(INFIX_OS_MACOS)
#include <dlfcn.h>
#include <libkern/OSCacheControl.h>
#endif
// Polyfills for mmap flags for maximum POSIX compatibility.
#if defined(INFIX_ENV_POSIX) && !defined(INFIX_OS_WINDOWS)
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
#define MAP_ANON MAP_ANONYMOUS
#endif
static pthread_mutex_t g_dwarf_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

#if defined(INFIX_OS_WINDOWS) && defined(INFIX_ARCH_X64)
// SEH Unwind Info Opcodes and Structures for JIT code on Windows x64.
// These are defined in winnt.h, but we redefine them here for clarity and to ensure availability.
#define UWOP_PUSH_NONVOL 0
#define UWOP_ALLOC_LARGE 1
#define UWOP_ALLOC_SMALL 2
#define UWOP_SET_FPREG 3

#pragma pack(push, 1)
typedef struct _UNWIND_CODE {
    uint8_t CodeOffset;
    uint8_t UnwindOp : 4;
    uint8_t OpInfo : 4;
} UNWIND_CODE;

typedef struct _UNWIND_INFO {
    uint8_t Version : 3;
    uint8_t Flags : 5;
    uint8_t SizeOfPrologue;
    uint8_t CountOfCodes;
    uint8_t FrameRegister : 4;
    uint8_t FrameOffset : 4;
    UNWIND_CODE UnwindCode[1];  // Variable-length array
} UNWIND_INFO;
#pragma pack(pop)

// We reserve 256 bytes at the end of every JIT block for SEH metadata.
#define INFIX_SEH_METADATA_SIZE 256
#elif defined(INFIX_OS_WINDOWS) && defined(INFIX_ARCH_AARCH64)
#pragma pack(push, 1)
typedef struct _UNWIND_INFO_ARM64 {
    uint32_t FunctionLength : 18;
    uint32_t Version : 2;
    uint32_t X : 1;
    uint32_t E : 1;
    uint32_t EpilogueCount : 5;
    uint32_t CodeWords : 5;
} UNWIND_INFO_ARM64;
#pragma pack(pop)
#define INFIX_SEH_METADATA_SIZE 256
#else
#define INFIX_SEH_METADATA_SIZE 0
#endif
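
/*
 * For reference, the INFIX_SEH_METADATA_SIZE headroom at the end of each
 * Windows JIT allocation is laid out as follows (a sketch; exact offsets
 * depend on the alignment performed in the registration functions below):
 *
 *   +---------------------------+  rx_ptr
 *   | machine code (size bytes) |
 *   +---------------------------+  rx_ptr + size
 *   | RUNTIME_FUNCTION (PDATA)  |
 *   | UNWIND_INFO      (XDATA)  |
 *   | handler RVA + handler data|
 *   | personality jump stub     |
 *   +---------------------------+  rx_ptr + size + INFIX_SEH_METADATA_SIZE
 */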

// macOS JIT Security Hardening Logic
#if defined(INFIX_OS_MACOS)
// Minimal CoreFoundation/Security typedefs so we can avoid including the framework headers.
typedef const struct __CFString * CFStringRef;
typedef const void * CFTypeRef;
typedef struct __SecTask * SecTaskRef;
typedef struct __CFError * CFErrorRef;
#define kCFStringEncodingUTF8 0x08000100
// A struct to hold dynamically loaded function pointers from macOS frameworks.
static struct {
    void (*CFRelease)(CFTypeRef);
    bool (*CFBooleanGetValue)(CFTypeRef boolean);
    CFStringRef (*CFStringCreateWithCString)(CFTypeRef allocator, const char * cStr, uint32_t encoding);
    CFTypeRef kCFAllocatorDefault;
    SecTaskRef (*SecTaskCreateFromSelf)(CFTypeRef allocator);
    CFTypeRef (*SecTaskCopyValueForEntitlement)(SecTaskRef task, CFStringRef entitlement, CFErrorRef * error);
    void (*pthread_jit_write_protect_np)(int enabled);
    void (*sys_icache_invalidate)(void * start, size_t len);
} g_macos_apis;
// Lazily resolves the macOS framework functions used by the JIT hardening path.
static void initialize_macos_apis(void) {
    // We don't need to link against these frameworks, which makes building simpler.
    void * cf = dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", RTLD_LAZY);
    void * sec = dlopen("/System/Library/Frameworks/Security.framework/Security", RTLD_LAZY);

    // Hardened Runtime helpers live in libSystem/libpthread.
    g_macos_apis.pthread_jit_write_protect_np = dlsym(RTLD_DEFAULT, "pthread_jit_write_protect_np");
    g_macos_apis.sys_icache_invalidate = dlsym(RTLD_DEFAULT, "sys_icache_invalidate");

    if (!cf || !sec) {
        INFIX_DEBUG_PRINTF("Warning: Could not dlopen macOS frameworks. JIT security features will be degraded.");
        if (cf)
            dlclose(cf);
        if (sec)
            dlclose(sec);
        return;
    }
    g_macos_apis.CFRelease = dlsym(cf, "CFRelease");
    g_macos_apis.CFBooleanGetValue = dlsym(cf, "CFBooleanGetValue");
    g_macos_apis.CFStringCreateWithCString = dlsym(cf, "CFStringCreateWithCString");
    void ** pAlloc = (void **)dlsym(cf, "kCFAllocatorDefault");
    if (pAlloc)
        g_macos_apis.kCFAllocatorDefault = *pAlloc;
    g_macos_apis.SecTaskCreateFromSelf = dlsym(sec, "SecTaskCreateFromSelf");
    g_macos_apis.SecTaskCopyValueForEntitlement = dlsym(sec, "SecTaskCopyValueForEntitlement");
    // System frameworks are never unloaded from the process, so releasing the
    // handles here does not invalidate the function pointers we just resolved.
    dlclose(cf);
    dlclose(sec);
}
// Returns true if the process holds the com.apple.security.cs.allow-jit entitlement.
static bool has_jit_entitlement(void) {
    // Use pthread_once to ensure the dynamic loading happens exactly once, thread-safely.
    static pthread_once_t init_once = PTHREAD_ONCE_INIT;
    pthread_once(&init_once, initialize_macos_apis);

    // The secure JIT path on macOS requires both the entitlement check and the toggle API.
    if (!g_macos_apis.pthread_jit_write_protect_np)
        return false;

    if (!g_macos_apis.SecTaskCopyValueForEntitlement || !g_macos_apis.CFStringCreateWithCString)
        return false;
    bool result = false;
    SecTaskRef task = g_macos_apis.SecTaskCreateFromSelf(g_macos_apis.kCFAllocatorDefault);
    if (!task)
        return false;
    CFStringRef key = g_macos_apis.CFStringCreateWithCString(
        g_macos_apis.kCFAllocatorDefault, "com.apple.security.cs.allow-jit", kCFStringEncodingUTF8);
    CFTypeRef value = nullptr;
    if (key) {
        // This is the core check: ask the system for the value of the entitlement.
        value = g_macos_apis.SecTaskCopyValueForEntitlement(task, key, nullptr);
        g_macos_apis.CFRelease(key);
    }
    g_macos_apis.CFRelease(task);
    if (value) {
        // The entitlement's value is a CFBoolean, so we must extract it.
        if (g_macos_apis.CFBooleanGetValue && g_macos_apis.CFBooleanGetValue(value))
            result = true;
        g_macos_apis.CFRelease(value);
    }
    return result;
}
#endif  // INFIX_OS_MACOS
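
/*
 * For context, the toggle API resolved above is used in Apple's documented
 * hardened-runtime JIT pattern. A minimal sketch (not this library's API;
 * `code` and `len` are placeholders):
 *
 *   void * p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
 *                   MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
 *   pthread_jit_write_protect_np(0);   // this thread may now write to p
 *   memcpy(p, code, len);              // emit machine code
 *   pthread_jit_write_protect_np(1);   // this thread may now execute p
 *   sys_icache_invalidate(p, len);     // flush caches before the first call
 *
 * infix_executable_alloc/infix_executable_make_executable below implement
 * this per-thread toggle when the entitlement check succeeds.
 */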
// Hardened POSIX Anonymous Shared Memory Allocator (for Dual-Mapping W^X)
#if !defined(INFIX_OS_WINDOWS) && !defined(INFIX_OS_MACOS) && !defined(INFIX_OS_ANDROID) && !defined(INFIX_OS_OPENBSD)
#include <fcntl.h>
#include <stdint.h>
#if defined(INFIX_OS_LINUX) && defined(_GNU_SOURCE)
#include <sys/syscall.h>
#endif

// Creates an anonymous, unlinked shared-memory file descriptor, trying the most secure strategy first.
static int create_anonymous_file(void) {
#if defined(INFIX_OS_LINUX) && defined(MFD_CLOEXEC)
    // Strategy 1: memfd_create (Linux 3.17+).
    // MFD_CLOEXEC ensures the FD isn't leaked to child processes.
    int linux_fd = memfd_create("infix_jit", MFD_CLOEXEC);
    if (linux_fd >= 0)
        return linux_fd;
    // If it fails (e.g., old kernel, ENOSYS), fall through to shm_open.
#endif

#if defined(__FreeBSD__) && defined(SHM_ANON)
    // Strategy 2: SHM_ANON (FreeBSD).
    int bsd_fd = shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (bsd_fd >= 0)
        return bsd_fd;
#endif

    // Strategy 3: shm_open with a randomized name (legacy POSIX).
    char shm_name[64];
    uint64_t random_val = 0;
    // Generate a sufficiently random name to avoid collisions if multiple processes
    // run this code simultaneously. /dev/urandom is a robust way to do this.
    int rand_fd = open("/dev/urandom", O_RDONLY);
    if (rand_fd < 0)
        return -1;
    ssize_t bytes_read = read(rand_fd, &random_val, sizeof(random_val));
    close(rand_fd);
    if (bytes_read != sizeof(random_val))
        return -1;

    snprintf(shm_name, sizeof(shm_name), "/infix-jit-%d-%llx", getpid(), (unsigned long long)random_val);
    // Create the shared memory object exclusively.
    int fd = shm_open(shm_name, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd >= 0) {
        // Unlink immediately. The name is removed, but the object persists until the last close().
        shm_unlink(shm_name);
        return fd;
    }
    return -1;
}
#endif
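
/*
 * How this descriptor is used (a condensed sketch of the dual-mapping W^X
 * scheme implemented in infix_executable_alloc below):
 *
 *   int fd = create_anonymous_file();
 *   ftruncate(fd, size);
 *   void * rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   void * rx = mmap(NULL, size, PROT_READ | PROT_EXEC,  MAP_SHARED, fd, 0);
 *   close(fd);   // the mappings keep the shared object alive
 *   // ...write code through rw, execute through rx, munmap(rw) once finalized.
 *
 * No single mapping is ever writable and executable at the same time.
 */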
// Public API: Executable Memory Management

// Allocates a block of executable memory using the platform's W^X strategy.
c23_nodiscard infix_executable_t infix_executable_alloc(size_t size) {
#if defined(INFIX_OS_WINDOWS)
    infix_executable_t exec = {
        .rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .handle = nullptr, .seh_registration = nullptr};
#else
    infix_executable_t exec = {.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .shm_fd = -1, .eh_frame_ptr = nullptr};
#endif
    if (size == 0)
        return exec;

#if defined(INFIX_OS_WINDOWS)
    // Add headroom for SEH metadata on Windows.
    size_t total_size = size + INFIX_SEH_METADATA_SIZE;

    // Windows: single-mapping W^X. Allocate as RW; later changed to RX via VirtualProtect.
    void * code = VirtualAlloc(nullptr, total_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (code == nullptr) {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, (long)GetLastError(), "VirtualAlloc failed");
        return exec;
    }
    exec.rw_ptr = code;
    exec.rx_ptr = code;
#elif defined(INFIX_OS_MACOS) || defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Single-mapping POSIX platforms. Allocate as RW; later changed to RX via mprotect.
    void * code = MAP_FAILED;
#if defined(MAP_ANON)
    int flags = MAP_PRIVATE | MAP_ANON;
#if defined(INFIX_OS_MACOS)
    // On macOS, perform a one-time check for JIT support.
    static bool g_use_secure_jit_path = false;
    static bool g_checked_jit_support = false;
    if (!g_checked_jit_support) {
        g_use_secure_jit_path = has_jit_entitlement();
        INFIX_DEBUG_PRINTF("macOS JIT check: Entitlement found = %s. Using %s API.",
                           g_use_secure_jit_path ? "yes" : "no",
                           g_use_secure_jit_path ? "secure (MAP_JIT)" : "legacy (mprotect)");
        g_checked_jit_support = true;
    }
    // If entitled, use the modern, more secure MAP_JIT flag.
    if (g_use_secure_jit_path)
        flags |= MAP_JIT;
#endif  // INFIX_OS_MACOS
    code = mmap(nullptr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
#if defined(INFIX_OS_MACOS)
    if (code != MAP_FAILED && g_use_secure_jit_path) {
        // Switch this thread to write mode. enabled=0 means writes are allowed.
        g_macos_apis.pthread_jit_write_protect_np(0);
    }
#endif
#endif  // MAP_ANON
    if (code == MAP_FAILED) {  // Fallback for older systems without MAP_ANON
        int fd = open("/dev/zero", O_RDWR);
        if (fd != -1) {
            code = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
            close(fd);
        }
    }
    if (code == MAP_FAILED) {
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, errno, "mmap failed");
        return exec;
    }
    exec.rw_ptr = code;
    exec.rx_ptr = code;
#else
    // Dual-mapping POSIX platforms (e.g., Linux, FreeBSD). Create two separate views of the same memory.
    exec.shm_fd = create_anonymous_file();
    if (exec.shm_fd < 0) {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, errno, "create_anonymous_file failed");
        return exec;
    }
    if (ftruncate(exec.shm_fd, size) != 0) {
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, errno, "ftruncate failed");
        close(exec.shm_fd);
        exec.shm_fd = -1;  // Ensure clean state
        return exec;
    }
    // The RW mapping.
    exec.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, exec.shm_fd, 0);
    // The RX mapping of the exact same physical memory.
    exec.rx_ptr = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, exec.shm_fd, 0);
    // If either mapping fails, clean up both and return an error.
    if (exec.rw_ptr == MAP_FAILED || exec.rx_ptr == MAP_FAILED) {
        int err = errno;  // Capture errno before cleanup
        if (exec.rw_ptr != MAP_FAILED)
            munmap(exec.rw_ptr, size);
        if (exec.rx_ptr != MAP_FAILED)
            munmap(exec.rx_ptr, size);
        close(exec.shm_fd);
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, err, "mmap failed");
        return (infix_executable_t){.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .shm_fd = -1};
    }

    // The mmap mappings hold a reference to the shared memory object, so the FD is
    // no longer needed. Keeping it open would consume a file descriptor per trampoline,
    // causing "shm_open failed" errors after ~1024 trampolines.
    close(exec.shm_fd);
    exec.shm_fd = -1;
#endif
    exec.size = size;
    INFIX_DEBUG_PRINTF("Allocated JIT memory. RW at %p, RX at %p", exec.rw_ptr, exec.rx_ptr);
    return exec;
}

#if defined(INFIX_OS_WINDOWS)
// The personality routine invoked when a native exception unwinds through JIT code.
static EXCEPTION_DISPOSITION _infix_seh_personality_routine(PEXCEPTION_RECORD ExceptionRecord,
                                                            void * EstablisherFrame,
                                                            c23_maybe_unused PCONTEXT ContextRecord,
                                                            void * DispatcherContext) {
    PDISPATCHER_CONTEXT dc = (PDISPATCHER_CONTEXT)DispatcherContext;

    // If we are already unwinding, don't do anything.
    if (ExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
        return ExceptionContinueSearch;

    // Set the thread-local error.
    _infix_set_system_error(
        INFIX_CATEGORY_ABI, INFIX_CODE_NATIVE_EXCEPTION, (long)ExceptionRecord->ExceptionCode, "native exception in JIT code");

    // Retrieve the target epilogue IP from our HandlerData.
    // The HandlerData points to the 4-byte epilogue offset we stored in UNWIND_INFO.
    uint32_t epilogue_offset = *(uint32_t *)dc->HandlerData;
    void * target_ip = (void *)(dc->ImageBase + epilogue_offset);

    // Perform a non-local unwind to the epilogue.
    RtlUnwind(EstablisherFrame, target_ip, ExceptionRecord, nullptr);

    return ExceptionContinueSearch;  // Unreachable
}

#if defined(INFIX_ARCH_X64)
// Internal: Populates and registers SEH metadata for a Windows x64 JIT block.
static void _infix_register_seh_windows_x64(infix_executable_t * exec,
                                            infix_executable_category_t category,
                                            uint32_t prologue_size,
                                            uint32_t epilogue_offset) {
    // The metadata region starts after the machine code.
    uint8_t * metadata_base = (uint8_t *)exec->rw_ptr + exec->size;

    // RUNTIME_FUNCTION (PDATA) - must be 4-byte aligned.
    RUNTIME_FUNCTION * rf = (RUNTIME_FUNCTION *)_infix_align_up((size_t)metadata_base, 4);

    // UNWIND_INFO (XDATA) - follows PDATA.
    UNWIND_INFO * ui = (UNWIND_INFO *)_infix_align_up((size_t)(rf + 1), 2);

    ui->Version = 1;
    ui->Flags = 0;
    if (category == INFIX_EXECUTABLE_SAFE_FORWARD)
        ui->Flags |= UNW_FLAG_EHANDLER;
    ui->FrameRegister = 5;  // RBP
    ui->FrameOffset = 0;
    ui->SizeOfPrologue = (uint8_t)prologue_size;

    if (category == INFIX_EXECUTABLE_REVERSE) {
        // Reverse trampoline prologue: push rbp; push rsi; push rdi; mov rbp, rsp; and rsp, ~mask; [sub rsp, alloc]
        ui->CountOfCodes = 4;
        ui->UnwindCode[0].CodeOffset = 6;  // After mov rbp, rsp
        ui->UnwindCode[0].UnwindOp = UWOP_SET_FPREG;
        ui->UnwindCode[0].OpInfo = 0;

        ui->UnwindCode[1].CodeOffset = 3;  // After push rdi
        ui->UnwindCode[1].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[1].OpInfo = 7;  // RDI

        ui->UnwindCode[2].CodeOffset = 2;  // After push rsi
        ui->UnwindCode[2].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[2].OpInfo = 6;  // RSI

        ui->UnwindCode[3].CodeOffset = 1;  // After push rbp
        ui->UnwindCode[3].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[3].OpInfo = 5;  // RBP
    }
    else {
        // Forward or direct trampoline prologue: push rbp; push r12-r15; mov rbp, rsp; and rsp, -16; [sub rsp, alloc]
        ui->CountOfCodes = 6;
        // Opcodes in reverse order:
        ui->UnwindCode[0].CodeOffset = 12;  // After mov rbp, rsp
        ui->UnwindCode[0].UnwindOp = UWOP_SET_FPREG;
        ui->UnwindCode[0].OpInfo = 0;

        ui->UnwindCode[1].CodeOffset = 9;  // After push r15
        ui->UnwindCode[1].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[1].OpInfo = 15;  // R15

        ui->UnwindCode[2].CodeOffset = 7;  // After push r14
        ui->UnwindCode[2].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[2].OpInfo = 14;  // R14

        ui->UnwindCode[3].CodeOffset = 5;  // After push r13
        ui->UnwindCode[3].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[3].OpInfo = 13;  // R13

        ui->UnwindCode[4].CodeOffset = 3;  // After push r12
        ui->UnwindCode[4].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[4].OpInfo = 12;  // R12

        ui->UnwindCode[5].CodeOffset = 1;  // After push rbp
        ui->UnwindCode[5].UnwindOp = UWOP_PUSH_NONVOL;
        ui->UnwindCode[5].OpInfo = 5;  // RBP
    }

    // Personality routine stub - follows UNWIND_INFO.
    // The ExceptionHandler field is at offset: 4 + ((CountOfCodes + 1) & ~1) * 2
    uint32_t * eh_field_ptr = (uint32_t *)&ui->UnwindCode[(ui->CountOfCodes + 1) & ~1];

    // Position the stub AFTER the ExceptionHandler RVA and HandlerData (8 bytes total).
    uint8_t * stub = (uint8_t *)_infix_align_up((size_t)(eh_field_ptr + 2), 16);

    stub[0] = 0x48;
    stub[1] = 0xB8;  // mov rax, imm64
    *(uint64_t *)(stub + 2) = (uint64_t)_infix_seh_personality_routine;
    stub[10] = 0xFF;
    stub[11] = 0xE0;  // jmp rax

    // BaseAddress should be 64KB-aligned for maximum compatibility.
    DWORD64 base_address = (DWORD64)exec->rx_ptr & ~0xFFFF;
    DWORD rva_offset = (DWORD)((uint8_t *)exec->rx_ptr - (uint8_t *)base_address);

    rf->BeginAddress = rva_offset;  // Relative to BaseAddress
    // EndAddress covers the entire code block.
    rf->EndAddress = rva_offset + (DWORD)exec->size;
    rf->UnwindData = rva_offset + (DWORD)((uint8_t *)ui - (uint8_t *)exec->rx_ptr);

    if (ui->Flags & UNW_FLAG_EHANDLER) {
        // The ExceptionHandler RVA points to our absolute jump stub.
        eh_field_ptr[0] = rva_offset + (uint32_t)(stub - (uint8_t *)exec->rx_ptr);
        // The HandlerData field stores our target epilogue offset.
        eh_field_ptr[1] = epilogue_offset;
    }

    if (RtlAddFunctionTable(rf, 1, base_address)) {
        exec->seh_registration = rf;
        INFIX_DEBUG_PRINTF(
            "Registered SEH PDATA at %p (XDATA at %p, Stub at %p) for JIT code at %p", rf, ui, stub, exec->rx_ptr);
    }
    else {
        fprintf(stderr, "infix: RtlAddFunctionTable failed! GetLastError=%lu\n", GetLastError());
    }
}
#elif defined(INFIX_ARCH_AARCH64)
// Internal: Populates and registers SEH metadata for a Windows ARM64 JIT block.
static void _infix_register_seh_windows_arm64(infix_executable_t * exec,
                                              infix_executable_category_t category,
                                              uint32_t prologue_size,
                                              uint32_t epilogue_offset) {
    uint8_t * metadata_base = (uint8_t *)exec->rw_ptr + exec->size;

    // RUNTIME_FUNCTION (PDATA) - must be 4-byte aligned.
    // On ARM64, we use two entries: one for the function and a sentinel for the end.
    RUNTIME_FUNCTION * rf = (RUNTIME_FUNCTION *)_infix_align_up((size_t)metadata_base, 4);

    // UNWIND_INFO (XDATA) - follows PDATA.
    UNWIND_INFO_ARM64 * ui = (UNWIND_INFO_ARM64 *)_infix_align_up((size_t)(rf + 2), 4);
    infix_memset(ui, 0, sizeof(UNWIND_INFO_ARM64));

    ui->FunctionLength = (uint32_t)(exec->size / 4);
    ui->Version = 0;
    ui->X = (category == INFIX_EXECUTABLE_SAFE_FORWARD);
    ui->E = 0;
    ui->EpilogueCount = 1;

    // Build the unwind codes in a scratch buffer first; the bytes directly after the
    // header are occupied by the epilogue scopes, so writing the codes there would be
    // clobbered before they are copied into place below.
    uint8_t unwind_codes[8] = {0};
    uint32_t code_idx = 0;

    if (category == INFIX_EXECUTABLE_REVERSE) {
        // Reverse prologue: stp x29, x30, [sp, #-16]!; mov x29, sp; sub sp, sp, #alloc
        // Opcodes in REVERSE order:
        unwind_codes[code_idx++] = 0xE1;  // mov x29, sp
        unwind_codes[code_idx++] = 0xC8;  // stp x29, x30, [sp, #-16]!
        unwind_codes[code_idx++] = 0xE4;  // end
    }
    else {
        // Forward or direct prologue:
        // stp x29, x30, [sp, #-16]!; stp x19, x20, ...; stp x21, x22, ...; mov x29, sp; sub sp, sp, #alloc
        unwind_codes[code_idx++] = 0xE1;  // mov x29, sp
        unwind_codes[code_idx++] = 0xD4;  // stp x21, x22, [sp, #-16]!
        unwind_codes[code_idx++] = 0xD2;  // stp x19, x20, [sp, #-16]!
        unwind_codes[code_idx++] = 0xC8;  // stp x29, x30, [sp, #-16]!
        unwind_codes[code_idx++] = 0xE4;  // end
    }

    ui->CodeWords = (code_idx + 3) / 4;

    // On ARM64, if X=1, the Exception Handler RVA and Handler Data follow the epilogue scopes
    // and unwind codes.
    // XDATA layout: [Header] [Epilogue Scopes] [Unwind Codes] [Padding] [Handler RVA] [Handler Data]

    uint32_t * epilogue_scopes = (uint32_t *)(ui + 1);
    // Each epilogue scope is 4 bytes. We have ui->EpilogueCount of them.
    epilogue_scopes[0] = (epilogue_offset / 4);  // Epilogue start index (in instructions)

    uint8_t * unwind_codes_ptr = (uint8_t *)(epilogue_scopes + ui->EpilogueCount);
    // Clear and then copy the codes.
    infix_memset(unwind_codes_ptr, 0, ui->CodeWords * 4);
    infix_memcpy(unwind_codes_ptr, unwind_codes, code_idx);

    // Handler info must follow the unwind codes (already padded to 4 bytes by ui->CodeWords).
    uint32_t * handler_info_ptr = (uint32_t *)(unwind_codes_ptr + ui->CodeWords * 4);

    uint8_t * stub = (uint8_t *)_infix_align_up((size_t)(handler_info_ptr + 2), 16);

    // stub:
    //   ldr x9, personality_addr
    //   br x9
    //   personality_addr: .quad _infix_seh_personality_routine
    *(uint32_t *)stub = 0x58000049;        // ldr x9, #8
    *(uint32_t *)(stub + 4) = 0xD61F0120;  // br x9
    *(uint64_t *)(stub + 8) = (uint64_t)_infix_seh_personality_routine;

    DWORD64 base_address = (DWORD64)exec->rx_ptr & ~0xFFFF;
    DWORD rva_offset = (DWORD)((uint8_t *)exec->rx_ptr - (uint8_t *)base_address);

    rf[0].BeginAddress = rva_offset;
    rf[0].UnwindData = rva_offset + (DWORD)((uint8_t *)ui - (uint8_t *)exec->rx_ptr);

    // The sentinel entry defines the end of the previous function.
    rf[1].BeginAddress = rva_offset + (DWORD)exec->size;
    rf[1].UnwindData = 0;

    if (ui->X) {
        // According to the spec, the Exception Handler RVA and Handler Data
        // are located at the end of the XDATA, which is 4-byte aligned.
        handler_info_ptr[0] = rva_offset + (uint32_t)(stub - (uint8_t *)exec->rx_ptr);
        handler_info_ptr[1] = epilogue_offset;
    }

    if (RtlAddFunctionTable(rf, 2, base_address)) {
        exec->seh_registration = rf;
        INFIX_DEBUG_PRINTF(
            "Registered SEH PDATA at %p (XDATA at %p, Stub at %p) for JIT code at %p", rf, ui, stub, exec->rx_ptr);
    }
    else {
        fprintf(stderr, "infix: RtlAddFunctionTable failed! GetLastError=%lu\n", GetLastError());
    }
}
#endif
#endif

#if defined(INFIX_OS_LINUX) && defined(INFIX_ARCH_X64)
// Internal: Builds and registers a minimal DWARF .eh_frame for a Linux x64 JIT block.
static void _infix_register_eh_frame_linux_x64(infix_executable_t * exec, infix_executable_category_t category) {
    // Simplified .eh_frame layout: [ CIE | FDE | Terminator ]
    const size_t cie_size = 32;
    const size_t fde_size = 64;
    const size_t total_size = cie_size + fde_size + 4;  // +4 for the null terminator

    uint8_t * eh = infix_malloc(total_size);
    if (!eh)
        return;
    infix_memset(eh, 0, total_size);

    uint8_t * p = eh;

    // CIE
    *(uint32_t *)p = (uint32_t)(cie_size - 4);
    p += 4;
    *(uint32_t *)p = 0;
    p += 4;
    *p++ = 1;     // version
    *p++ = '\0';  // augmentation
    *p++ = 1;     // code alignment factor
    *p++ = 0x78;  // data alignment factor (-8 in SLEB128)
    *p++ = 16;    // return address register (rip)

    // Initial state: CFA = rsp + 8, rip at CFA - 8
    *p++ = 0x0c;  // DW_CFA_def_cfa
    *p++ = 0x07;  // rsp
    *p++ = 0x08;  // offset 8
    *p++ = 0x90;  // DW_CFA_offset r16 (rip)
    *p++ = 0x01;  // factored offset 1 (CFA - 8)
    while ((size_t)(p - eh) < cie_size)
        *p++ = 0;

    // FDE
    uint8_t * fde_start = eh + cie_size;
    p = fde_start;
    *(uint32_t *)p = (uint32_t)(fde_size - 4);
    p += 4;
    *(uint32_t *)p = (uint32_t)(p - eh);  // back-offset to the CIE
    p += 4;

    *(void **)p = exec->rx_ptr;
    p += 8;
    *(uint64_t *)p = (uint64_t)exec->size;
    p += 8;
    *p++ = 0;  // augmentation data length

    // Instructions:
    if (category == INFIX_EXECUTABLE_REVERSE) {
        // push rbp; mov rbp, rsp; push rsi; push rdi
        *p++ = 0x41;  // loc +1 (after push rbp)
        *p++ = 0x0e;
        *p++ = 16;    // def_cfa_offset 16
        *p++ = 0x86;
        *p++ = 0x02;  // offset rbp (6), 2
        *p++ = 0x43;  // loc +3 (after mov rbp, rsp)
        *p++ = 0x0d;
        *p++ = 0x06;  // def_cfa_register rbp (6)
        *p++ = 0x41;  // loc +1 (after push rsi)
        *p++ = 0x84;
        *p++ = 0x03;  // offset rsi (4), 3
        *p++ = 0x41;  // loc +1 (after push rdi)
        *p++ = 0x85;
        *p++ = 0x04;  // offset rdi (5), 4
    }
    else {
        // push rbp; mov rbp, rsp; push r12; push r13; push r14; push r15
        *p++ = 0x41;  // loc +1 (after push rbp)
        *p++ = 0x0e;
        *p++ = 16;    // def_cfa_offset 16
        *p++ = 0x86;
        *p++ = 0x02;  // offset rbp (6), 2
        *p++ = 0x43;  // loc +3 (after mov rbp, rsp)
        *p++ = 0x0d;
        *p++ = 0x06;  // def_cfa_register rbp (6)
        *p++ = 0x42;  // loc +2 (after push r12)
        *p++ = 0x8c;
        *p++ = 0x03;  // offset r12, 3
        *p++ = 0x42;  // loc +2 (after push r13)
        *p++ = 0x8d;
        *p++ = 0x04;  // offset r13, 4
        *p++ = 0x42;  // loc +2 (after push r14)
        *p++ = 0x8e;
        *p++ = 0x05;  // offset r14, 5
        *p++ = 0x42;  // loc +2 (after push r15)
        *p++ = 0x8f;
        *p++ = 0x06;  // offset r15, 6
    }

    while ((size_t)(p - eh) < (cie_size + fde_size))
        *p++ = 0;
    *(uint32_t *)p = 0;  // Terminator

    extern void __register_frame(void *);
    pthread_mutex_lock(&g_dwarf_mutex);
    __register_frame(eh);
    pthread_mutex_unlock(&g_dwarf_mutex);

    exec->eh_frame_ptr = eh;
    INFIX_DEBUG_PRINTF("Registered DWARF .eh_frame at %p for JIT code at %p", (void *)eh, exec->rx_ptr);
}
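
/*
 * For orientation, the forward-trampoline FDE above encodes the same state a
 * compiler would describe with assembler CFI directives. An illustrative
 * equivalent (not code emitted by this library):
 *
 *   .cfi_startproc
 *   push rbp          ; .cfi_def_cfa_offset 16
 *                     ; .cfi_offset rbp, -16
 *   mov  rbp, rsp     ; .cfi_def_cfa_register rbp
 *   push r12          ; .cfi_offset r12, -24
 *   push r13          ; .cfi_offset r13, -32
 *   push r14          ; .cfi_offset r14, -40
 *   push r15          ; .cfi_offset r15, -48
 *   .cfi_endproc
 *
 * The factored DWARF offsets above (2, 3, 4, 5, 6) multiply by the data
 * alignment factor of -8 to give these byte offsets from the CFA.
 */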
#elif defined(INFIX_OS_LINUX) && defined(INFIX_ARCH_AARCH64)
// Internal: Builds and registers a minimal DWARF .eh_frame for a Linux AArch64 JIT block.
static void _infix_register_eh_frame_arm64(infix_executable_t * exec, infix_executable_category_t category) {
    // Simplified .eh_frame layout: [ CIE | FDE | Terminator ]
    const size_t cie_size = 32;
    const size_t fde_size = 64;
    const size_t total_size = cie_size + fde_size + 4;  // +4 for the null terminator

    uint8_t * eh = infix_malloc(total_size);
    if (!eh)
        return;
    infix_memset(eh, 0, total_size);

    uint8_t * p = eh;

    // CIE (Common Information Entry)
    *(uint32_t *)p = (uint32_t)(cie_size - 4);
    p += 4;  // length
    *(uint32_t *)p = 0;
    p += 4;       // cie_id (0)
    *p++ = 1;     // version
    *p++ = '\0';  // augmentation string ("")
    *p++ = 4;     // code_alignment_factor (AArch64 instructions are 4 bytes)
    *p++ = 0x78;  // data_alignment_factor (-8 in SLEB128)
    *p++ = 30;    // return_address_register (30 = lr on arm64)

    // CIE instructions: initial state
    // DW_CFA_def_cfa sp, 0
    *p++ = 0x0c;
    *p++ = 31;
    *p++ = 0;
    while ((size_t)(p - eh) < cie_size)
        *p++ = 0;

    // FDE (Frame Description Entry)
    uint8_t * fde_start = eh + cie_size;
    p = fde_start;
    *(uint32_t *)p = (uint32_t)(fde_size - 4);
    p += 4;  // length
    *(uint32_t *)p = (uint32_t)(p - eh);
    p += 4;  // cie_pointer (back-offset)

    *(void **)p = exec->rx_ptr;
    p += 8;  // pc_begin (absolute)
    *(uint64_t *)p = (uint64_t)exec->size;
    p += 8;    // pc_range (absolute)
    *p++ = 0;  // augmentation data length

    // Instructions: match our trampoline prologue
    if (category == INFIX_EXECUTABLE_REVERSE) {
        // stp x29, x30, [sp, #-16]!; mov x29, sp
        *p++ = 0x41;  // loc +1 (4 bytes, after stp)
        *p++ = 0x0e;
        *p++ = 16;    // def_cfa_offset 16
        *p++ = 0x9d;
        *p++ = 2;     // offset r29 (x29), 2 (CFA - 16)
        *p++ = 0x9e;
        *p++ = 1;     // offset r30 (x30/lr), 1 (CFA - 8)
        *p++ = 0x41;  // loc +1 (4 bytes, after mov)
        *p++ = 0x0d;
        *p++ = 29;    // def_cfa_register r29
    }
    else {
        // stp x29, x30, [sp, #-16]!; stp x19, x20, ...; stp x21, x22, ...; mov x29, sp
        *p++ = 0x41;  // after stp x29, x30
        *p++ = 0x0e;
        *p++ = 16;
        *p++ = 0x9d;
        *p++ = 2;     // x29 at CFA - 16
        *p++ = 0x9e;
        *p++ = 1;     // x30 at CFA - 8
        *p++ = 0x41;  // after stp x19, x20
        *p++ = 0x0e;
        *p++ = 32;
        *p++ = 0x93;
        *p++ = 4;     // x19 at CFA - 32
        *p++ = 0x94;
        *p++ = 3;     // x20 at CFA - 24
        *p++ = 0x41;  // after stp x21, x22
        *p++ = 0x0e;
        *p++ = 48;
        *p++ = 0x95;
        *p++ = 6;     // x21 at CFA - 48
        *p++ = 0x96;
        *p++ = 5;     // x22 at CFA - 40
        *p++ = 0x41;  // after mov x29, sp
        *p++ = 0x0d;
        *p++ = 29;    // def_cfa_register x29 (offset remains 48)
    }

    while ((size_t)(p - eh) < (cie_size + fde_size))
        *p++ = 0;
    *(uint32_t *)p = 0;  // Terminator

    // Register the frame with the runtime.
    extern void __register_frame(void *);
    pthread_mutex_lock(&g_dwarf_mutex);
    __register_frame(eh);
    pthread_mutex_unlock(&g_dwarf_mutex);

    exec->eh_frame_ptr = eh;
    INFIX_DEBUG_PRINTF("Registered ARM64 DWARF .eh_frame at %p for JIT code at %p", (void *)eh, exec->rx_ptr);
}
#endif

// Frees a block of executable memory and applies guard pages to prevent use-after-free.
void infix_executable_free(infix_executable_t exec) {
    if (exec.size == 0)
        return;
#if defined(INFIX_OS_WINDOWS)
#if defined(INFIX_ARCH_X64) || defined(INFIX_ARCH_AARCH64)
    if (exec.seh_registration)
        RtlDeleteFunctionTable((PRUNTIME_FUNCTION)exec.seh_registration);
#endif
    if (exec.rw_ptr) {
        // Change protection to NOACCESS to catch use-after-free bugs immediately.
        if (!VirtualProtect(exec.rw_ptr, exec.size, PAGE_NOACCESS, &(DWORD){0}))
            INFIX_DEBUG_PRINTF("WARNING: VirtualProtect failed to set PAGE_NOACCESS guard page.");
        VirtualFree(exec.rw_ptr, 0, MEM_RELEASE);
    }
#elif defined(INFIX_OS_MACOS)
    // On macOS with MAP_JIT, the memory is managed with special thread-local permissions.
    // We only need to unmap the single mapping.
    if (exec.rw_ptr) {
        // Creating a guard page before unmapping is good practice.
        mprotect(exec.rw_ptr, exec.size, PROT_NONE);
        munmap(exec.rw_ptr, exec.size);
    }
#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Other single-mapping POSIX systems.
    if (exec.rw_ptr) {
        mprotect(exec.rw_ptr, exec.size, PROT_NONE);
        munmap(exec.rw_ptr, exec.size);
    }
#else
    // Dual-mapping POSIX: deregister unwind info, then protect and unmap both views.
    if (exec.eh_frame_ptr) {
        extern void __deregister_frame(void *);
        pthread_mutex_lock(&g_dwarf_mutex);
        __deregister_frame(exec.eh_frame_ptr);
        pthread_mutex_unlock(&g_dwarf_mutex);
        infix_free(exec.eh_frame_ptr);
    }
    if (exec.rx_ptr)
        mprotect(exec.rx_ptr, exec.size, PROT_NONE);
    if (exec.rw_ptr)
        munmap(exec.rw_ptr, exec.size);
    if (exec.rx_ptr && exec.rx_ptr != exec.rw_ptr)  // rw_ptr may equal rx_ptr on some configurations
        munmap(exec.rx_ptr, exec.size);
    if (exec.shm_fd >= 0)
        close(exec.shm_fd);
#endif
}

// Finalizes a JIT block: flushes caches, registers unwind info, and flips the W^X permissions.
c23_nodiscard bool infix_executable_make_executable(infix_executable_t * exec,
                                                    c23_maybe_unused infix_executable_category_t category,
                                                    c23_maybe_unused uint32_t prologue_size,
                                                    c23_maybe_unused uint32_t epilogue_offset) {
    if (exec->rw_ptr == nullptr || exec->size == 0)
        return false;

    // On AArch64 (and other RISC architectures), the instruction and data caches can be
    // separate. We must explicitly flush the D-cache (where the JIT wrote the code)
    // and invalidate the I-cache so the CPU fetches the new instructions.
    // We might as well do it on x64 too.
#if defined(INFIX_COMPILER_MSVC)
    // Use the Windows-specific API.
    FlushInstructionCache(GetCurrentProcess(), exec->rw_ptr, exec->size);
#elif defined(INFIX_OS_MACOS)
    // Use the Apple-specific API if available (required for correctness on Apple Silicon).
    if (g_macos_apis.sys_icache_invalidate)
        g_macos_apis.sys_icache_invalidate(exec->rw_ptr, exec->size);
    else
        __builtin___clear_cache((char *)exec->rw_ptr, (char *)exec->rw_ptr + exec->size);
#elif defined(INFIX_ARCH_AARCH64)
    // Robust manual cache maintenance for AArch64 Linux/BSD.
    // We clean the D-cache to the point of unification and invalidate the I-cache.
    uintptr_t start = (uintptr_t)exec->rw_ptr;
    uintptr_t end = start + exec->size;
    uintptr_t ctr_el0;
    __asm__ __volatile__("mrs %0, ctr_el0" : "=r"(ctr_el0));

    // D-cache line size is in bits [19:16] as log2 of the number of words.
    uintptr_t d_line_size = 4 << ((ctr_el0 >> 16) & 0xf);
    for (uintptr_t addr = start & ~(d_line_size - 1); addr < end; addr += d_line_size)
        __asm__ __volatile__("dc cvau, %0" ::"r"(addr) : "memory");
    __asm__ __volatile__("dsb ish" ::: "memory");

    // I-cache line size is in bits [3:0] as log2 of the number of words.
    uintptr_t i_line_size = 4 << (ctr_el0 & 0xf);
    for (uintptr_t addr = start & ~(i_line_size - 1); addr < end; addr += i_line_size)
        __asm__ __volatile__("ic ivau, %0" ::"r"(addr) : "memory");
    __asm__ __volatile__("dsb ish\n\tisb" ::: "memory");
#else
    // Use the GCC/Clang built-in on other platforms.
    __builtin___clear_cache((char *)exec->rw_ptr, (char *)exec->rw_ptr + exec->size);
#endif

    bool result = false;
#if defined(INFIX_OS_WINDOWS)
    // On Windows, we register SEH unwind info before making the memory executable.
#if defined(INFIX_ARCH_X64)
    _infix_register_seh_windows_x64(exec, category, prologue_size, epilogue_offset);
#elif defined(INFIX_ARCH_AARCH64)
    _infix_register_seh_windows_arm64(exec, category, prologue_size, epilogue_offset);
#endif
    // Finalize permissions to read + execute.
    // We include the SEH metadata in the protected region.
    result = VirtualProtect(exec->rw_ptr, exec->size + INFIX_SEH_METADATA_SIZE, PAGE_EXECUTE_READ, &(DWORD){0});
    if (!result)
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, (long)GetLastError(), "VirtualProtect failed");
#elif defined(INFIX_OS_MACOS)
    static bool g_use_secure_jit_path = false;
    static bool g_checked_jit_support = false;
    if (!g_checked_jit_support) {
        g_use_secure_jit_path = has_jit_entitlement();
        g_checked_jit_support = true;
    }

    if (g_use_secure_jit_path && g_macos_apis.pthread_jit_write_protect_np) {
        // Switch the thread state to execute-allowed (enabled=1).
        g_macos_apis.pthread_jit_write_protect_np(1);
        result = true;
    }
    else {
        result = (mprotect(exec->rw_ptr, exec->size, PROT_READ | PROT_EXEC) == 0);
    }
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "mprotect failed");
#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Other single-mapping POSIX platforms use mprotect.
    result = (mprotect(exec->rw_ptr, exec->size, PROT_READ | PROT_EXEC) == 0);
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "mprotect failed");
#else
    // Dual-mapping POSIX (Linux, FreeBSD).
    // The RX mapping is already executable.
#if defined(INFIX_OS_LINUX) && defined(INFIX_ARCH_X64)
    _infix_register_eh_frame_linux_x64(exec, category);
#elif defined(INFIX_OS_LINUX) && defined(INFIX_ARCH_AARCH64)
    _infix_register_eh_frame_arm64(exec, category);
#endif
    // SECURITY CRITICAL: We MUST unmap the RW view now. If we leave it mapped,
    // an attacker with a heap disclosure could find it and overwrite the JIT code,
    // bypassing W^X.
    if (munmap(exec->rw_ptr, exec->size) == 0) {
        exec->rw_ptr = nullptr;  // Clear the pointer to prevent double-free or misuse.
        result = true;
    }
    else {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "munmap of RW view failed");
        result = false;
    }
#endif
    if (result)
        INFIX_DEBUG_PRINTF("Memory at %p is now executable.", exec->rx_ptr);
    return result;
}
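
/*
 * Putting the pieces together, the intended lifecycle of a JIT block is
 * (a minimal sketch using the functions in this file; `emit`, `category`,
 * `prologue_size`, and `epilogue_offset` are placeholders for values the
 * library's code generator supplies):
 *
 *   infix_executable_t exec = infix_executable_alloc(code_size);
 *   if (exec.rw_ptr == nullptr)
 *       return;                                  // allocation failed
 *   emit(exec.rw_ptr, code_size);                // write code via the RW view
 *   if (!infix_executable_make_executable(&exec, category,
 *                                         prologue_size, epilogue_offset)) {
 *       infix_executable_free(exec);
 *       return;
 *   }
 *   ((void (*)(void))exec.rx_ptr)();             // call via the RX view
 *   infix_executable_free(exec);                 // unmap + guard pages
 */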
// Public API: Protected (Read-Only) Memory

// Allocates a block of standard memory for later protection.
c23_nodiscard infix_protected_t infix_protected_alloc(size_t size) {
    infix_protected_t prot = {.rw_ptr = nullptr, .size = 0};
    if (size == 0)
        return prot;
#if defined(INFIX_OS_WINDOWS)
    prot.rw_ptr = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (!prot.rw_ptr)
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_OUT_OF_MEMORY, (long)GetLastError(), "VirtualAlloc failed");
#else
#if defined(MAP_ANON)
    prot.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
    int fd = open("/dev/zero", O_RDWR);
    if (fd == -1)
        prot.rw_ptr = MAP_FAILED;
    else {
        prot.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        close(fd);
    }
#endif
    if (prot.rw_ptr == MAP_FAILED) {
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_OUT_OF_MEMORY, errno, "mmap failed");
        prot.rw_ptr = nullptr;
    }
#endif
    if (prot.rw_ptr)
        prot.size = size;
    return prot;
}

// Frees a block of protected memory.
void infix_protected_free(infix_protected_t prot) {
    if (prot.size == 0)
        return;
#if defined(INFIX_OS_WINDOWS)
    VirtualFree(prot.rw_ptr, 0, MEM_RELEASE);
#else
    munmap(prot.rw_ptr, prot.size);
#endif
}

// Makes a block of memory read-only for security hardening.
c23_nodiscard bool infix_protected_make_readonly(infix_protected_t prot) {
    if (prot.size == 0)
        return false;
    bool result = false;
#if defined(INFIX_OS_WINDOWS)
    result = VirtualProtect(prot.rw_ptr, prot.size, PAGE_READONLY, &(DWORD){0});
    if (!result)
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, (long)GetLastError(), "VirtualProtect failed");
#else
    result = (mprotect(prot.rw_ptr, prot.size, PROT_READ) == 0);
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "mprotect failed");
#endif
    return result;
}
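
/*
 * Typical use of the protected-memory API (a short sketch; `my_table` is an
 * illustrative placeholder for data that should become immutable):
 *
 *   infix_protected_t prot = infix_protected_alloc(sizeof(my_table));
 *   if (prot.rw_ptr) {
 *       infix_memcpy(prot.rw_ptr, &my_table, sizeof(my_table));
 *       if (!infix_protected_make_readonly(prot))   // harden: no more writes
 *           infix_protected_free(prot);
 *   }
 */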
// Universal Reverse Call Dispatcher

// The universal C entry point for all reverse call trampolines.
void infix_internal_dispatch_callback_fn_impl(infix_reverse_t * context, void * return_value_ptr, void ** args_array) {
    INFIX_DEBUG_PRINTF("Dispatching reverse call. Context: %p, User Fn: %p, ret=%p, args=%p",
                       (void *)context,
                       context->user_callback_fn,
                       return_value_ptr,
                       (void *)args_array);
    if (args_array) {
        for (size_t i = 0; i < context->num_args; i++) {
            INFIX_DEBUG_PRINTF(
                " args[%zu] = %p (val: 0x%04X)", i, args_array[i], args_array[i] ? *(uint16_t *)args_array[i] : 0);
        }
    }
    if (context->user_callback_fn == nullptr) {
        // If no handler is set, do nothing. If the function has a return value,
        // zero it out so we never return garbage.
        if (return_value_ptr && context->return_type->size > 0)
            infix_memset(return_value_ptr, 0, context->return_type->size);
        return;
    }
    if (context->cached_forward_trampoline != nullptr) {
        // Path 1: Type-safe "callback". Use the pre-generated forward trampoline to
        // call the user's C function with the correct signature. This is efficient
        // and provides a clean interface for the C developer.
        infix_cif_func cif_func = infix_forward_get_code(context->cached_forward_trampoline);
        cif_func(return_value_ptr, args_array);
    }
    else {
        // Path 2: Generic "closure". Directly call the user's generic handler.
        // This path is more flexible and is intended for language bindings where the
        // handler needs access to the context and raw argument pointers.
        infix_closure_handler_fn handler = (infix_closure_handler_fn)context->user_callback_fn;
        handler(context, return_value_ptr, args_array);
    }
    INFIX_DEBUG_PRINTF("Exiting reverse call dispatcher.");
}
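
/*
 * Path 2 above is what language bindings hook into. A generic handler has the
 * infix_closure_handler_fn shape (sketch; `my_handler` is illustrative):
 *
 *   void my_handler(infix_context_t * ctx, void * ret, void ** args) {
 *       // args[i] points at the storage of the i-th argument; write the
 *       // (optional) return value through `ret`.
 *   }
 */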