infix
A JIT-Powered FFI Library for C
executor.c
#include "common/utility.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Platform-Specific Includes
#if defined(INFIX_OS_WINDOWS)
#include <windows.h>
#else
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#if defined(INFIX_OS_MACOS)
#include <dlfcn.h>
#include <libkern/OSCacheControl.h>
#include <pthread.h>
#endif
// Polyfills for mmap flags for maximum POSIX compatibility.
#if defined(INFIX_ENV_POSIX) && !defined(INFIX_OS_WINDOWS)
#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
#define MAP_ANON MAP_ANONYMOUS
#endif
#endif
// macOS JIT Security Hardening Logic
#if defined(INFIX_OS_MACOS)
typedef const struct __CFString * CFStringRef;
typedef const void * CFTypeRef;
typedef struct __SecTask * SecTaskRef;
typedef struct __CFError * CFErrorRef;
#define kCFStringEncodingUTF8 0x08000100
// A struct to hold dynamically loaded function pointers from macOS frameworks.
static struct {
    void (*CFRelease)(CFTypeRef);
    bool (*CFBooleanGetValue)(CFTypeRef boolean);
    CFStringRef (*CFStringCreateWithCString)(CFTypeRef allocator, const char * cStr, uint32_t encoding);
    CFTypeRef kCFAllocatorDefault;
    SecTaskRef (*SecTaskCreateFromSelf)(CFTypeRef allocator);
    CFTypeRef (*SecTaskCopyValueForEntitlement)(SecTaskRef task, CFStringRef entitlement, CFErrorRef * error);
    void (*pthread_jit_write_protect_np)(int enabled);
    void (*sys_icache_invalidate)(void * start, size_t len);
} g_macos_apis;
static void initialize_macos_apis(void) {
    // We don't need to link against these frameworks, which makes building simpler.
    void * cf = dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", RTLD_LAZY);
    void * sec = dlopen("/System/Library/Frameworks/Security.framework/Security", RTLD_LAZY);

    // Hardened Runtime helpers found in libSystem/libpthread
    g_macos_apis.pthread_jit_write_protect_np = dlsym(RTLD_DEFAULT, "pthread_jit_write_protect_np");
    g_macos_apis.sys_icache_invalidate = dlsym(RTLD_DEFAULT, "sys_icache_invalidate");

    if (!cf || !sec) {
        INFIX_DEBUG_PRINTF("Warning: Could not dlopen macOS frameworks. JIT security features will be degraded.");
        if (cf)
            dlclose(cf);
        if (sec)
            dlclose(sec);
        return;
    }
    g_macos_apis.CFRelease = dlsym(cf, "CFRelease");
    g_macos_apis.CFBooleanGetValue = dlsym(cf, "CFBooleanGetValue");
    g_macos_apis.CFStringCreateWithCString = dlsym(cf, "CFStringCreateWithCString");
    void ** pAlloc = (void **)dlsym(cf, "kCFAllocatorDefault");
    if (pAlloc)
        g_macos_apis.kCFAllocatorDefault = *pAlloc;
    g_macos_apis.SecTaskCreateFromSelf = dlsym(sec, "SecTaskCreateFromSelf");
    g_macos_apis.SecTaskCopyValueForEntitlement = dlsym(sec, "SecTaskCopyValueForEntitlement");
    dlclose(cf);
    dlclose(sec);
}
static bool has_jit_entitlement(void) {
    // Use pthread_once to ensure the dynamic loading happens exactly once, thread-safely.
    static pthread_once_t init_once = PTHREAD_ONCE_INIT;
    pthread_once(&init_once, initialize_macos_apis);

    // Secure JIT path on macOS requires both the entitlement check and the toggle API.
    if (!g_macos_apis.pthread_jit_write_protect_np)
        return false;

    if (!g_macos_apis.SecTaskCopyValueForEntitlement || !g_macos_apis.CFStringCreateWithCString)
        return false;
    bool result = false;
    SecTaskRef task = g_macos_apis.SecTaskCreateFromSelf(g_macos_apis.kCFAllocatorDefault);
    if (!task)
        return false;
    CFStringRef key = g_macos_apis.CFStringCreateWithCString(
        g_macos_apis.kCFAllocatorDefault, "com.apple.security.cs.allow-jit", kCFStringEncodingUTF8);
    CFTypeRef value = nullptr;
    if (key) {
        // This is the core check: ask the system for the value of the entitlement.
        value = g_macos_apis.SecTaskCopyValueForEntitlement(task, key, nullptr);
        g_macos_apis.CFRelease(key);
    }
    g_macos_apis.CFRelease(task);
    if (value) {
        // The value of the entitlement is a CFBoolean, so we must extract its value.
        if (g_macos_apis.CFBooleanGetValue && g_macos_apis.CFBooleanGetValue(value))
            result = true;
        g_macos_apis.CFRelease(value);
    }
    return result;
}
#endif // INFIX_OS_MACOS
// Hardened POSIX Anonymous Shared Memory Allocator (for Dual-Mapping W^X)
#if !defined(INFIX_OS_WINDOWS) && !defined(INFIX_OS_MACOS) && !defined(INFIX_OS_ANDROID) && !defined(INFIX_OS_OPENBSD)
#include <fcntl.h>
#include <stdint.h>
#if defined(__linux__) && defined(_GNU_SOURCE)
#include <sys/syscall.h>
#endif

static int create_anonymous_file(void) {
#if defined(__linux__) && defined(MFD_CLOEXEC)
    // Strategy 1: memfd_create (Linux 3.17+)
    // MFD_CLOEXEC ensures the FD isn't leaked to child processes.
    int linux_fd = memfd_create("infix_jit", MFD_CLOEXEC);
    if (linux_fd >= 0)
        return linux_fd;
    // If it fails (e.g. old kernel, ENOSYS), fall through to shm_open.
#endif

#if defined(__FreeBSD__) && defined(SHM_ANON)
    // Strategy 2: SHM_ANON (FreeBSD)
    int bsd_fd = shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (bsd_fd >= 0)
        return bsd_fd;
#endif

    // Strategy 3: shm_open with randomized name (Legacy POSIX)
    char shm_name[64];
    uint64_t random_val = 0;
    // Generate a sufficiently random name to avoid collisions if multiple processes
    // are running this code simultaneously. Using /dev/urandom is a robust way to do this.
    int rand_fd = open("/dev/urandom", O_RDONLY);
    if (rand_fd < 0)
        return -1;
    ssize_t bytes_read = read(rand_fd, &random_val, sizeof(random_val));
    close(rand_fd);
    if (bytes_read != sizeof(random_val))
        return -1;

    snprintf(shm_name, sizeof(shm_name), "/infix-jit-%d-%llx", getpid(), (unsigned long long)random_val);
    // Create the shared memory object exclusively.
    int fd = shm_open(shm_name, O_RDWR | O_CREAT | O_EXCL, 0600);
    if (fd >= 0) {
        // Unlink immediately. The name is removed, but the inode persists until close().
        shm_unlink(shm_name);
        return fd;
    }
    return -1;
}
#endif
// Public API: Executable Memory Management
c23_nodiscard infix_executable_t infix_executable_alloc(size_t size) {
#if defined(INFIX_OS_WINDOWS)
    infix_executable_t exec = {.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .handle = nullptr};
#else
    infix_executable_t exec = {.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .shm_fd = -1};
#endif
    if (size == 0)
        return exec;
#if defined(INFIX_OS_WINDOWS)
    // Windows: Single-mapping W^X. Allocate as RW, later change to RX via VirtualProtect.
    void * code = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (code == nullptr) {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, GetLastError(), "VirtualAlloc failed");
        return exec;
    }
    exec.rw_ptr = code;
    exec.rx_ptr = code;
#elif defined(INFIX_OS_MACOS) || defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Single-mapping POSIX platforms. Allocate as RW, later change to RX via mprotect.
    void * code = MAP_FAILED;
#if defined(MAP_ANON)
    int flags = MAP_PRIVATE | MAP_ANON;
#if defined(INFIX_OS_MACOS)
    // On macOS, we perform a one-time check for JIT support.
    static bool g_use_secure_jit_path = false;
    static bool g_checked_jit_support = false;
    if (!g_checked_jit_support) {
        g_use_secure_jit_path = has_jit_entitlement();
        INFIX_DEBUG_PRINTF("macOS JIT check: Entitlement found = %s. Using %s API.",
                           g_use_secure_jit_path ? "yes" : "no",
                           g_use_secure_jit_path ? "secure (MAP_JIT)" : "legacy (mprotect)");
        g_checked_jit_support = true;
    }
    // If entitled, use the modern, more secure MAP_JIT flag.
    if (g_use_secure_jit_path)
        flags |= MAP_JIT;
#endif // INFIX_OS_MACOS
    code = mmap(nullptr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
#if defined(INFIX_OS_MACOS)
    if (code != MAP_FAILED && g_use_secure_jit_path) {
        // Switch thread to Write mode. enabled=0 means Write allowed.
        g_macos_apis.pthread_jit_write_protect_np(0);
    }
#endif
#endif // MAP_ANON
    if (code == MAP_FAILED) { // Fallback for older systems without MAP_ANON
        int fd = open("/dev/zero", O_RDWR);
        if (fd != -1) {
            code = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
            close(fd);
        }
    }
    if (code == MAP_FAILED) {
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, errno, "mmap failed");
        return exec;
    }
    exec.rw_ptr = code;
    exec.rx_ptr = code;
#else
    // Dual-mapping POSIX platforms (e.g., Linux, FreeBSD). Create two separate views of the same memory.
    exec.shm_fd = create_anonymous_file();
    if (exec.shm_fd < 0) {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, errno, "create_anonymous_file failed");
        return exec;
    }
    if (ftruncate(exec.shm_fd, size) != 0) {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, errno, "ftruncate failed");
        close(exec.shm_fd);
        exec.shm_fd = -1; // Ensure clean state
        return exec;
    }
    // The RW mapping.
    exec.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, exec.shm_fd, 0);
    // The RX mapping of the exact same physical memory.
    exec.rx_ptr = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, exec.shm_fd, 0);
    // If either mapping fails, clean up both and return an error.
    if (exec.rw_ptr == MAP_FAILED || exec.rx_ptr == MAP_FAILED) {
        int err = errno; // Capture errno before cleanup
        if (exec.rw_ptr != MAP_FAILED)
            munmap(exec.rw_ptr, size);
        if (exec.rx_ptr != MAP_FAILED)
            munmap(exec.rx_ptr, size);
        close(exec.shm_fd);
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_EXECUTABLE_MEMORY_FAILURE, err, "mmap failed");
        return (infix_executable_t){.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .shm_fd = -1};
    }

    // The mmap mappings hold a reference to the shared memory object, so we don't
    // need the FD anymore. Keeping it open consumes a file descriptor per trampoline,
    // causing "shm_open failed" after ~1024 trampolines.
    close(exec.shm_fd);
    exec.shm_fd = -1;
#endif
    exec.size = size;
    INFIX_DEBUG_PRINTF("Allocated JIT memory. RW at %p, RX at %p", exec.rw_ptr, exec.rx_ptr);
    return exec;
}
void infix_executable_free(infix_executable_t exec) {
    if (exec.size == 0)
        return;
#if defined(INFIX_OS_WINDOWS)
    if (exec.rw_ptr) {
        // Change protection to NOACCESS to catch use-after-free bugs immediately.
        if (!VirtualProtect(exec.rw_ptr, exec.size, PAGE_NOACCESS, &(DWORD){0}))
            INFIX_DEBUG_PRINTF("WARNING: VirtualProtect failed to set PAGE_NOACCESS guard page.");
        VirtualFree(exec.rw_ptr, 0, MEM_RELEASE);
    }
#elif defined(INFIX_OS_MACOS)
    // On macOS with MAP_JIT, the memory is managed with special thread-local permissions.
    // We only need to unmap the single mapping.
    if (exec.rw_ptr) {
        // Creating a guard page before unmapping is good practice.
        mprotect(exec.rw_ptr, exec.size, PROT_NONE);
        munmap(exec.rw_ptr, exec.size);
    }
#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Other single-mapping POSIX systems.
    if (exec.rw_ptr) {
        mprotect(exec.rw_ptr, exec.size, PROT_NONE);
        munmap(exec.rw_ptr, exec.size);
    }
#else
    // Dual-mapping POSIX: protect and unmap both views.
    if (exec.rx_ptr)
        mprotect(exec.rx_ptr, exec.size, PROT_NONE);
    if (exec.rw_ptr)
        munmap(exec.rw_ptr, exec.size);
    if (exec.rx_ptr && exec.rx_ptr != exec.rw_ptr) // rw_ptr might be same as rx_ptr on some platforms
        munmap(exec.rx_ptr, exec.size);
    if (exec.shm_fd >= 0)
        close(exec.shm_fd);
#endif
}
c23_nodiscard bool infix_executable_make_executable(infix_executable_t * exec) {
    if (exec->rw_ptr == nullptr || exec->size == 0)
        return false;
    // On AArch64 (and other RISC architectures), the instruction and data caches can be
    // separate. We must explicitly flush the D-cache (where the JIT wrote the code)
    // and invalidate the I-cache so the CPU fetches the new instructions.
    // We might as well do it on x64 too.
#if defined(_MSC_VER)
    // Use the Windows-specific API.
    FlushInstructionCache(GetCurrentProcess(), exec->rw_ptr, exec->size);
#elif defined(INFIX_OS_MACOS)
    // Use the Apple-specific API if available (required for Apple Silicon correctness).
    if (g_macos_apis.sys_icache_invalidate)
        g_macos_apis.sys_icache_invalidate(exec->rw_ptr, exec->size);
    else
        __builtin___clear_cache((char *)exec->rw_ptr, (char *)exec->rw_ptr + exec->size);
#else
    // Use the GCC/Clang built-in for other platforms.
    __builtin___clear_cache((char *)exec->rw_ptr, (char *)exec->rw_ptr + exec->size);
#endif
    bool result = false;
#if defined(INFIX_OS_WINDOWS)
    // Finalize permissions to Read+Execute.
    result = VirtualProtect(exec->rw_ptr, exec->size, PAGE_EXECUTE_READ, &(DWORD){0});
    if (!result)
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, GetLastError(), "VirtualProtect failed");
#elif defined(INFIX_OS_MACOS)
    static bool g_use_secure_jit_path = false;
    static bool g_checked_jit_support = false;
    if (!g_checked_jit_support) {
        g_use_secure_jit_path = has_jit_entitlement();
        g_checked_jit_support = true;
    }

    if (g_use_secure_jit_path && g_macos_apis.pthread_jit_write_protect_np) {
        // Switch thread state to Execute allowed (enabled=1).
        g_macos_apis.pthread_jit_write_protect_np(1);
        result = true;
    }
    else {
        result = (mprotect(exec->rw_ptr, exec->size, PROT_READ | PROT_EXEC) == 0);
    }
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "mprotect failed");
#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
    // Other single-mapping POSIX platforms use mprotect.
    result = (mprotect(exec->rw_ptr, exec->size, PROT_READ | PROT_EXEC) == 0);
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "mprotect failed");
#else
    // Dual-mapping POSIX (Linux, FreeBSD).
    // The RX mapping is already executable.
    // SECURITY CRITICAL: We MUST unmap the RW view now. If we leave it mapped,
    // an attacker with a heap disclosure could find it and overwrite the JIT code,
    // bypassing W^X.
    if (munmap(exec->rw_ptr, exec->size) == 0) {
        exec->rw_ptr = nullptr; // Clear the pointer to prevent double-free or misuse.
        result = true;
    }
    else {
        _infix_set_system_error(
            INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "munmap of RW view failed");
        result = false;
    }
#endif
    if (result)
        INFIX_DEBUG_PRINTF("Memory at %p is now executable.", exec->rx_ptr);
    return result;
}
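
// Illustrative sketch (not part of the original source): how a caller is expected to drive
// the W^X lifecycle implemented by the three functions above. The buffer size and the byte
// being emitted (0xC3, an x86-64 `ret`) are assumptions chosen only for this example; real
// callers emit a full trampoline before sealing the memory. Guarded out of compilation.
#if 0
static void example_jit_lifecycle(void) {
    infix_executable_t exec = infix_executable_alloc(4096);
    if (exec.rw_ptr == nullptr)
        return;
    // 1. Emit machine code through the writable view.
    ((unsigned char *)exec.rw_ptr)[0] = 0xC3;
    // 2. Seal the memory: flush caches, drop the RW view where applicable, enable execution.
    if (infix_executable_make_executable(&exec)) {
        // 3. Call through the executable view.
        void (*fn)(void) = (void (*)(void))exec.rx_ptr;
        fn();
    }
    // 4. Release with guard-page hardening.
    infix_executable_free(exec);
}
#endif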
// Public API: Protected (Read-Only) Memory
c23_nodiscard infix_protected_t infix_protected_alloc(size_t size) {
    infix_protected_t prot = {.rw_ptr = nullptr, .size = 0};
    if (size == 0)
        return prot;
#if defined(INFIX_OS_WINDOWS)
    prot.rw_ptr = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (!prot.rw_ptr)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_OUT_OF_MEMORY, GetLastError(), "VirtualAlloc failed");
#else
#if defined(MAP_ANON)
    prot.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
    int fd = open("/dev/zero", O_RDWR);
    if (fd == -1)
        prot.rw_ptr = MAP_FAILED;
    else {
        prot.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        close(fd);
    }
#endif
    if (prot.rw_ptr == MAP_FAILED) {
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_OUT_OF_MEMORY, errno, "mmap failed");
        prot.rw_ptr = nullptr;
    }
#endif
    if (prot.rw_ptr)
        prot.size = size;
    return prot;
}
void infix_protected_free(infix_protected_t prot) {
    if (prot.size == 0)
        return;
#if defined(INFIX_OS_WINDOWS)
    VirtualFree(prot.rw_ptr, 0, MEM_RELEASE);
#else
    munmap(prot.rw_ptr, prot.size);
#endif
}
c23_nodiscard bool infix_protected_make_readonly(infix_protected_t prot) {
    if (prot.size == 0)
        return false;
    bool result = false;
#if defined(INFIX_OS_WINDOWS)
    result = VirtualProtect(prot.rw_ptr, prot.size, PAGE_READONLY, &(DWORD){0});
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, GetLastError(), "VirtualProtect failed");
#else
    result = (mprotect(prot.rw_ptr, prot.size, PROT_READ) == 0);
    if (!result)
        _infix_set_system_error(INFIX_CATEGORY_ALLOCATION, INFIX_CODE_PROTECTION_FAILURE, errno, "mprotect failed");
#endif
    return result;
}
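
// Illustrative sketch (not part of the original source): the intended use of the protected-memory
// helpers above for data that must not change after initialization. The buffer size and the
// zero-fill payload are assumptions chosen only for this example. Guarded out of compilation.
#if 0
static void example_protected_lifecycle(void) {
    infix_protected_t prot = infix_protected_alloc(64);
    if (prot.rw_ptr == nullptr)
        return;
    memset(prot.rw_ptr, 0, prot.size);            // Populate while still writable.
    if (!infix_protected_make_readonly(prot)) {   // Harden: any later write now faults.
        infix_protected_free(prot);
        return;
    }
    // ...use the read-only data for the object's lifetime...
    infix_protected_free(prot);
}
#endif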
// Universal Reverse Call Dispatcher
void infix_internal_dispatch_callback_fn_impl(infix_reverse_t * context, void * return_value_ptr, void ** args_array) {
    INFIX_DEBUG_PRINTF(
        "Dispatching reverse call. Context: %p, User Fn: %p", (void *)context, context->user_callback_fn);
    if (context->user_callback_fn == nullptr) {
        // If no handler is set, do nothing. If the function has a return value,
        // it's good practice to zero it out to avoid returning garbage.
        if (return_value_ptr && context->return_type->size > 0)
            infix_memset(return_value_ptr, 0, context->return_type->size);
        return;
    }
    if (context->cached_forward_trampoline != nullptr) {
        // Path 1: Type-safe "callback". Use the pre-generated forward trampoline to
        // call the user's C function with the correct signature. This is efficient
        // and provides a clean interface for the C developer.
        infix_cif_func cif_func = infix_forward_get_code(context->cached_forward_trampoline);
        cif_func(return_value_ptr, args_array);
    }
    else {
        // Path 2: Generic "closure". Directly call the user's generic handler.
        // This path is more flexible and is intended for language bindings where the
        // handler needs access to the context and raw argument pointers.
        infix_closure_handler_fn handler = (infix_closure_handler_fn)context->user_callback_fn;
        handler(context, return_value_ptr, args_array);
    }
    INFIX_DEBUG_PRINTF("Exiting reverse call dispatcher.");
}
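
// Illustrative sketch (not part of the original source): the shape of a generic closure handler
// invoked by "Path 2" above through infix_closure_handler_fn. The argument and return types
// (two ints returning an int) are assumptions chosen only for this example; a real language
// binding would decode them from the context. Guarded out of compilation.
#if 0
static void example_closure_handler(infix_context_t * context, void * return_value_ptr, void ** args_array) {
    (void)context;  // Bindings typically fetch their user data and type info from the context.
    int a = *(int *)args_array[0];
    int b = *(int *)args_array[1];
    *(int *)return_value_ptr = a + b;
}
#endif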