infix
A JIT-Powered FFI Library for C
Loading...
Searching...
No Matches
executor.c
Go to the documentation of this file.
1
40#include "common/utility.h"
41#include <stdio.h>
42#include <stdlib.h>
43#include <string.h>
44#include <time.h>
45
46// Platform-Specific Includes
47#if defined(INFIX_OS_WINDOWS)
48#include <windows.h>
49#else
50#include <errno.h>
51#include <fcntl.h>
52#include <sys/mman.h>
53#include <sys/types.h>
54#include <unistd.h>
55#endif
56
57#if defined(INFIX_OS_MACOS)
58#include <dlfcn.h>
59#include <pthread.h>
60#endif
61
62// Polyfills for mmap flags for maximum POSIX compatibility.
63#if defined(INFIX_ENV_POSIX) && !defined(INFIX_OS_WINDOWS)
64#if !defined(MAP_ANON) && defined(MAP_ANONYMOUS)
65#define MAP_ANON MAP_ANONYMOUS
66#endif
67#endif
68
69// macOS JIT Security Hardening Logic
70
71#if defined(INFIX_OS_MACOS)
// Minimal forward declarations of the CoreFoundation / Security opaque types we
// need, so this file does not have to include the (Objective-C flavored) Apple
// framework headers. Layout-compatible by construction: all are opaque pointers.
typedef const struct __CFString * CFStringRef;
typedef const void * CFTypeRef;
typedef struct __SecTask * SecTaskRef;
typedef struct __CFError * CFErrorRef;
// Value of the CFStringEncoding constant for UTF-8 (stable Apple ABI constant).
#define kCFStringEncodingUTF8 0x08000100

// A struct to hold dynamically loaded function pointers from macOS frameworks.
// Populated once by initialize_macos_apis(); all members are left NULL/zero if
// loading fails, so callers must NULL-check before calling through them.
static struct {
    void (*CFRelease)(CFTypeRef);
    bool (*CFBooleanGetValue)(CFTypeRef boolean);
    CFStringRef (*CFStringCreateWithCString)(CFTypeRef allocator, const char * cStr, uint32_t encoding);
    CFTypeRef kCFAllocatorDefault;  // Value of the default allocator, read via dlsym.
    SecTaskRef (*SecTaskCreateFromSelf)(CFTypeRef allocator);
    CFTypeRef (*SecTaskCopyValueForEntitlement)(SecTaskRef task, CFStringRef entitlement, CFErrorRef * error);
} g_macos_apis;
101
109static void initialize_macos_apis(void) {
110 // We don't need to link against these frameworks, which makes building simpler.
111 void * cf = dlopen("/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation", RTLD_LAZY);
112 void * sec = dlopen("/System/Library/Frameworks/Security.framework/Security", RTLD_LAZY);
113
114 if (!cf || !sec) {
115 INFIX_DEBUG_PRINTF("Warning: Could not dlopen macOS frameworks. JIT security features will be degraded.");
116 if (cf)
117 dlclose(cf);
118 if (sec)
119 dlclose(sec);
120
121 memset(&g_macos_apis, 0, sizeof(g_macos_apis));
122 return;
123 }
124
125 g_macos_apis.CFRelease = dlsym(cf, "CFRelease");
126 g_macos_apis.CFBooleanGetValue = dlsym(cf, "CFBooleanGetValue");
127 g_macos_apis.CFStringCreateWithCString = dlsym(cf, "CFStringCreateWithCString");
128 void ** pAlloc = (void **)dlsym(cf, "kCFAllocatorDefault");
129 if (pAlloc)
130 g_macos_apis.kCFAllocatorDefault = *pAlloc;
131
132 g_macos_apis.SecTaskCreateFromSelf = dlsym(sec, "SecTaskCreateFromSelf");
133 g_macos_apis.SecTaskCopyValueForEntitlement = dlsym(sec, "SecTaskCopyValueForEntitlement");
134
135 dlclose(cf);
136 dlclose(sec);
137}
138
144static bool has_jit_entitlement(void) {
145 // Use pthread_once to ensure the dynamic loading happens exactly once, thread-safely.
146 static pthread_once_t init_once = PTHREAD_ONCE_INIT;
147 pthread_once(&init_once, initialize_macos_apis);
148
149 if (!g_macos_apis.SecTaskCopyValueForEntitlement || !g_macos_apis.CFStringCreateWithCString)
150 return false;
151
152 bool result = false;
153
154 SecTaskRef task = g_macos_apis.SecTaskCreateFromSelf(g_macos_apis.kCFAllocatorDefault);
155 if (!task)
156 return false;
157
158 CFStringRef key = g_macos_apis.CFStringCreateWithCString(
159 g_macos_apis.kCFAllocatorDefault, "com.apple.security.cs.allow-jit", kCFStringEncodingUTF8);
160 CFTypeRef value = nullptr;
161 if (key) {
162 // This is the core check: ask the system for the value of the entitlement.
163 value = g_macos_apis.SecTaskCopyValueForEntitlement(task, key, nullptr);
164 g_macos_apis.CFRelease(key);
165 }
166 g_macos_apis.CFRelease(task);
167
168 if (value) {
169 // The value of the entitlement is a CFBoolean, so we must extract its value.
170 if (g_macos_apis.CFBooleanGetValue && g_macos_apis.CFBooleanGetValue(value))
171 result = true;
172 g_macos_apis.CFRelease(value);
173 }
174 return result;
175}
176#endif // INFIX_OS_MACOS
177
178// Hardened POSIX Anonymous Shared Memory Allocator (for Dual-Mapping W^X)
179
180#if !defined(INFIX_OS_WINDOWS) && !defined(INFIX_OS_MACOS) && !defined(INFIX_OS_ANDROID) && !defined(INFIX_OS_OPENBSD)
181#include <fcntl.h>
182#include <stdint.h>
/**
 * @brief Creates an anonymous POSIX shared memory object for dual-mapping W^X.
 *
 * The object is created with a random name, opened exclusively, and immediately
 * unlinked so the kernel reclaims it automatically when the last fd is closed.
 *
 * @return A valid file descriptor on success, or -1 on failure.
 */
static int shm_open_anonymous(void) {  // BUGFIX: (void), not () — () declares an unspecified parameter list in C.
    // Retry a few times in the astronomically unlikely event of a name collision
    // (O_EXCL + 64 random bits makes more than one retry essentially impossible).
    for (int attempt = 0; attempt < 4; attempt++) {
        uint64_t random_val = 0;

        // Generate a sufficiently random name to avoid collisions if multiple
        // processes run this code simultaneously; /dev/urandom is robust for this.
        int rand_fd = open("/dev/urandom", O_RDONLY);
        if (rand_fd < 0)
            return -1;

        ssize_t bytes_read = read(rand_fd, &random_val, sizeof(random_val));
        close(rand_fd);
        if (bytes_read != (ssize_t)sizeof(random_val))
            return -1;

        char shm_name[64];
        // Cast pid_t to int to match the %d conversion specifier portably.
        snprintf(shm_name, sizeof(shm_name), "/infix-jit-%d-%llx", (int)getpid(), (unsigned long long)random_val);

        // Create the shared memory object exclusively, so we never adopt an
        // object created by another process.
        int fd = shm_open(shm_name, O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd >= 0) {
            // Unlink immediately. The file descriptor remains valid, but the name
            // is removed, ensuring kernel cleanup when the last fd is closed.
            shm_unlink(shm_name);
            return fd;
        }
        if (errno != EEXIST)  // Only a name collision is worth retrying.
            return -1;
    }
    return -1;
}
223#endif
224
225// Public API: Executable Memory Management
226
235#if defined(INFIX_OS_WINDOWS)
236 infix_executable_t exec = {.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .handle = nullptr};
237#else
238 infix_executable_t exec = {.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .shm_fd = -1};
239#endif
240
241 if (size == 0)
242 return exec;
243
244#if defined(INFIX_OS_WINDOWS)
245 // Windows: Single-mapping W^X. Allocate as RW, later change to RX via VirtualProtect.
246 void * code = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
247 if (code == nullptr)
248 return exec;
249 exec.rw_ptr = code;
250 exec.rx_ptr = code;
251
252#elif defined(INFIX_OS_MACOS) || defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
253 // Single-mapping POSIX platforms. Allocate as RW, later change to RX via mprotect.
254 void * code = MAP_FAILED;
255#if defined(MAP_ANON)
256 int flags = MAP_PRIVATE | MAP_ANON;
257#if defined(INFIX_OS_MACOS)
258 // On macOS, we perform a one-time check for JIT support.
259 static bool g_use_secure_jit_path = false;
260 static bool g_checked_jit_support = false;
261 if (!g_checked_jit_support) {
262 g_use_secure_jit_path = has_jit_entitlement();
263 INFIX_DEBUG_PRINTF("macOS JIT check: Entitlement found = %s. Using %s API.",
264 g_use_secure_jit_path ? "yes" : "no",
265 g_use_secure_jit_path ? "secure (MAP_JIT)" : "legacy (mprotect)");
266 g_checked_jit_support = true;
267 }
268 // If entitled, use the modern, more secure MAP_JIT flag.
269 if (g_use_secure_jit_path)
270 flags |= MAP_JIT;
271#endif // INFIX_OS_MACOS
272 code = mmap(nullptr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
273#endif // MAP_ANON
274
275 if (code == MAP_FAILED) { // Fallback for older systems without MAP_ANON
276 int fd = open("/dev/zero", O_RDWR);
277 if (fd != -1) {
278 code = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
279 close(fd);
280 }
281 }
282 if (code == MAP_FAILED)
283 return exec;
284 exec.rw_ptr = code;
285 exec.rx_ptr = code;
286
287#else
288 // Dual-mapping POSIX platforms (e.g., Linux, FreeBSD). Create two separate views of the same memory.
289 exec.shm_fd = shm_open_anonymous();
290 if (exec.shm_fd < 0)
291 return exec;
292 if (ftruncate(exec.shm_fd, size) != 0) {
293 close(exec.shm_fd);
294 return exec;
295 }
296 // The RW mapping.
297 exec.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, exec.shm_fd, 0);
298 // The RX mapping of the exact same physical memory.
299 exec.rx_ptr = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, exec.shm_fd, 0);
300
301 // If either mapping fails, clean up both and return an error.
302 if (exec.rw_ptr == MAP_FAILED || exec.rx_ptr == MAP_FAILED) {
303 if (exec.rw_ptr != MAP_FAILED)
304 munmap(exec.rw_ptr, size);
305 if (exec.rx_ptr != MAP_FAILED)
306 munmap(exec.rx_ptr, size);
307 close(exec.shm_fd);
308 return (infix_executable_t){.rx_ptr = nullptr, .rw_ptr = nullptr, .size = 0, .shm_fd = -1};
309 }
310#endif
311
312 exec.size = size;
313 INFIX_DEBUG_PRINTF("Allocated JIT memory. RW at %p, RX at %p", exec.rw_ptr, exec.rx_ptr);
314 return exec;
315}
316
330 if (exec.size == 0)
331 return;
332
333#if defined(INFIX_OS_WINDOWS)
334 if (exec.rw_ptr) {
335 // Change protection to NOACCESS to catch use-after-free bugs immediately.
336 if (!VirtualProtect(exec.rw_ptr, exec.size, PAGE_NOACCESS, &(DWORD){0}))
337 INFIX_DEBUG_PRINTF("WARNING: VirtualProtect failed to set PAGE_NOACCESS guard page.");
338 VirtualFree(exec.rw_ptr, 0, MEM_RELEASE);
339 }
340#elif defined(INFIX_OS_MACOS)
341 // On macOS with MAP_JIT, the memory is managed with special thread-local permissions.
342 // We only need to unmap the single mapping.
343 if (exec.rw_ptr) {
344#if INFIX_MACOS_SECURE_JIT_AVAILABLE // This macro is not yet defined, placeholder for future
345 // If using the secure path, we should toggle write protection back on.
346 static bool g_use_secure_jit_path = false; // Re-check or use a shared flag
347 if (g_use_secure_jit_path)
348 pthread_jit_write_protect_np(true);
349#endif
350 // Creating a guard page before unmapping is good practice.
351 mprotect(exec.rw_ptr, exec.size, PROT_NONE);
352 munmap(exec.rw_ptr, exec.size);
353 }
354#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
355 // Other single-mapping POSIX systems.
356 if (exec.rw_ptr) {
357 mprotect(exec.rw_ptr, exec.size, PROT_NONE);
358 munmap(exec.rw_ptr, exec.size);
359 }
360#else
361 // Dual-mapping POSIX: protect and unmap both views.
362 if (exec.rx_ptr)
363 mprotect(exec.rx_ptr, exec.size, PROT_NONE);
364 if (exec.rw_ptr)
365 munmap(exec.rw_ptr, exec.size);
366 if (exec.rx_ptr && exec.rx_ptr != exec.rw_ptr) // rw_ptr might be same as rx_ptr on some platforms
367 munmap(exec.rx_ptr, exec.size);
368 if (exec.shm_fd >= 0)
369 close(exec.shm_fd);
370#endif
371}
372
390 if (exec.rw_ptr == nullptr || exec.size == 0)
391 return false;
392
393 // On AArch64 (and other RISC architectures), the instruction and data caches can be
394 // separate. We must explicitly flush the D-cache (where the JIT wrote the code)
395 // and invalidate the I-cache so the CPU fetches the new instructions.
396#if defined(INFIX_ARCH_AARCH64)
397#if defined(_MSC_VER)
398 // Use the Windows-specific API.
399 FlushInstructionCache(GetCurrentProcess(), exec.rw_ptr, exec.size);
400#else
401 // Use the GCC/Clang built-in for other platforms.
402 __builtin___clear_cache((char *)exec.rw_ptr, (char *)exec.rw_ptr + exec.size);
403#endif
404#endif
405
406 bool result = false;
407#if defined(INFIX_OS_WINDOWS)
408 // Finalize permissions to Read+Execute.
409 result = VirtualProtect(exec.rw_ptr, exec.size, PAGE_EXECUTE_READ, &(DWORD){0});
410#elif defined(INFIX_OS_MACOS)
411#if INFIX_MACOS_SECURE_JIT_AVAILABLE // Placeholder
412 static bool g_use_secure_jit_path = false;
413 if (g_use_secure_jit_path) {
414 pthread_jit_write_protect_np(false); // Make writable region executable.
415 result = true;
416 }
417 else
418#endif
419 // On macOS with the JIT entitlement, we don't use mprotect. Instead, we toggle
420 // a thread-local "write permission" state for all JIT memory. The memory is
421 // RX by default, and we temporarily make it RW for writing.
422 // However, the current logic does this change via `pthread_jit_write_protect_np`
423 // within the allocator itself. For now, this is a placeholder for that logic.
424 result = (mprotect(exec.rw_ptr, exec.size, PROT_READ | PROT_EXEC) == 0);
425#elif defined(INFIX_OS_ANDROID) || defined(INFIX_OS_OPENBSD) || defined(INFIX_OS_DRAGONFLY)
426 // Other single-mapping POSIX platforms use mprotect.
427 result = (mprotect(exec.rw_ptr, exec.size, PROT_READ | PROT_EXEC) == 0);
428#else
429 // On dual-mapping platforms, the RX mapping is already executable. This is a no-op.
430 result = true;
431#endif
432
433 if (result)
434 INFIX_DEBUG_PRINTF("Memory at %p is now executable.", exec.rx_ptr);
435 return result;
436}
437
438// Public API: Protected (Read-Only) Memory
439
452 infix_protected_t prot = {.rw_ptr = nullptr, .size = 0};
453 if (size == 0)
454 return prot;
455#if defined(INFIX_OS_WINDOWS)
456 prot.rw_ptr = VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
457#else
458#if defined(MAP_ANON)
459 prot.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
460#else
461 int fd = open("/dev/zero", O_RDWR);
462 if (fd == -1)
463 prot.rw_ptr = MAP_FAILED;
464 else {
465 prot.rw_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
466 close(fd);
467 }
468#endif
469 if (prot.rw_ptr == MAP_FAILED)
470 prot.rw_ptr = nullptr;
471#endif
472 if (prot.rw_ptr)
473 prot.size = size;
474 return prot;
475}
476
483 if (prot.size == 0)
484 return;
485#if defined(INFIX_OS_WINDOWS)
486 VirtualFree(prot.rw_ptr, 0, MEM_RELEASE);
487#else
488 munmap(prot.rw_ptr, prot.size);
489#endif
490}
491
504 if (prot.size == 0)
505 return false;
506 bool result = false;
507#if defined(INFIX_OS_WINDOWS)
508 result = VirtualProtect(prot.rw_ptr, prot.size, PAGE_READONLY, &(DWORD){0});
509#else
510 result = (mprotect(prot.rw_ptr, prot.size, PROT_READ) == 0);
511#endif
512 return result;
513}
514
515// Universal Reverse Call Dispatcher
516
540void infix_internal_dispatch_callback_fn_impl(infix_reverse_t * context, void * return_value_ptr, void ** args_array) {
542 "Dispatching reverse call. Context: %p, User Fn: %p", (void *)context, context->user_callback_fn);
543
544 if (context->user_callback_fn == nullptr) {
545 // If no handler is set, do nothing. If the function has a return value,
546 // it's good practice to zero it out to avoid returning garbage.
547 if (return_value_ptr && context->return_type->size > 0)
548 infix_memset(return_value_ptr, 0, context->return_type->size);
549 return;
550 }
551
552 if (context->cached_forward_trampoline != nullptr) {
553 // Path 1: Type-safe "callback". Use the pre-generated forward trampoline to
554 // call the user's C function with the correct signature. This is efficient
555 // and provides a clean interface for the C developer.
557 cif_func(return_value_ptr, args_array);
558 }
559 else {
560 // Path 2: Generic "closure". Directly call the user's generic handler.
561 // This path is more flexible and is intended for language bindings where the
562 // handler needs access to the context and raw argument pointers.
564 handler(context, return_value_ptr, args_array);
565 }
566
567 INFIX_DEBUG_PRINTF("Exiting reverse call dispatcher.");
568}
#define c23_nodiscard
A compatibility macro for the C23 [[nodiscard]] attribute.
Definition compat_c23.h:113
void infix_protected_free(infix_protected_t prot)
Frees a block of protected memory.
Definition executor.c:482
c23_nodiscard bool infix_executable_make_executable(infix_executable_t exec)
Makes a block of JIT memory executable, completing the W^X process.
Definition executor.c:389
void infix_executable_free(infix_executable_t exec)
Frees a block of executable memory and applies guard pages to prevent use-after-free.
Definition executor.c:329
c23_nodiscard infix_protected_t infix_protected_alloc(size_t size)
Allocates a block of standard memory for later protection.
Definition executor.c:451
c23_nodiscard infix_executable_t infix_executable_alloc(size_t size)
Allocates a block of executable memory using the platform's W^X strategy.
Definition executor.c:234
c23_nodiscard bool infix_protected_make_readonly(infix_protected_t prot)
Makes a block of memory read-only for security hardening.
Definition executor.c:503
static int shm_open_anonymous()
Definition executor.c:195
void infix_internal_dispatch_callback_fn_impl(infix_reverse_t *context, void *return_value_ptr, void **args_array)
The universal C entry point for all reverse call trampolines.
Definition executor.c:540
void(* infix_cif_func)(void *, void **)
A function pointer type for a bound forward trampoline.
Definition infix.h:371
size_t size
Definition infix.h:214
void(* infix_closure_handler_fn)(infix_context_t *, void *, void **)
A function pointer type for a generic closure handler.
Definition infix.h:384
c23_nodiscard infix_cif_func infix_forward_get_code(infix_forward_t *)
Gets the callable function pointer from a bound forward trampoline.
Definition trampoline.c:288
#define infix_memset
A macro that can be defined to override the default memset function.
Definition infix.h:340
Internal data structures, function prototypes, and constants.
Internal representation of an executable memory block for JIT code.
Definition infix_internals.h:60
size_t size
Definition infix_internals.h:68
void * rw_ptr
Definition infix_internals.h:67
void * rx_ptr
Definition infix_internals.h:66
int shm_fd
Definition infix_internals.h:64
Internal representation of a memory block that will be made read-only.
Definition infix_internals.h:80
size_t size
Definition infix_internals.h:82
void * rw_ptr
Definition infix_internals.h:81
Internal definition of a reverse trampoline (callback/closure) handle.
Definition infix_internals.h:121
infix_type * return_type
Definition infix_internals.h:125
void * user_callback_fn
Definition infix_internals.h:130
infix_forward_t * cached_forward_trampoline
Definition infix_internals.h:135
A header for conditionally compiled debugging utilities.
#define INFIX_DEBUG_PRINTF(...)
Definition utility.h:106