← Back to C-Kernel-Engine Docs Doxygen Source Documentation
ckernel_alloc_v6.c
Go to the documentation of this file.
1 #define _GNU_SOURCE
2 #include "ckernel_alloc.h"
3 
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
12 
13 /* 2MB huge page size on Linux. */
14 #ifndef HUGE_PAGE_SIZE
15 #define HUGE_PAGE_SIZE (2UL * 1024UL * 1024UL)
16 #endif
17 
/* Node in the singly-linked list that tracks every live allocation handed
 * out by ck_huge_alloc(), so ck_huge_free() can pick the matching release
 * path (munmap vs free) and the true reserved length. */
typedef struct ck_huge_alloc_entry {
    void *ptr;                        /* base address returned to the caller */
    size_t len;                       /* rounded-up length actually reserved */
    int was_mmap;                     /* 1: mmap/MAP_HUGETLB, 0: aligned_alloc */
    struct ck_huge_alloc_entry *next; /* next node; NULL terminates the list */
} ck_huge_alloc_entry_t;

/* Guards g_alloc_list; every list reader/writer must hold it. */
static pthread_mutex_t g_alloc_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Head of the allocation-tracking list (LIFO push in record_allocation). */
static ck_huge_alloc_entry_t *g_alloc_list = NULL;
27 
/* Round n up to the next multiple of align.
 *
 * align must be 0 (treated as "no alignment", returns n unchanged) or a
 * power of two: the mask trick below is only correct for power-of-two
 * alignments (the sole caller passes HUGE_PAGE_SIZE, which qualifies).
 *
 * Fix: the original computed (n + align - 1) unconditionally, which wraps
 * around for n near SIZE_MAX and silently returns a tiny value — the caller
 * would then allocate far less memory than requested. On overflow we now
 * saturate to SIZE_MAX, which makes downstream mmap/aligned_alloc calls
 * fail cleanly instead. */
static size_t align_up_bytes(size_t n, size_t align)
{
    if (align == 0) {
        return n;
    }
    if (n > SIZE_MAX - (align - 1)) {
        return SIZE_MAX; /* would wrap; force the allocation to fail */
    }
    return (n + align - 1) & ~(align - 1);
}
33 
34 static int record_allocation(void *ptr, size_t len, int was_mmap)
35 {
36  ck_huge_alloc_entry_t *entry = malloc(sizeof(*entry));
37  if (!entry) {
38  return 0;
39  }
40  entry->ptr = ptr;
41  entry->len = len;
42  entry->was_mmap = was_mmap;
43  pthread_mutex_lock(&g_alloc_mutex);
44  entry->next = g_alloc_list;
45  g_alloc_list = entry;
46  pthread_mutex_unlock(&g_alloc_mutex);
47  return 1;
48 }
49 
50 static ck_huge_alloc_entry_t *detach_allocation(void *ptr)
51 {
52  pthread_mutex_lock(&g_alloc_mutex);
53  ck_huge_alloc_entry_t **node = &g_alloc_list;
54  while (*node) {
55  if ((*node)->ptr == ptr) {
56  ck_huge_alloc_entry_t *entry = *node;
57  *node = entry->next;
58  pthread_mutex_unlock(&g_alloc_mutex);
59  return entry;
60  }
61  node = &(*node)->next;
62  }
63  pthread_mutex_unlock(&g_alloc_mutex);
64  return NULL;
65 }
66 
67 void *ck_huge_alloc(size_t bytes)
68 {
69  size_t len = align_up_bytes(bytes, HUGE_PAGE_SIZE);
70 
71  /* First, try explicit huge pages via mmap + MAP_HUGETLB. */
72  void *p = mmap(NULL, len,
73  PROT_READ | PROT_WRITE,
74  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
75  -1, 0);
76  if (p != MAP_FAILED) {
77  if (!record_allocation(p, len, 1)) {
78  munmap(p, len);
79  return NULL;
80  }
81  return p;
82  }
83 
84  /* Fallback: aligned_alloc with transparent hugepage hint. */
85  void *q = aligned_alloc(HUGE_PAGE_SIZE, len);
86  if (!q) {
87  fprintf(stderr, "ck_huge_alloc: aligned_alloc failed for %zu bytes: %s\n",
88  len, strerror(errno));
89  return NULL;
90  }
91 
92  /* Best-effort hint; ignore errors. */
93  (void)madvise(q, len, MADV_HUGEPAGE);
94  if (!record_allocation(q, len, 0)) {
95  free(q);
96  return NULL;
97  }
98  return q;
99 }
100 
101 void ck_huge_free(void *ptr, size_t bytes)
102 {
103  if (!ptr || bytes == 0) {
104  return;
105  }
106 
107  ck_huge_alloc_entry_t *entry = detach_allocation(ptr);
108  if (!entry) {
109  /* Fall back to malloc/free if the allocation wasn't tracked. */
110  free(ptr);
111  return;
112  }
113 
114  if (entry->was_mmap) {
115  munmap(ptr, entry->len);
116  } else {
117  free(ptr);
118  }
119 
120  free(entry);
121 }
void * ck_huge_alloc(size_t bytes)
static size_t align_up_bytes(size_t n, size_t align)
static ck_huge_alloc_entry_t * detach_allocation(void *ptr)
static ck_huge_alloc_entry_t * g_alloc_list
static int record_allocation(void *ptr, size_t len, int was_mmap)
static pthread_mutex_t g_alloc_mutex
#define HUGE_PAGE_SIZE
void ck_huge_free(void *ptr, size_t bytes)