MIRROR: javascript for 🐜's, a tiny runtime with big ambitions
1#include <stdlib.h>
2#include <stdio.h>
3#include <string.h>
4#include <stdatomic.h>
5#include <pthread.h>
6#include <time.h>
7#include <errno.h>
8#include <math.h>
9#include <uv.h>
10
11#include "ant.h"
12#include "errors.h"
13#include "internal.h"
14#include "runtime.h"
15
16#include "gc/modules.h"
17#include "modules/buffer.h"
18#include "modules/atomics.h"
19#include "modules/symbol.h"
20#include "modules/timer.h"
21
// Settlement outcome for an Atomics.waitAsync waiter, handed from the
// notifying thread to the event-loop thread via AsyncWaitEntry.settle_state.
typedef enum {
  ASYNC_WAIT_SETTLE_NONE = 0,   // no settlement queued yet
  ASYNC_WAIT_SETTLE_OK,         // woken by Atomics.notify
  ASYNC_WAIT_SETTLE_TIMED_OUT,  // timeout timer fired first
} async_wait_settle_t;
27
// One pending Atomics.waitAsync() registration. Heap-allocated; ownership is
// shared by the embedded libuv handles — the entry is freed when the last
// handle's close callback runs (tracked via pending_handles).
typedef struct AsyncWaitEntry {
  ant_t *js;                     // owning runtime; NULLed when the runtime shuts down
  ant_value_t promise;           // promise later resolved with "ok"/"timed-out"
  ArrayBufferData *buffer;       // keep-alive reference on the watched buffer
  int32_t *address;              // watched element address (match key for notify)
  uv_timer_t timer;              // optional timeout timer (loop thread only)
  uv_async_t async;              // cross-thread settle signal (loop thread target)
  bool timer_initialized;        // timer was uv_timer_init'ed and not yet closed
  bool async_initialized;        // async was uv_async_init'ed and not yet closed
  uint8_t pending_handles;       // open uv handles still referencing this entry
  _Atomic int settle_state;      // async_wait_settle_t, written by notifier threads
  _Atomic bool settle_drain_microtasks; // drain microtasks after resolving?
  struct AsyncWaitEntry *next;   // doubly-linked list of live async waiters
  struct AsyncWaitEntry *prev;
} AsyncWaitEntry;
43
// Process-wide queue of synchronous Atomics.wait() waiters.
static WaitQueue global_wait_queue;
// Live Atomics.waitAsync() entries; guarded by async_waiters_lock.
static AsyncWaitEntry *async_waiters_head = NULL;

static pthread_once_t wait_queue_init_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t async_waiters_lock = PTHREAD_MUTEX_INITIALIZER;
49
50static inline bool async_waiter_is_linked_locked(AsyncWaitEntry *entry) {
51 return entry && (entry == async_waiters_head || entry->next || entry->prev);
52}
53
// pthread_once initializer for the process-wide synchronous wait queue.
static void init_wait_queue(void) {
  wait_queue_init(&global_wait_queue);
}
57
// Prepare an empty wait queue and its guarding mutex.
void wait_queue_init(WaitQueue *queue) {
  queue->head = NULL;
  pthread_mutex_init(&queue->lock, NULL);
}
62
63static void async_waiter_add_locked(AsyncWaitEntry *entry) {
64 entry->next = async_waiters_head;
65 entry->prev = NULL;
66 if (async_waiters_head) async_waiters_head->prev = entry;
67 async_waiters_head = entry;
68}
69
70static void async_waiter_remove_locked(AsyncWaitEntry *entry) {
71 if (entry->prev) entry->prev->next = entry->next;
72 else async_waiters_head = entry->next;
73 if (entry->next) entry->next->prev = entry->prev;
74 entry->next = NULL;
75 entry->prev = NULL;
76}
77
78static void async_waiter_release_buffer(AsyncWaitEntry *entry) {
79 if (!entry || !entry->buffer) return;
80 free_array_buffer_data(entry->buffer);
81 entry->buffer = NULL;
82}
83
// Drop one uv-handle reference on `entry`; once the last embedded handle has
// finished closing (pending_handles reaches 0) the entry and its buffer
// reference are released.
static void async_waiter_release_handle(AsyncWaitEntry *entry) {
  if (!entry) return;
  if (entry->pending_handles > 0) entry->pending_handles--;
  if (entry->pending_handles == 0) {
    async_waiter_release_buffer(entry);
    free(entry);
  }
}
92
93static void async_waiter_close_cb(uv_handle_t *handle) {
94 AsyncWaitEntry *entry = handle ? handle->data : NULL;
95 async_waiter_release_handle(entry);
96}
97
// Begin closing any embedded uv handles; the close callbacks free the entry
// once all handles are closed. If neither handle was ever initialized (early
// error paths), there is no callback to defer to, so free the entry inline.
static void async_waiter_close_handles(AsyncWaitEntry *entry) {
  bool closed = false;

  if (entry->timer_initialized && !uv_is_closing((uv_handle_t *)&entry->timer)) {
    uv_timer_stop(&entry->timer);
    uv_close((uv_handle_t *)&entry->timer, async_waiter_close_cb);
    entry->timer_initialized = false;
    closed = true;
  }

  if (entry->async_initialized && !uv_is_closing((uv_handle_t *)&entry->async)) {
    uv_close((uv_handle_t *)&entry->async, async_waiter_close_cb);
    entry->async_initialized = false;
    closed = true;
  }

  if (!closed) {
    // No handle owns the entry; release it directly.
    async_waiter_release_buffer(entry);
    free(entry);
  }
}
119
// Record a settlement outcome and wake the loop thread. Callable from any
// thread: the drain flag is published before the state store that the async
// callback consumes, so the callback always sees a consistent pair.
static void async_waiter_queue_settle(AsyncWaitEntry *entry, async_wait_settle_t state, bool drain_microtasks) {
  if (!entry) return;
  atomic_store(&entry->settle_drain_microtasks, drain_microtasks);
  atomic_store(&entry->settle_state, state);
  if (entry->async_initialized) uv_async_send(&entry->async);
}
126
// Loop-thread half of a settlement: resolve the promise with "ok" or
// "timed-out", then tear the entry down. uv_async_send coalesces wakeups,
// so the state is exchanged back to NONE and duplicates are ignored.
static void async_waiter_async_cb(uv_async_t *handle) {
  AsyncWaitEntry *entry = handle ? handle->data : NULL;
  if (!entry || !entry->js) return;  // runtime already cleaned this waiter up

  async_wait_settle_t state = (async_wait_settle_t)atomic_exchange(&entry->settle_state, ASYNC_WAIT_SETTLE_NONE);
  if (state == ASYNC_WAIT_SETTLE_NONE) return;  // spurious/duplicate wakeup

  const char *result = state == ASYNC_WAIT_SETTLE_OK ? "ok" : "timed-out";
  js_resolve_promise(entry->js, entry->promise, js_mkstr(entry->js, result, strlen(result)));
  if (atomic_load(&entry->settle_drain_microtasks))
    js_maybe_drain_microtasks_after_async_settle(entry->js);

  async_waiter_close_handles(entry);
}
141
// Timeout timer expiry: if the entry is still registered (i.e. it was not
// already claimed by a notify), unlink it and queue a "timed-out" settlement.
static void async_waiter_timeout_cb(uv_timer_t *timer) {
  AsyncWaitEntry *entry = timer ? timer->data : NULL;
  if (!entry) return;

  pthread_mutex_lock(&async_waiters_lock);
  bool linked = async_waiter_is_linked_locked(entry);
  if (linked) async_waiter_remove_locked(entry);
  pthread_mutex_unlock(&async_waiters_lock);

  // Settle outside the lock; losing the race to a notifier means `linked`
  // is false and the notifier's settlement wins.
  if (linked) async_waiter_queue_settle(entry, ASYNC_WAIT_SETTLE_TIMED_OUT, true);
}
153
// Wake up to `count` async waiters watching `address` (-1 = all). Matching
// entries are unlinked under the lock onto a private list, then settled
// outside the lock (uv_async_send must not run under async_waiters_lock
// order). Returns the number of waiters woken.
static int async_waiter_notify(int32_t *address, int count) {
  int notified = 0;
  AsyncWaitEntry *ready = NULL;

  pthread_mutex_lock(&async_waiters_lock);
  AsyncWaitEntry *current = async_waiters_head;
  while (current && (count == -1 || notified < count)) {
    AsyncWaitEntry *next = current->next;
    if (current->address == address) {
      async_waiter_remove_locked(current);
      // `next` is repurposed to chain the private ready list.
      current->next = ready;
      current->prev = NULL;
      ready = current;
      notified++;
    } current = next;
  }

  pthread_mutex_unlock(&async_waiters_lock);
  while (ready) {
    AsyncWaitEntry *next = ready->next;
    ready->next = NULL;
    async_waiter_queue_settle(ready, ASYNC_WAIT_SETTLE_OK, false);
    ready = next;
  }

  return notified;
}
181
182void wait_queue_cleanup(WaitQueue *queue) {
183 pthread_mutex_lock(&queue->lock);
184 WaitQueueEntry *current = queue->head;
185 while (current) {
186 WaitQueueEntry *next = current->next;
187 pthread_cond_destroy(¤t->cond);
188 pthread_mutex_destroy(¤t->mutex);
189 free(current);
190 current = next;
191 }
192 queue->head = NULL;
193 pthread_mutex_unlock(&queue->lock);
194 pthread_mutex_destroy(&queue->lock);
195}
196
197void wait_queue_add(WaitQueue *queue, WaitQueueEntry *entry) {
198 pthread_mutex_lock(&queue->lock);
199 entry->next = queue->head;
200 queue->head = entry;
201 pthread_mutex_unlock(&queue->lock);
202}
203
204void wait_queue_remove(WaitQueue *queue, WaitQueueEntry *entry) {
205 pthread_mutex_lock(&queue->lock);
206 WaitQueueEntry **current = &queue->head;
207 while (*current) {
208 if (*current == entry) {
209 *current = entry->next;
210 break;
211 }
212 current = &(*current)->next;
213 }
214 pthread_mutex_unlock(&queue->lock);
215}
216
217int wait_queue_notify(WaitQueue *queue, int32_t *address, int count) {
218 pthread_mutex_lock(&queue->lock);
219 int notified = 0;
220 WaitQueueEntry *current = queue->head;
221
222 while (current && (count == -1 || notified < count)) {
223 if (current->address == address) {
224 pthread_mutex_lock(¤t->mutex);
225 current->notified = 1;
226 pthread_cond_signal(¤t->cond);
227 pthread_mutex_unlock(¤t->mutex);
228 notified++;
229 } current = current->next;
230 }
231
232 pthread_mutex_unlock(&queue->lock);
233 if (count == -1 || notified < count)
234 notified += async_waiter_notify(address, count == -1 ? -1 : count - notified);
235
236 return notified;
237}
238
// Runtime-shutdown hook: detach and destroy every async waiter owned by
// `js`. Entries are unlinked under the lock, then neutralized outside it
// (js/promise cleared so the pending async callback becomes a no-op) and
// their uv handles are closed, which eventually frees them.
void cleanup_atomics_module(ant_t *js) {
  if (!js) return;
  AsyncWaitEntry *removed = NULL;

  pthread_mutex_lock(&async_waiters_lock);
  AsyncWaitEntry *current = async_waiters_head;

  while (current) {
    AsyncWaitEntry *next = current->next;
    if (current->js == js) {
      async_waiter_remove_locked(current);
      // Chain removed entries through `next` (prev cleared) for processing
      // after the lock is dropped.
      current->next = removed;
      current->prev = NULL;
      removed = current;
    } current = next;
  }

  pthread_mutex_unlock(&async_waiters_lock);
  while (removed) {
    AsyncWaitEntry *next = removed->next;
    removed->next = NULL;
    removed->prev = NULL;
    removed->js = NULL;               // makes async_waiter_async_cb bail out
    removed->promise = js_mkundef();  // promise is abandoned, never settled
    async_waiter_release_buffer(removed);
    async_waiter_close_handles(removed);
    removed = next;
  }
}
268
269static bool get_atomic_array_data(ant_t *js, ant_value_t this_val, TypedArrayData **out_data, uint8_t **out_ptr) {
270 TypedArrayData *ta_data = buffer_get_typedarray_data(this_val);
271 if (!ta_data || !ta_data->buffer) return false;
272
273 *out_data = ta_data;
274 *out_ptr = ta_data->buffer->data + ta_data->byte_offset;
275
276 return true;
277}
278
279// Atomics.add(typedArray, index, value)
280static ant_value_t js_atomics_add(ant_t *js, ant_value_t *args, int nargs) {
281 if (nargs < 3) {
282 return js_mkerr(js, "Atomics.add requires 3 arguments");
283 }
284
285 TypedArrayData *ta_data;
286 uint8_t *ptr;
287 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
288 return js_mkerr(js, "First argument must be a TypedArray");
289 }
290
291 size_t index = (size_t)js_getnum(args[1]);
292 if (index >= ta_data->length) {
293 return js_mkerr(js, "Index out of bounds");
294 }
295
296 int32_t value = (int32_t)js_getnum(args[2]);
297 int32_t old_value;
298
299 switch (ta_data->type) {
300 case TYPED_ARRAY_INT8: {
301 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
302 old_value = atomic_fetch_add(atomic_ptr, (int8_t)value);
303 break;
304 }
305 case TYPED_ARRAY_UINT8: {
306 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
307 old_value = atomic_fetch_add(atomic_ptr, (uint8_t)value);
308 break;
309 }
310 case TYPED_ARRAY_INT16: {
311 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
312 old_value = atomic_fetch_add(atomic_ptr, (int16_t)value);
313 break;
314 }
315 case TYPED_ARRAY_UINT16: {
316 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
317 old_value = atomic_fetch_add(atomic_ptr, (uint16_t)value);
318 break;
319 }
320 case TYPED_ARRAY_INT32: {
321 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
322 old_value = atomic_fetch_add(atomic_ptr, value);
323 break;
324 }
325 case TYPED_ARRAY_UINT32: {
326 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
327 old_value = atomic_fetch_add(atomic_ptr, (uint32_t)value);
328 break;
329 }
330 default:
331 return js_mkerr(js, "TypedArray type not supported for atomic operations");
332 }
333
334 return js_mknum((double)old_value);
335}
336
337// Atomics.and(typedArray, index, value)
338static ant_value_t js_atomics_and(ant_t *js, ant_value_t *args, int nargs) {
339 if (nargs < 3) {
340 return js_mkerr(js, "Atomics.and requires 3 arguments");
341 }
342
343 TypedArrayData *ta_data;
344 uint8_t *ptr;
345 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
346 return js_mkerr(js, "First argument must be a TypedArray");
347 }
348
349 size_t index = (size_t)js_getnum(args[1]);
350 if (index >= ta_data->length) {
351 return js_mkerr(js, "Index out of bounds");
352 }
353
354 int32_t value = (int32_t)js_getnum(args[2]);
355 int32_t old_value;
356
357 switch (ta_data->type) {
358 case TYPED_ARRAY_INT8: {
359 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
360 old_value = atomic_fetch_and(atomic_ptr, (int8_t)value);
361 break;
362 }
363 case TYPED_ARRAY_UINT8: {
364 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
365 old_value = atomic_fetch_and(atomic_ptr, (uint8_t)value);
366 break;
367 }
368 case TYPED_ARRAY_INT16: {
369 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
370 old_value = atomic_fetch_and(atomic_ptr, (int16_t)value);
371 break;
372 }
373 case TYPED_ARRAY_UINT16: {
374 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
375 old_value = atomic_fetch_and(atomic_ptr, (uint16_t)value);
376 break;
377 }
378 case TYPED_ARRAY_INT32: {
379 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
380 old_value = atomic_fetch_and(atomic_ptr, value);
381 break;
382 }
383 case TYPED_ARRAY_UINT32: {
384 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
385 old_value = atomic_fetch_and(atomic_ptr, (uint32_t)value);
386 break;
387 }
388 default:
389 return js_mkerr(js, "TypedArray type not supported for atomic bitwise operations");
390 }
391
392 return js_mknum((double)old_value);
393}
394
395// Atomics.compareExchange(typedArray, index, expectedValue, replacementValue)
396static ant_value_t js_atomics_compareExchange(ant_t *js, ant_value_t *args, int nargs) {
397 if (nargs < 4) {
398 return js_mkerr(js, "Atomics.compareExchange requires 4 arguments");
399 }
400
401 TypedArrayData *ta_data;
402 uint8_t *ptr;
403 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
404 return js_mkerr(js, "First argument must be a TypedArray");
405 }
406
407 size_t index = (size_t)js_getnum(args[1]);
408 if (index >= ta_data->length) {
409 return js_mkerr(js, "Index out of bounds");
410 }
411
412 int32_t expected = (int32_t)js_getnum(args[2]);
413 int32_t replacement = (int32_t)js_getnum(args[3]);
414
415 switch (ta_data->type) {
416 case TYPED_ARRAY_INT8: {
417 int8_t exp_i8 = (int8_t)expected;
418 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
419 atomic_compare_exchange_strong(atomic_ptr, &exp_i8, (int8_t)replacement);
420 expected = (int32_t)exp_i8;
421 break;
422 }
423 case TYPED_ARRAY_UINT8: {
424 uint8_t exp_u8 = (uint8_t)expected;
425 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
426 atomic_compare_exchange_strong(atomic_ptr, &exp_u8, (uint8_t)replacement);
427 expected = (int32_t)exp_u8;
428 break;
429 }
430 case TYPED_ARRAY_INT16: {
431 int16_t exp_i16 = (int16_t)expected;
432 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
433 atomic_compare_exchange_strong(atomic_ptr, &exp_i16, (int16_t)replacement);
434 expected = (int32_t)exp_i16;
435 break;
436 }
437 case TYPED_ARRAY_UINT16: {
438 uint16_t exp_u16 = (uint16_t)expected;
439 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
440 atomic_compare_exchange_strong(atomic_ptr, &exp_u16, (uint16_t)replacement);
441 expected = (int32_t)exp_u16;
442 break;
443 }
444 case TYPED_ARRAY_INT32: {
445 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
446 atomic_compare_exchange_strong(atomic_ptr, &expected, replacement);
447 break;
448 }
449 case TYPED_ARRAY_UINT32: {
450 uint32_t exp_u32 = (uint32_t)expected;
451 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
452 atomic_compare_exchange_strong(atomic_ptr, &exp_u32, (uint32_t)replacement);
453 expected = (int32_t)exp_u32;
454 break;
455 }
456 default:
457 return js_mkerr(js, "TypedArray type not supported for atomic operations");
458 }
459
460 return js_mknum((double)expected);
461}
462
463// Atomics.exchange(typedArray, index, value)
464static ant_value_t js_atomics_exchange(ant_t *js, ant_value_t *args, int nargs) {
465 if (nargs < 3) {
466 return js_mkerr(js, "Atomics.exchange requires 3 arguments");
467 }
468
469 TypedArrayData *ta_data;
470 uint8_t *ptr;
471 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
472 return js_mkerr(js, "First argument must be a TypedArray");
473 }
474
475 size_t index = (size_t)js_getnum(args[1]);
476 if (index >= ta_data->length) {
477 return js_mkerr(js, "Index out of bounds");
478 }
479
480 int32_t value = (int32_t)js_getnum(args[2]);
481 int32_t old_value;
482
483 switch (ta_data->type) {
484 case TYPED_ARRAY_INT8: {
485 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
486 old_value = atomic_exchange(atomic_ptr, (int8_t)value);
487 break;
488 }
489 case TYPED_ARRAY_UINT8: {
490 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
491 old_value = atomic_exchange(atomic_ptr, (uint8_t)value);
492 break;
493 }
494 case TYPED_ARRAY_INT16: {
495 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
496 old_value = atomic_exchange(atomic_ptr, (int16_t)value);
497 break;
498 }
499 case TYPED_ARRAY_UINT16: {
500 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
501 old_value = atomic_exchange(atomic_ptr, (uint16_t)value);
502 break;
503 }
504 case TYPED_ARRAY_INT32: {
505 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
506 old_value = atomic_exchange(atomic_ptr, value);
507 break;
508 }
509 case TYPED_ARRAY_UINT32: {
510 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
511 old_value = atomic_exchange(atomic_ptr, (uint32_t)value);
512 break;
513 }
514 default:
515 return js_mkerr(js, "TypedArray type not supported for atomic operations");
516 }
517
518 return js_mknum((double)old_value);
519}
520
521// Atomics.isLockFree(size)
522static ant_value_t js_atomics_isLockFree(ant_t *js, ant_value_t *args, int nargs) {
523 if (nargs < 1) {
524 return js_mkerr(js, "Atomics.isLockFree requires 1 argument");
525 }
526
527 int size = (int)js_getnum(args[0]);
528 bool is_lock_free = false;
529
530 switch (size) {
531 case 1:
532 is_lock_free = ATOMIC_CHAR_LOCK_FREE == 2;
533 break;
534 case 2:
535 is_lock_free = ATOMIC_SHORT_LOCK_FREE == 2;
536 break;
537 case 4:
538 is_lock_free = ATOMIC_INT_LOCK_FREE == 2;
539 break;
540 case 8:
541 is_lock_free = ATOMIC_LLONG_LOCK_FREE == 2;
542 break;
543 default:
544 is_lock_free = false;
545 }
546
547 return js_bool(is_lock_free);
548}
549
550// Atomics.load(typedArray, index)
551static ant_value_t js_atomics_load(ant_t *js, ant_value_t *args, int nargs) {
552 if (nargs < 2) {
553 return js_mkerr(js, "Atomics.load requires 2 arguments");
554 }
555
556 TypedArrayData *ta_data;
557 uint8_t *ptr;
558 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
559 return js_mkerr(js, "First argument must be a TypedArray");
560 }
561
562 size_t index = (size_t)js_getnum(args[1]);
563 if (index >= ta_data->length) {
564 return js_mkerr(js, "Index out of bounds");
565 }
566
567 int32_t value;
568
569 switch (ta_data->type) {
570 case TYPED_ARRAY_INT8: {
571 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
572 value = atomic_load(atomic_ptr);
573 break;
574 }
575 case TYPED_ARRAY_UINT8: {
576 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
577 value = atomic_load(atomic_ptr);
578 break;
579 }
580 case TYPED_ARRAY_INT16: {
581 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
582 value = atomic_load(atomic_ptr);
583 break;
584 }
585 case TYPED_ARRAY_UINT16: {
586 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
587 value = atomic_load(atomic_ptr);
588 break;
589 }
590 case TYPED_ARRAY_INT32: {
591 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
592 value = atomic_load(atomic_ptr);
593 break;
594 }
595 case TYPED_ARRAY_UINT32: {
596 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
597 value = atomic_load(atomic_ptr);
598 break;
599 }
600 default:
601 return js_mkerr(js, "TypedArray type not supported for atomic operations");
602 }
603
604 return js_mknum((double)value);
605}
606
607// Atomics.or(typedArray, index, value)
608static ant_value_t js_atomics_or(ant_t *js, ant_value_t *args, int nargs) {
609 if (nargs < 3) {
610 return js_mkerr(js, "Atomics.or requires 3 arguments");
611 }
612
613 TypedArrayData *ta_data;
614 uint8_t *ptr;
615 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
616 return js_mkerr(js, "First argument must be a TypedArray");
617 }
618
619 size_t index = (size_t)js_getnum(args[1]);
620 if (index >= ta_data->length) {
621 return js_mkerr(js, "Index out of bounds");
622 }
623
624 int32_t value = (int32_t)js_getnum(args[2]);
625 int32_t old_value;
626
627 switch (ta_data->type) {
628 case TYPED_ARRAY_INT8: {
629 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
630 old_value = atomic_fetch_or(atomic_ptr, (int8_t)value);
631 break;
632 }
633 case TYPED_ARRAY_UINT8: {
634 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
635 old_value = atomic_fetch_or(atomic_ptr, (uint8_t)value);
636 break;
637 }
638 case TYPED_ARRAY_INT16: {
639 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
640 old_value = atomic_fetch_or(atomic_ptr, (int16_t)value);
641 break;
642 }
643 case TYPED_ARRAY_UINT16: {
644 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
645 old_value = atomic_fetch_or(atomic_ptr, (uint16_t)value);
646 break;
647 }
648 case TYPED_ARRAY_INT32: {
649 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
650 old_value = atomic_fetch_or(atomic_ptr, value);
651 break;
652 }
653 case TYPED_ARRAY_UINT32: {
654 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
655 old_value = atomic_fetch_or(atomic_ptr, (uint32_t)value);
656 break;
657 }
658 default:
659 return js_mkerr(js, "TypedArray type not supported for atomic bitwise operations");
660 }
661
662 return js_mknum((double)old_value);
663}
664
665// Atomics.store(typedArray, index, value)
666static ant_value_t js_atomics_store(ant_t *js, ant_value_t *args, int nargs) {
667 if (nargs < 3) {
668 return js_mkerr(js, "Atomics.store requires 3 arguments");
669 }
670
671 TypedArrayData *ta_data;
672 uint8_t *ptr;
673 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
674 return js_mkerr(js, "First argument must be a TypedArray");
675 }
676
677 size_t index = (size_t)js_getnum(args[1]);
678 if (index >= ta_data->length) {
679 return js_mkerr(js, "Index out of bounds");
680 }
681
682 int32_t value = (int32_t)js_getnum(args[2]);
683
684 switch (ta_data->type) {
685 case TYPED_ARRAY_INT8: {
686 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
687 atomic_store(atomic_ptr, (int8_t)value);
688 break;
689 }
690 case TYPED_ARRAY_UINT8: {
691 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
692 atomic_store(atomic_ptr, (uint8_t)value);
693 break;
694 }
695 case TYPED_ARRAY_INT16: {
696 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
697 atomic_store(atomic_ptr, (int16_t)value);
698 break;
699 }
700 case TYPED_ARRAY_UINT16: {
701 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
702 atomic_store(atomic_ptr, (uint16_t)value);
703 break;
704 }
705 case TYPED_ARRAY_INT32: {
706 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
707 atomic_store(atomic_ptr, value);
708 break;
709 }
710 case TYPED_ARRAY_UINT32: {
711 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
712 atomic_store(atomic_ptr, (uint32_t)value);
713 break;
714 }
715 default:
716 return js_mkerr(js, "TypedArray type not supported for atomic operations");
717 }
718
719 return js_mknum((double)value);
720}
721
722// Atomics.sub(typedArray, index, value)
723static ant_value_t js_atomics_sub(ant_t *js, ant_value_t *args, int nargs) {
724 if (nargs < 3) {
725 return js_mkerr(js, "Atomics.sub requires 3 arguments");
726 }
727
728 TypedArrayData *ta_data;
729 uint8_t *ptr;
730 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
731 return js_mkerr(js, "First argument must be a TypedArray");
732 }
733
734 size_t index = (size_t)js_getnum(args[1]);
735 if (index >= ta_data->length) {
736 return js_mkerr(js, "Index out of bounds");
737 }
738
739 int32_t value = (int32_t)js_getnum(args[2]);
740 int32_t old_value;
741
742 switch (ta_data->type) {
743 case TYPED_ARRAY_INT8: {
744 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
745 old_value = atomic_fetch_sub(atomic_ptr, (int8_t)value);
746 break;
747 }
748 case TYPED_ARRAY_UINT8: {
749 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
750 old_value = atomic_fetch_sub(atomic_ptr, (uint8_t)value);
751 break;
752 }
753 case TYPED_ARRAY_INT16: {
754 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
755 old_value = atomic_fetch_sub(atomic_ptr, (int16_t)value);
756 break;
757 }
758 case TYPED_ARRAY_UINT16: {
759 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
760 old_value = atomic_fetch_sub(atomic_ptr, (uint16_t)value);
761 break;
762 }
763 case TYPED_ARRAY_INT32: {
764 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
765 old_value = atomic_fetch_sub(atomic_ptr, value);
766 break;
767 }
768 case TYPED_ARRAY_UINT32: {
769 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
770 old_value = atomic_fetch_sub(atomic_ptr, (uint32_t)value);
771 break;
772 }
773 default:
774 return js_mkerr(js, "TypedArray type not supported for atomic operations");
775 }
776
777 return js_mknum((double)old_value);
778}
779
780// Atomics.xor(typedArray, index, value)
781static ant_value_t js_atomics_xor(ant_t *js, ant_value_t *args, int nargs) {
782 if (nargs < 3) {
783 return js_mkerr(js, "Atomics.xor requires 3 arguments");
784 }
785
786 TypedArrayData *ta_data;
787 uint8_t *ptr;
788 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
789 return js_mkerr(js, "First argument must be a TypedArray");
790 }
791
792 size_t index = (size_t)js_getnum(args[1]);
793 if (index >= ta_data->length) {
794 return js_mkerr(js, "Index out of bounds");
795 }
796
797 int32_t value = (int32_t)js_getnum(args[2]);
798 int32_t old_value;
799
800 switch (ta_data->type) {
801 case TYPED_ARRAY_INT8: {
802 _Atomic int8_t *atomic_ptr = (_Atomic int8_t *)(ptr + index);
803 old_value = atomic_fetch_xor(atomic_ptr, (int8_t)value);
804 break;
805 }
806 case TYPED_ARRAY_UINT8: {
807 _Atomic uint8_t *atomic_ptr = (_Atomic uint8_t *)(ptr + index);
808 old_value = atomic_fetch_xor(atomic_ptr, (uint8_t)value);
809 break;
810 }
811 case TYPED_ARRAY_INT16: {
812 _Atomic int16_t *atomic_ptr = (_Atomic int16_t *)(ptr + index * 2);
813 old_value = atomic_fetch_xor(atomic_ptr, (int16_t)value);
814 break;
815 }
816 case TYPED_ARRAY_UINT16: {
817 _Atomic uint16_t *atomic_ptr = (_Atomic uint16_t *)(ptr + index * 2);
818 old_value = atomic_fetch_xor(atomic_ptr, (uint16_t)value);
819 break;
820 }
821 case TYPED_ARRAY_INT32: {
822 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
823 old_value = atomic_fetch_xor(atomic_ptr, value);
824 break;
825 }
826 case TYPED_ARRAY_UINT32: {
827 _Atomic uint32_t *atomic_ptr = (_Atomic uint32_t *)(ptr + index * 4);
828 old_value = atomic_fetch_xor(atomic_ptr, (uint32_t)value);
829 break;
830 }
831 default:
832 return js_mkerr(js, "TypedArray type not supported for atomic bitwise operations");
833 }
834
835 return js_mknum((double)old_value);
836}
837
838// Atomics.wait(typedArray, index, value, timeout)
839static ant_value_t js_atomics_wait(ant_t *js, ant_value_t *args, int nargs) {
840 if (nargs < 3) {
841 return js_mkerr(js, "Atomics.wait requires at least 3 arguments");
842 }
843
844 pthread_once(&wait_queue_init_once, init_wait_queue);
845
846 TypedArrayData *ta_data;
847 uint8_t *ptr;
848 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
849 return js_mkerr(js, "First argument must be a TypedArray");
850 }
851
852 if (ta_data->type != TYPED_ARRAY_INT32) {
853 return js_mkerr(js, "Atomics.wait only works with Int32Array");
854 }
855
856 size_t index = (size_t)js_getnum(args[1]);
857 if (index >= ta_data->length) {
858 return js_mkerr(js, "Index out of bounds");
859 }
860
861 int32_t expected_value = (int32_t)js_getnum(args[2]);
862 int64_t timeout_ms = -1;
863
864 if (nargs > 3 && vtype(args[3]) == T_NUM) {
865 timeout_ms = (int64_t)js_getnum(args[3]);
866 }
867
868 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
869 int32_t current_value = atomic_load(atomic_ptr);
870
871 if (current_value != expected_value) {
872 return js_mkstr(js, "not-equal", 9);
873 }
874
875 WaitQueueEntry entry;
876 pthread_cond_init(&entry.cond, NULL);
877 pthread_mutex_init(&entry.mutex, NULL);
878 entry.address = (int32_t *)atomic_ptr;
879 entry.notified = 0;
880 entry.next = NULL;
881
882 wait_queue_add(&global_wait_queue, &entry);
883 pthread_mutex_lock(&entry.mutex);
884
885 const char *result = "ok";
886 if (timeout_ms < 0) {
887 while (!entry.notified) pthread_cond_wait(&entry.cond, &entry.mutex);
888 } else {
889 struct timespec ts;
890 clock_gettime(CLOCK_REALTIME, &ts);
891 ts.tv_sec += timeout_ms / 1000;
892 ts.tv_nsec += (timeout_ms % 1000) * 1000000;
893 if (ts.tv_nsec >= 1000000000) {
894 ts.tv_sec++;
895 ts.tv_nsec -= 1000000000;
896 }
897
898 int wait_result = pthread_cond_timedwait(&entry.cond, &entry.mutex, &ts);
899 if (wait_result == ETIMEDOUT && !entry.notified) result = "timed-out";
900 }
901
902 pthread_mutex_unlock(&entry.mutex);
903 wait_queue_remove(&global_wait_queue, &entry);
904
905 pthread_cond_destroy(&entry.cond);
906 pthread_mutex_destroy(&entry.mutex);
907
908 return js_mkstr(js, result, strlen(result));
909}
910
911// Atomics.notify(typedArray, index, count)
912static ant_value_t js_atomics_notify(ant_t *js, ant_value_t *args, int nargs) {
913 if (nargs < 2) {
914 return js_mkerr(js, "Atomics.notify requires at least 2 arguments");
915 }
916
917 pthread_once(&wait_queue_init_once, init_wait_queue);
918
919 TypedArrayData *ta_data;
920 uint8_t *ptr;
921 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
922 return js_mkerr(js, "First argument must be a TypedArray");
923 }
924
925 if (ta_data->type != TYPED_ARRAY_INT32) {
926 return js_mkerr(js, "Atomics.notify only works with Int32Array");
927 }
928
929 size_t index = (size_t)js_getnum(args[1]);
930 if (index >= ta_data->length) {
931 return js_mkerr(js, "Index out of bounds");
932 }
933
934 int count = -1;
935 if (nargs > 2 && vtype(args[2]) == T_NUM) {
936 count = (int)js_getnum(args[2]);
937 }
938
939 int32_t *address = (int32_t *)(ptr + index * 4);
940 int notified = wait_queue_notify(&global_wait_queue, address, count);
941
942 return js_mknum((double)notified);
943}
944
945// Atomics.waitAsync(typedArray, index, value, timeout)
946static ant_value_t js_atomics_waitAsync(ant_t *js, ant_value_t *args, int nargs) {
947 if (nargs < 3) {
948 return js_mkerr(js, "Atomics.waitAsync requires at least 3 arguments");
949 }
950
951 TypedArrayData *ta_data;
952 uint8_t *ptr;
953 if (!get_atomic_array_data(js, args[0], &ta_data, &ptr)) {
954 return js_mkerr(js, "First argument must be a TypedArray");
955 }
956
957 if (ta_data->type != TYPED_ARRAY_INT32) {
958 return js_mkerr(js, "Atomics.waitAsync only works with Int32Array");
959 }
960
961 size_t index = (size_t)js_getnum(args[1]);
962 if (index >= ta_data->length) {
963 return js_mkerr(js, "Index out of bounds");
964 }
965
966 int32_t expected_value = (int32_t)js_getnum(args[2]);
967 _Atomic int32_t *atomic_ptr = (_Atomic int32_t *)(ptr + index * 4);
968 int32_t current_value = atomic_load(atomic_ptr);
969 double timeout_ms = HUGE_VAL;
970
971 if (nargs > 3 && vtype(args[3]) == T_NUM) {
972 timeout_ms = js_getnum(args[3]);
973 if (isnan(timeout_ms)) timeout_ms = HUGE_VAL;
974 else if (timeout_ms < 0) timeout_ms = 0;
975 }
976
977 ant_value_t result_obj = js_mkobj(js);
978 if (current_value != expected_value) {
979 js_set(js, result_obj, "async", js_false);
980 js_set(js, result_obj, "value", js_mkstr(js, "not-equal", 9));
981 return result_obj;
982 }
983
984 if (timeout_ms == 0) {
985 js_set(js, result_obj, "async", js_false);
986 js_set(js, result_obj, "value", js_mkstr(js, "timed-out", 9));
987 return result_obj;
988 }
989
990 ant_value_t promise = js_mkpromise(js);
991 AsyncWaitEntry *entry = calloc(1, sizeof(*entry));
992 if (!entry) return js_mkerr(js, "Out of memory");
993
994 entry->js = js;
995 entry->promise = promise;
996 entry->buffer = ta_data->buffer;
997 entry->buffer->ref_count++;
998 entry->address = (int32_t *)atomic_ptr;
999 atomic_store(&entry->settle_state, ASYNC_WAIT_SETTLE_NONE);
1000 atomic_store(&entry->settle_drain_microtasks, false);
1001
1002 if (uv_async_init(uv_default_loop(), &entry->async, async_waiter_async_cb) != 0) {
1003 async_waiter_close_handles(entry);
1004 return js_mkerr(js, "Failed to initialize Atomics.waitAsync notifier");
1005 }
1006
1007 entry->async_initialized = true;
1008 entry->async.data = entry;
1009 entry->pending_handles++;
1010
1011 if (isfinite(timeout_ms)) {
1012 uint64_t delay = timeout_ms > (double)UINT64_MAX ? UINT64_MAX : (uint64_t)timeout_ms;
1013 if (uv_timer_init(uv_default_loop(), &entry->timer) != 0) {
1014 async_waiter_close_handles(entry);
1015 return js_mkerr(js, "Failed to initialize Atomics.waitAsync timer");
1016 }
1017 entry->timer_initialized = true;
1018 entry->timer.data = entry;
1019 entry->pending_handles++;
1020 if (uv_timer_start(&entry->timer, async_waiter_timeout_cb, delay, 0) != 0) {
1021 async_waiter_close_handles(entry);
1022 return js_mkerr(js, "Failed to start Atomics.waitAsync timer");
1023 }
1024 }
1025
1026 pthread_mutex_lock(&async_waiters_lock);
1027 async_waiter_add_locked(entry);
1028 pthread_mutex_unlock(&async_waiters_lock);
1029
1030 js_set(js, result_obj, "async", js_true);
1031 js_set(js, result_obj, "value", promise);
1032
1033 return result_obj;
1034}
1035
1036// Atomics.pause()
1037static ant_value_t js_atomics_pause(ant_t *js, ant_value_t *args, int nargs) {
1038#if defined(__x86_64__) || defined(__i386__)
1039 __builtin_ia32_pause();
1040#elif defined(__aarch64__) || defined(__arm__)
1041 __asm__ __volatile__("yield");
1042#endif
1043
1044 return js_mkundef();
1045}
1046
// Install the global `Atomics` namespace object on the runtime's global
// object, with all operations bound and a Symbol.toStringTag of "Atomics".
void init_atomics_module(void) {
  ant_t *js = rt->js;

  ant_value_t glob = js_glob(js);
  ant_value_t atomics = js_mkobj(js);

  js_set(js, atomics, "add", js_mkfun(js_atomics_add));
  js_set(js, atomics, "and", js_mkfun(js_atomics_and));
  js_set(js, atomics, "compareExchange", js_mkfun(js_atomics_compareExchange));
  js_set(js, atomics, "exchange", js_mkfun(js_atomics_exchange));
  js_set(js, atomics, "isLockFree", js_mkfun(js_atomics_isLockFree));
  js_set(js, atomics, "load", js_mkfun(js_atomics_load));
  js_set(js, atomics, "notify", js_mkfun(js_atomics_notify));
  js_set(js, atomics, "or", js_mkfun(js_atomics_or));
  js_set(js, atomics, "pause", js_mkfun(js_atomics_pause));
  js_set(js, atomics, "store", js_mkfun(js_atomics_store));
  js_set(js, atomics, "sub", js_mkfun(js_atomics_sub));
  js_set(js, atomics, "wait", js_mkfun(js_atomics_wait));
  js_set(js, atomics, "waitAsync", js_mkfun(js_atomics_waitAsync));
  js_set(js, atomics, "xor", js_mkfun(js_atomics_xor));

  js_set_sym(js, atomics, get_toStringTag_sym(), js_mkstr(js, "Atomics", 7));
  js_set(js, glob, "Atomics", atomics);
}
1071
// GC hook: mark the promises of all async waiters belonging to `js` so they
// survive collection while a waitAsync registration is pending.
void gc_mark_atomics(ant_t *js, gc_mark_fn mark) {
  pthread_mutex_lock(&async_waiters_lock);
  for (AsyncWaitEntry *entry = async_waiters_head; entry; entry = entry->next) {
    if (entry->js == js) mark(js, entry->promise);
  }
  pthread_mutex_unlock(&async_waiters_lock);
}