/* Copyright (C) 1991, 2000 Aladdin Enterprises.  All rights reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   For more information about licensing, please refer to
   http://www.ghostscript.com/licensing/. For information on
   commercial licensing, go to http://www.artifex.com/licensing/ or
   contact Artifex Software, Inc., 101 Lucas Valley Road #110,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: zcontext.c,v 1.11 2004/08/04 19:36:13 stefan Exp $ */
/* Display PostScript context operators */
#include "memory_.h"
#include "ghost.h"
#include "gp.h"			/* for usertime */
#include "oper.h"
#include "gsexit.h"
#include "gsgc.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "gxalloc.h"
#include "gxstate.h"		/* for copying gstate stack */
#include "stream.h"		/* for files.h */
#include "files.h"
#include "idict.h"
#include "igstate.h"
#include "icontext.h"
#include "interp.h"
#include "isave.h"
#include "istruct.h"
#include "dstack.h"
#include "estack.h"
#include "ostack.h"
#include "store.h"

/*
 * Define the rescheduling interval.  A value of max_int effectively
 * disables scheduling.  The only reason not to make this const is to
 * allow it to be changed during testing.
 */
private int reschedule_interval = 100;

/* Scheduling hooks in interp.c */
extern int (*gs_interp_reschedule_proc)(i_ctx_t **);
extern int (*gs_interp_time_slice_proc)(i_ctx_t **);
extern int gs_interp_time_slice_ticks;

/* Context structure */
typedef enum {
    cs_active,
    cs_done
} ctx_status_t;
typedef long ctx_index_t;	/* >= 0 */
typedef struct gs_context_s gs_context_t;
typedef struct gs_scheduler_s gs_scheduler_t;

/*
 * If several contexts share local VM, then if any one of them has done an
 * unmatched save, the others are not allowed to run.  We handle this by
 * maintaining the following invariant:
 *      When control reaches the point in the scheduler that decides
 *      what context to run next, then for each group of contexts
 *      sharing local VM, if the save level for that VM is non-zero,
 *      saved_local_vm is only set in the context that has unmatched
 *      saves.
 * We maintain this invariant as follows: when control enters the
 * scheduler, if a context was running, we set its saved_local_vm flag
 * to (save_level > 0).  When selecting a context to run, we ignore
 * contexts where saved_local_vm is false and the local VM save_level > 0.
 */
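/*
 * Illustrative scenario (added commentary, not part of the original
 * logic): suppose contexts A and B share local VM and A executes an
 * unmatched save.  At the next entry to the scheduler, A's
 * saved_local_vm flag is set because its save_level > 0, while B's is
 * not; the selection loop in ctx_reschedule therefore skips B until A
 * restores back to save level 0.
 */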
struct gs_context_s {
    gs_context_state_t state;	/* (must be first for subclassing) */
    /* Private state */
    gs_scheduler_t *scheduler;
    ctx_status_t status;
    ctx_index_t index;		/* > 0 */
    bool detach;		/* true if a detach has been */
				/* executed for this context */
    bool saved_local_vm;	/* (see above) */
    bool visible;		/* during GC, true if visible; */
				/* otherwise, always true */
    ctx_index_t next_index;	/* next context with same status */
				/* (active, waiting on same lock, */
				/* waiting on same condition, */
				/* waiting to be destroyed) */
    ctx_index_t joiner_index;	/* context waiting on a join */
				/* for this one */
    gs_context_t *table_next;	/* hash table chain -- this must be a real */
				/* pointer, for looking up indices */
};

inline private bool
context_is_visible(const gs_context_t *pctx)
{
    return (pctx && pctx->visible);
}
inline private gs_context_t *
visible_context(gs_context_t *pctx)
{
    return (pctx && pctx->visible ? pctx : (gs_context_t *)0);
}

/* GC descriptor */
private
CLEAR_MARKS_PROC(context_clear_marks)
{
    gs_context_t *const pctx = vptr;

    (*st_context_state.clear_marks)
        (cmem, &pctx->state, sizeof(pctx->state), &st_context_state);
}
private
ENUM_PTRS_WITH(context_enum_ptrs, gs_context_t *pctx)
ENUM_PREFIX(st_context_state, 2);
case 0: return ENUM_OBJ(pctx->scheduler);
case 1: {
    /* Return the next *visible* context. */
    const gs_context_t *next = pctx->table_next;

    while (next && !next->visible)
        next = next->table_next;
    return ENUM_OBJ(next);
}
ENUM_PTRS_END
private RELOC_PTRS_WITH(context_reloc_ptrs, gs_context_t *pctx)
    RELOC_PREFIX(st_context_state);
    RELOC_VAR(pctx->scheduler);
    /* Don't relocate table_next -- the scheduler object handles that. */
RELOC_PTRS_END
gs_private_st_complex_only(st_context, gs_context_t, "gs_context_t",
             context_clear_marks, context_enum_ptrs, context_reloc_ptrs, 0);

/*
 * Context list structure.  Note that this uses context indices, not
 * pointers, to avoid having to worry about pointers between local VMs.
 */
typedef struct ctx_list_s {
    ctx_index_t head_index;
    ctx_index_t tail_index;
} ctx_list_t;

/* Condition structure */
typedef struct gs_condition_s {
    ctx_list_t waiting;		/* contexts waiting on this condition */
} gs_condition_t;
gs_private_st_simple(st_condition, gs_condition_t, "conditiontype");

/* Lock structure */
typedef struct gs_lock_s {
    ctx_list_t waiting;		/* contexts waiting for this lock, */
				/* must be first for subclassing */
    ctx_index_t holder_index;	/* context holding the lock, if any */
    gs_scheduler_t *scheduler;
} gs_lock_t;
gs_private_st_ptrs1(st_lock, gs_lock_t, "locktype",
                    lock_enum_ptrs, lock_reloc_ptrs, scheduler);

/* Global state */
/*typedef struct gs_scheduler_s gs_scheduler_t; */	/* (above) */
struct gs_scheduler_s {
    gs_context_t *current;
    long usertime_initial;	/* usertime when current started running */
    ctx_list_t active;
    vm_reclaim_proc((*save_vm_reclaim));
    ctx_index_t dead_index;
#define CTX_TABLE_SIZE 19
    gs_context_t *table[CTX_TABLE_SIZE];
};

/* Convert a context index to a context pointer. */
private gs_context_t *
index_context(const gs_scheduler_t *psched, long index)
{
    gs_context_t *pctx;

    if (index == 0)
        return 0;
    pctx = psched->table[index % CTX_TABLE_SIZE];
    while (pctx != 0 && pctx->index != index)
        pctx = pctx->table_next;
    return pctx;
}
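/*
 * For example (added commentary): a context whose index is 23 lives in
 * bucket 23 % CTX_TABLE_SIZE = 23 % 19 = 4 and is chained through
 * table_next with any other context whose index maps to the same
 * bucket.  Index 0 is reserved to mean "no context".
 */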
/* Structure definition */
gs_private_st_composite(st_scheduler, gs_scheduler_t, "gs_scheduler",
                        scheduler_enum_ptrs, scheduler_reloc_ptrs);
/*
 * The only cross-local-VM pointers in the context machinery are the
 * table_next pointers in contexts, and the current and table[] pointers
 * in the scheduler.  We need to handle all of these specially.
 */
private ENUM_PTRS_WITH(scheduler_enum_ptrs, gs_scheduler_t *psched)
{
    index -= 1;
    if (index < CTX_TABLE_SIZE) {
        gs_context_t *pctx = psched->table[index];

        while (pctx && !pctx->visible)
            pctx = pctx->table_next;
        return ENUM_OBJ(pctx);
    }
    return 0;
}
case 0: return ENUM_OBJ(visible_context(psched->current));
ENUM_PTRS_END
private RELOC_PTRS_WITH(scheduler_reloc_ptrs, gs_scheduler_t *psched)
{
    if (psched->current->visible)
        RELOC_VAR(psched->current);
    {
        int i;

        for (i = 0; i < CTX_TABLE_SIZE; ++i) {
            gs_context_t **ppctx = &psched->table[i];
            gs_context_t **pnext;

            for (; *ppctx; ppctx = pnext) {
                pnext = &(*ppctx)->table_next;
                if ((*ppctx)->visible)
                    RELOC_VAR(*ppctx);
            }
        }
    }
}
RELOC_PTRS_END

/*
 * The context scheduler requires special handling during garbage
 * collection, since it is the only structure that can legitimately
 * reference objects in multiple local VMs.  To deal with this, we wrap the
 * interpreter's garbage collector with code that prevents it from seeing
 * contexts in other than the current local VM.  ****** WORKS FOR LOCAL GC,
 * NOT FOR GLOBAL ******
 */
private void
context_reclaim(vm_spaces * pspaces, bool global)
{
    /*
     * Search through the registered roots to find the current context.
     * (This is a hack so we can find the scheduler.)
     */
    int i;
    gs_context_t *pctx = 0;	/* = 0 is bogus to pacify compilers */
    gs_scheduler_t *psched = 0;
    gs_ref_memory_t *lmem = 0;	/* = 0 is bogus to pacify compilers */
    chunk_locator_t loc;

    for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
        gs_ref_memory_t *mem = pspaces->memories.indexed[i];
        const gs_gc_root_t *root = mem->roots;

        for (; root; root = root->next) {
            if (gs_object_type((gs_memory_t *)mem, *root->p) == &st_context) {
                pctx = *root->p;
                psched = pctx->scheduler;
                lmem = mem;
                break;
            }
        }
    }

    /* Hide all contexts in other (local) VMs. */
    /*
     * See context_create below for why we look for the context
     * in stable memory.
     */
    loc.memory = (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)lmem);
    loc.cp = 0;
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
        for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
            pctx->visible = chunk_locate_ptr(pctx, &loc);

#ifdef DEBUG
    if (!psched->current->visible) {
        lprintf("Current context is invisible!\n");
        gs_abort((gs_memory_t *)lmem);
    }
#endif

    /* Do the actual garbage collection. */
    psched->save_vm_reclaim(pspaces, global);

    /* Make all contexts visible again. */
    for (i = 0; i < CTX_TABLE_SIZE; ++i)
        for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
            pctx->visible = true;
}
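/*
 * In outline (added commentary): context_reclaim marks as invisible every
 * context that is not allocated in the local VM being collected, runs the
 * saved vm_reclaim procedure (which then only traces visible contexts via
 * the enum/reloc procedures above), and finally restores visible = true on
 * every context, preserving the "otherwise, always true" invariant noted
 * in the structure definition.
 */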
/* Forward references */
private int context_create(gs_scheduler_t *, gs_context_t **,
                           const gs_dual_memory_t *,
                           const gs_context_state_t *, bool);
private long context_usertime(void);
private int context_param(const gs_scheduler_t *, os_ptr, gs_context_t **);
private void context_destroy(gs_context_t *);
private void stack_copy(ref_stack_t *, const ref_stack_t *, uint, uint);
private int lock_acquire(os_ptr, gs_context_t *);
private int lock_release(ref *);

/* Internal procedures */
private void
context_load(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]loading %ld\n", pctx->index);
    if ( pctx->state.keep_usertime )
        psched->usertime_initial = context_usertime();
    context_state_load(&pctx->state);
}
private void
context_store(gs_scheduler_t *psched, gs_context_t *pctx)
{
    if_debug1('"', "[\"]storing %ld\n", pctx->index);
    context_state_store(&pctx->state);
    if ( pctx->state.keep_usertime )
        pctx->state.usertime_total +=
            context_usertime() - psched->usertime_initial;
}

/* List manipulation */
private void
add_last(const gs_scheduler_t *psched, ctx_list_t *pl, gs_context_t *pc)
{
    pc->next_index = 0;
    if (pl->head_index == 0)
        pl->head_index = pc->index;
    else
        index_context(psched, pl->tail_index)->next_index = pc->index;
    pl->tail_index = pc->index;
}

/* ------ Initialization ------ */

private int ctx_initialize(i_ctx_t **);
private int ctx_reschedule(i_ctx_t **);
private int ctx_time_slice(i_ctx_t **);

private int
zcontext_init(i_ctx_t *i_ctx_p)
{
    /* Complete initialization after the interpreter is entered. */
    gs_interp_reschedule_proc = ctx_initialize;
    gs_interp_time_slice_proc = ctx_initialize;
    gs_interp_time_slice_ticks = 0;
    return 0;
}

/*
 * The interpreter calls this procedure at the first reschedule point.
 * It completes context initialization.
 */
private int
ctx_initialize(i_ctx_t **pi_ctx_p)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;	/* for gs_imemory */
    gs_ref_memory_t *imem = iimemory_system;
    gs_scheduler_t *psched =
        gs_alloc_struct_immovable((gs_memory_t *) imem, gs_scheduler_t,
                                  &st_scheduler, "gs_scheduler");

    psched->current = 0;
    psched->active.head_index = psched->active.tail_index = 0;
    psched->save_vm_reclaim = i_ctx_p->memory.spaces.vm_reclaim;
    i_ctx_p->memory.spaces.vm_reclaim = context_reclaim;
    psched->dead_index = 0;
    memset(psched->table, 0, sizeof(psched->table));
    /* Create an initial context. */
    if (context_create(psched, &psched->current, &gs_imemory, *pi_ctx_p, true) < 0) {
        lprintf("Can't create initial context!");
        gs_abort(imemory);
    }
    psched->current->scheduler = psched;
    /* Hook into the interpreter. */
    *pi_ctx_p = &psched->current->state;
    gs_interp_reschedule_proc = ctx_reschedule;
    gs_interp_time_slice_proc = ctx_time_slice;
    gs_interp_time_slice_ticks = reschedule_interval;
    return 0;
}

/* ------ Interpreter interface to scheduler ------ */

/* When an operator decides it is time to run a new context, */
/* it returns o_reschedule.  The interpreter saves all its state in */
/* memory, calls ctx_reschedule, and then loads the state from memory. */
private int
ctx_reschedule(i_ctx_t **pi_ctx_p)
{
    gs_context_t *current = (gs_context_t *)*pi_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

#ifdef DEBUG
    if (*pi_ctx_p != &current->state) {
        lprintf2("current->state = 0x%lx, != i_ctx_p = 0x%lx!\n",
                 (ulong)&current->state, (ulong)*pi_ctx_p);
    }
#endif
    /* If there are any dead contexts waiting to be released, */
    /* take care of that now. */
    while (psched->dead_index != 0) {
        gs_context_t *dead = index_context(psched, psched->dead_index);
        long next_index = dead->next_index;

        if (current == dead) {
            if_debug1('"', "[\"]storing dead %ld\n", current->index);
            context_state_store(&current->state);
            current = 0;
        }
        context_destroy(dead);
        psched->dead_index = next_index;
    }
    /* Update saved_local_vm.  See above for the invariant. */
    if (current != 0)
        current->saved_local_vm =
            current->state.memory.space_local->saved != 0;
    /* Run the first ready context, taking the 'save' lock into account. */
    {
        gs_context_t *prev = 0;
        gs_context_t *ready;

        for (ready = index_context(psched, psched->active.head_index);;
             prev = ready, ready = index_context(psched, ready->next_index)
            ) {
            if (ready == 0) {
                if (current != 0)
                    context_store(psched, current);
                lprintf("No context to run!");
                return_error(e_Fatal);
            }
            /* See above for an explanation of the following test. */
            if (ready->state.memory.space_local->saved != 0 &&
                !ready->saved_local_vm
                )
                continue;
            /* Found a context to run. */
            {
                ctx_index_t next_index = ready->next_index;

                if (prev)
                    prev->next_index = next_index;
                else
                    psched->active.head_index = next_index;
                if (!next_index)
                    psched->active.tail_index = (prev ? prev->index : 0);
            }
            break;
        }
        if (ready == current)
            return 0;		/* no switch */
        /*
         * Save the state of the current context in psched->current,
         * if any context is current.
         */
        if (current != 0)
            context_store(psched, current);
        psched->current = ready;
        /* Load the state of the new current context. */
        context_load(psched, ready);
        /* Switch the interpreter's context state pointer. */
        *pi_ctx_p = &ready->state;
    }
    return 0;
}

/* If the interpreter wants to time-slice, it saves its state, */
/* calls ctx_time_slice, and reloads its state. */
private int
ctx_time_slice(i_ctx_t **pi_ctx_p)
{
    gs_context_t *psched_current_ctx_check; /* (unused placeholder removed) */
    gs_scheduler_t *psched = ((gs_context_t *)*pi_ctx_p)->scheduler;

    if (psched->active.head_index == 0)
        return 0;
    if_debug0('"', "[\"]time-slice\n");
    add_last(psched, &psched->active, psched->current);
    return ctx_reschedule(pi_ctx_p);
}
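/*
 * Added commentary: ctx_initialize sets gs_interp_time_slice_ticks to
 * reschedule_interval (100), so the interpreter counts down that many
 * ticks before calling the time-slice procedure.  ctx_time_slice then
 * simply rotates the running context to the tail of the active list and
 * lets ctx_reschedule pick the next runnable context.
 */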
/* ------ Context operators ------ */

/* - currentcontext <context> */
private int
zcurrentcontext(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_context_t *current = (const gs_context_t *)i_ctx_p;

    push(1);
    make_int(op, current->index);
    return 0;
}

/* <context> detach - */
private int
zdetach(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    const gs_scheduler_t *psched = ((gs_context_t *)i_ctx_p)->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
        return code;
    if_debug2('\'', "[']detach %ld, status = %d\n",
              pctx->index, pctx->status);
    if (pctx->joiner_index != 0 || pctx->detach)
        return_error(e_invalidcontext);
    switch (pctx->status) {
        case cs_active:
            pctx->detach = true;
            break;
        case cs_done:
            context_destroy(pctx);
    }
    pop(1);
    return 0;
}

private int
    do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin,
            const ref * pstdout, uint mcount, bool local),
    values_older_than(const ref_stack_t * pstack, uint first, uint last,
                      int max_space);
private int
    fork_done(i_ctx_t *),
    fork_done_with_error(i_ctx_t *),
    finish_join(i_ctx_t *),
    reschedule_now(i_ctx_t *);

/* <mark> <obj1> ... <objN> <proc> .fork <context> */
/* <mark> <obj1> ... <objN> <proc> <stdin|null> <stdout|null> */
/*   .localfork <context> */
private int
zfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    ref rnull;

    if (mcount == 0)
        return_error(e_unmatchedmark);
    make_null(&rnull);
    return do_fork(i_ctx_p, op, &rnull, &rnull, mcount, false);
}
private int
zlocalfork(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint mcount = ref_stack_counttomark(&o_stack);
    int code;

    if (mcount == 0)
        return_error(e_unmatchedmark);
    code = values_older_than(&o_stack, 1, mcount - 1, avm_local);
    if (code < 0)
        return code;
    code = do_fork(i_ctx_p, op - 2, op - 1, op, mcount - 2, true);
    if (code < 0)
        return code;
    op = osp;
    op[-2] = *op;
    pop(2);
    return code;
}
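/*
 * Illustrative PostScript usage (a sketch added for orientation; the
 * user-level wrappers around .fork are defined elsewhere, in the
 * PostScript initialization files, so the exact spelling below is an
 * assumption):
 *
 *   mark 1 2 { add } .fork    % -> <context>; the new context starts with
 *                             %    1 2 on its operand stack and runs { add }
 *   join                      % -> mark 3, once that context finishes
 *   cleartomark
 *
 * .localfork takes the same arguments plus stdin and stdout (or null for
 * either) and gives the new context a private local VM.
 */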
/* Internal procedure to actually do the fork operation. */
private int
do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
        uint mcount, bool local)
{
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;
    stream *s;
    gs_dual_memory_t dmem;
    gs_context_t *pctx;
    ref old_userdict, new_userdict;
    int code;

    check_proc(*op);
    if (iimemory_local->save_level)
        return_error(e_invalidcontext);
    if (r_has_type(pstdout, t_null)) {
        code = zget_stdout(i_ctx_p, &s);
        if (code < 0)
            return code;
        pstdout = &ref_stdio[1];
    } else
        check_read_file(s, pstdout);
    if (r_has_type(pstdin, t_null)) {
        code = zget_stdin(i_ctx_p, &s);
        if (code < 0)
            return code;
        pstdin = &ref_stdio[0];
    } else
        check_read_file(s, pstdin);
    dmem = gs_imemory;
    if (local) {
        /* Share global VM, private local VM. */
        ref *puserdict;
        uint userdict_size;
        gs_memory_t *parent = iimemory_local->non_gc_memory;
        gs_ref_memory_t *lmem;
        gs_ref_memory_t *lmem_stable;

        if (dict_find_string(systemdict, "userdict", &puserdict) <= 0 ||
            !r_has_type(puserdict, t_dictionary)
            )
            return_error(e_Fatal);
        old_userdict = *puserdict;
        userdict_size = dict_maxlength(&old_userdict);
        lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
        lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
        if (lmem == 0 || lmem_stable == 0) {
            gs_free_object(parent, lmem_stable, "do_fork");
            gs_free_object(parent, lmem, "do_fork");
            return_error(e_VMerror);
        }
        lmem->space = avm_local;
        lmem_stable->space = avm_local;
        lmem->stable_memory = (gs_memory_t *)lmem_stable;
        dmem.space_local = lmem;
        code = context_create(psched, &pctx, &dmem, &pcur->state, false);
        if (code < 0) {
            /****** FREE lmem ******/
            return code;
        }
        /*
         * Create a new userdict.  PostScript code will take care of
         * the rest of the initialization of the new context.
         */
        code = dict_alloc(lmem, userdict_size, &new_userdict);
        if (code < 0) {
            context_destroy(pctx);
            /****** FREE lmem ******/
            return code;
        }
    } else {
        /* Share global and local VM. */
        code = context_create(psched, &pctx, &dmem, &pcur->state, false);
        if (code < 0) {
            /****** FREE lmem ******/
            return code;
        }
        /*
         * Copy the gstate stack.  The current method is not elegant;
         * in fact, I'm not entirely sure it works.
         */
        {
            int n;
            const gs_state *old;
            gs_state *new;

            for (n = 0, old = igs; old != 0; old = gs_state_saved(old))
                ++n;
            for (old = pctx->state.pgs; old != 0; old = gs_state_saved(old))
                --n;
            for (; n > 0 && code >= 0; --n)
                code = gs_gsave(pctx->state.pgs);
            if (code < 0) {
                /****** FREE lmem & GSTATES ******/
                return code;
            }
            for (old = igs, new = pctx->state.pgs;
                 old != 0 /* (== new != 0) */ && code >= 0;
                 old = gs_state_saved(old), new = gs_state_saved(new)
                )
                code = gs_setgstate(new, old);
            if (code < 0) {
                /****** FREE lmem & GSTATES ******/
                return code;
            }
        }
    }
    pctx->state.language_level = i_ctx_p->language_level;
    pctx->state.dict_stack.min_size = idict_stack.min_size;
    pctx->state.dict_stack.userdict_index = idict_stack.userdict_index;
    pctx->state.stdio[0] = *pstdin;
    pctx->state.stdio[1] = *pstdout;
    pctx->state.stdio[2] = pcur->state.stdio[2];
    /* Initialize the interpreter stacks. */
    {
        ref_stack_t *dstack = (ref_stack_t *)&pctx->state.dict_stack;
        uint count = ref_stack_count(&d_stack);
        uint copy = (local ? min_dstack_size : count);

        ref_stack_push(dstack, copy);
        stack_copy(dstack, &d_stack, copy, count - copy);
        if (local) {
            /* Substitute the new userdict for the old one. */
            long i;

            for (i = 0; i < copy; ++i) {
                ref *pdref = ref_stack_index(dstack, i);

                if (obj_eq(imemory, pdref, &old_userdict))
                    *pdref = new_userdict;
            }
        }
    }
    {
        ref_stack_t *estack = (ref_stack_t *)&pctx->state.exec_stack;

        ref_stack_push(estack, 3);
        /* fork_done must be executed in both normal and error cases. */
        make_mark_estack(estack->p - 2, es_other, fork_done_with_error);
        make_oper(estack->p - 1, 0, fork_done);
        *estack->p = *op;
    }
    {
        ref_stack_t *ostack = (ref_stack_t *)&pctx->state.op_stack;
        uint count = mcount - 2;

        ref_stack_push(ostack, count);
        stack_copy(ostack, &o_stack, count, osp - op + 1);
    }
    pctx->state.binary_object_format = pcur->state.binary_object_format;
    add_last(psched, &psched->active, pctx);
    pop(mcount - 1);
    op = osp;
    make_int(op, pctx->index);
    return 0;
}
/*
 * Check that all values being passed by fork or join are old enough
 * to be valid in the environment to which they are being transferred.
 */
private int
values_older_than(const ref_stack_t * pstack, uint first, uint last,
                  int next_space)
{
    uint i;

    for (i = first; i <= last; ++i)
        if (r_space(ref_stack_index(pstack, (long)i)) >= next_space)
            return_error(e_invalidaccess);
    return 0;
}

/* This gets executed when a context terminates normally. */
/****** MUST DO ALL RESTORES ******/
/****** WHAT IF invalidrestore? ******/
private int
fork_done(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *pcur = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = pcur->scheduler;

    if_debug2('\'', "[']done %ld%s\n", pcur->index,
              (pcur->detach ? ", detached" : ""));
    /*
     * Clear the context's dictionary, execution and graphics stacks
     * now, to retain as little as possible in case of a garbage
     * collection or restore.  We know that fork_done is the
     * next-to-bottom entry on the execution stack.
     */
    ref_stack_pop_to(&d_stack, min_dstack_size);
    pop_estack(&pcur->state, ref_stack_count(&e_stack) - 1);
    gs_grestoreall(igs);
    /*
     * If there are any unmatched saves, we need to execute restores
     * until there aren't.  An invalidrestore is possible and will
     * result in an error termination.
     */
    if (iimemory_local->save_level) {
        ref *prestore;

        if (dict_find_string(systemdict, "restore", &prestore) <= 0) {
            lprintf("restore not found in systemdict!");
            return_error(e_Fatal);
        }
        if (pcur->detach) {
            ref_stack_clear(&o_stack);	/* help avoid invalidrestore */
            op = osp;
        }
        push(1);
        make_tv(op, t_save, saveid, alloc_save_current_id(&gs_imemory));
        push_op_estack(fork_done);
        ++esp;
        ref_assign(esp, prestore);
        return o_push_estack;
    }
    if (pcur->detach) {
        /*
         * We would like to free the context's memory, but we can't do
         * it yet, because the interpreter still has references to it.
         * Instead, queue the context to be freed the next time we
         * reschedule.  We can, however, clear its operand stack now.
         */
        ref_stack_clear(&o_stack);
        context_store(psched, pcur);
        pcur->next_index = psched->dead_index;
        psched->dead_index = pcur->index;
        psched->current = 0;
    } else {
        gs_context_t *pctx = index_context(psched, pcur->joiner_index);

        pcur->status = cs_done;
        /* Schedule the context waiting to join this one, if any. */
        if (pctx != 0)
            add_last(psched, &psched->active, pctx);
    }
    return o_reschedule;
}
/*
 * This gets executed when the stack is being unwound for an error
 * termination.
 */
private int
fork_done_with_error(i_ctx_t *i_ctx_p)
{
    /****** WHAT TO DO? ******/
    return fork_done(i_ctx_p);
}
/* <context> join <mark> <obj1> ... <objN> */
private int
zjoin(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
        return code;
    if_debug2('\'', "[']join %ld, status = %d\n",
              pctx->index, pctx->status);
    /*
     * It doesn't seem logically necessary, but the Red Book says that
     * the context being joined must share both global and local VM with
     * the current context.
     */
    if (pctx->joiner_index != 0 || pctx->detach || pctx == current ||
        pctx->state.memory.space_global !=
          current->state.memory.space_global ||
        pctx->state.memory.space_local !=
          current->state.memory.space_local ||
        iimemory_local->save_level != 0
        )
        return_error(e_invalidcontext);
    switch (pctx->status) {
        case cs_active:
            /*
             * We need to re-execute the join after the joined
             * context is done.  Since we can't return both
             * o_push_estack and o_reschedule, we push a call on
             * reschedule_now, which accomplishes the latter.
             */
            check_estack(2);
            push_op_estack(finish_join);
            push_op_estack(reschedule_now);
            pctx->joiner_index = current->index;
            return o_push_estack;
        case cs_done:
            {
                const ref_stack_t *ostack =
                    (ref_stack_t *)&pctx->state.op_stack;
                uint count = ref_stack_count(ostack);

                push(count);
                {
                    ref *rp = ref_stack_index(&o_stack, count);

                    make_mark(rp);
                }
                stack_copy(&o_stack, ostack, count, 0);
                context_destroy(pctx);
            }
    }
    return 0;
}

/* Finish a deferred join. */
private int
finish_join(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_context_t *pctx;
    int code;

    if ((code = context_param(psched, op, &pctx)) < 0)
        return code;
    if_debug2('\'', "[']finish_join %ld, status = %d\n",
              pctx->index, pctx->status);
    if (pctx->joiner_index != current->index)
        return_error(e_invalidcontext);
    pctx->joiner_index = 0;
    return zjoin(i_ctx_p);
}

/* Reschedule now. */
private int
reschedule_now(i_ctx_t *i_ctx_p)
{
    return o_reschedule;
}

/* - yield - */
private int
zyield(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;

    if (psched->active.head_index == 0)
        return 0;
    if_debug0('"', "[\"]yield\n");
    add_last(psched, &psched->active, current);
    return o_reschedule;
}
/* ------ Condition and lock operators ------ */

private int
    monitor_cleanup(i_ctx_t *),
    monitor_release(i_ctx_t *),
    await_lock(i_ctx_t *);
private void
    activate_waiting(gs_scheduler_t *, ctx_list_t * pcl);

/* - condition <condition> */
private int
zcondition(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_condition_t *pcond =
        ialloc_struct(gs_condition_t, &st_condition, "zcondition");

    if (pcond == 0)
        return_error(e_VMerror);
    pcond->waiting.head_index = pcond->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, pcond);
    return 0;
}

/* - lock <lock> */
private int
zlock(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_lock_t *plock = ialloc_struct(gs_lock_t, &st_lock, "zlock");

    if (plock == 0)
        return_error(e_VMerror);
    plock->holder_index = 0;
    plock->waiting.head_index = plock->waiting.tail_index = 0;
    push(1);
    make_istruct(op, a_all, plock);
    return 0;
}

/* <lock> <proc> monitor - */
private int
zmonitor(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    gs_lock_t *plock;
    gs_context_t *pctx;
    int code;

    check_stype(op[-1], st_lock);
    check_proc(*op);
    plock = r_ptr(op - 1, gs_lock_t);
    pctx = index_context(current->scheduler, plock->holder_index);
    if_debug1('\'', "[']monitor 0x%lx\n", (ulong) plock);
    if (pctx != 0) {
        if (pctx == current ||
            (iimemory_local->save_level != 0 &&
             pctx->state.memory.space_local ==
               current->state.memory.space_local)
            )
            return_error(e_invalidcontext);
    }
    /*
     * We push on the e-stack:
     *      The lock object
     *      An e-stack mark with monitor_cleanup, to release the lock
     *        in case of an error
     *      monitor_release, to release the lock in the normal case
     *      The procedure to execute
     */
    check_estack(4);
    code = lock_acquire(op - 1, current);
    if (code != 0) {		/* We didn't acquire the lock.  Re-execute this later. */
        push_op_estack(zmonitor);
        return code;		/* o_reschedule */
    }
    *++esp = op[-1];
    push_mark_estack(es_other, monitor_cleanup);
    push_op_estack(monitor_release);
    *++esp = *op;
    pop(2);
    return o_push_estack;
}
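/*
 * Illustrative PostScript usage (a sketch added for orientation; the
 * name mylock is hypothetical):
 *
 *   /mylock lock def
 *   mylock { (in critical section\n) print } monitor
 *
 * monitor acquires mylock, rescheduling this context if another context
 * holds it, runs the procedure, and releases the lock via monitor_release,
 * or via monitor_cleanup if the procedure terminates with an error.
 */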
/* Release the monitor lock when unwinding for an error or exit. */
private int
monitor_cleanup(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp);

    if (code < 0)
        return code;
    --esp;
    return o_pop_estack;
}

/* Release the monitor lock when the procedure completes. */
private int
monitor_release(i_ctx_t *i_ctx_p)
{
    int code = lock_release(esp - 1);

    if (code < 0)
        return code;
    esp -= 2;
    return o_pop_estack;
}

/* <condition> notify - */
private int
znotify(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_condition_t *pcond;

    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug1('"', "[\"]notify 0x%lx\n", (ulong) pcond);
    pop(1);
    op--;
    if (pcond->waiting.head_index == 0)	/* nothing to do */
        return 0;
    activate_waiting(current->scheduler, &pcond->waiting);
    return zyield(i_ctx_p);
}

/* <lock> <condition> wait - */
private int
zwait(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    gs_lock_t *plock;
    gs_context_t *pctx;
    gs_condition_t *pcond;

    check_stype(op[-1], st_lock);
    plock = r_ptr(op - 1, gs_lock_t);
    check_stype(*op, st_condition);
    pcond = r_ptr(op, gs_condition_t);
    if_debug2('"', "[\"]wait lock 0x%lx, condition 0x%lx\n",
              (ulong) plock, (ulong) pcond);
    pctx = index_context(psched, plock->holder_index);
    if (pctx == 0 || pctx != psched->current ||
        (iimemory_local->save_level != 0 &&
         (r_space(op - 1) == avm_local || r_space(op) == avm_local))
        )
        return_error(e_invalidcontext);
    check_estack(1);
    lock_release(op - 1);
    add_last(psched, &pcond->waiting, pctx);
    push_op_estack(await_lock);
    return o_reschedule;
}
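/*
 * Illustrative wait/notify pattern (a sketch added for orientation; the
 * names mylock and mycond are hypothetical).  wait must be executed while
 * the calling context holds the lock, typically inside a procedure run by
 * monitor on that same lock:
 *
 *   waiter (inside mylock { ... } monitor):   mylock mycond wait
 *   signaler:                                 mycond notify
 *
 * wait releases mylock, suspends the context on mycond, and, once a notify
 * has made it runnable again, reacquires mylock via await_lock before
 * continuing.  notify moves every context waiting on mycond back to the
 * active list and then yields.
 */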
/* When the condition is signaled, wait for acquiring the lock. */
private int
await_lock(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    os_ptr op = osp;
    int code = lock_acquire(op - 1, current);

    if (code == 0) {
        pop(2);
        return 0;
    }
    /* We didn't acquire the lock.  Re-execute the wait. */
    push_op_estack(await_lock);
    return code;		/* o_reschedule */
}

/* Activate a list of waiting contexts, and reset the list. */
private void
activate_waiting(gs_scheduler_t *psched, ctx_list_t * pcl)
{
    gs_context_t *pctx = index_context(psched, pcl->head_index);
    gs_context_t *next;

    for (; pctx != 0; pctx = next) {
        next = index_context(psched, pctx->next_index);
        add_last(psched, &psched->active, pctx);
    }
    pcl->head_index = pcl->tail_index = 0;
}

/* ------ Miscellaneous operators ------ */

/* - usertime <int> */
private int
zusertime_context(i_ctx_t *i_ctx_p)
{
    gs_context_t *current = (gs_context_t *)i_ctx_p;
    gs_scheduler_t *psched = current->scheduler;
    os_ptr op = osp;
    long utime = context_usertime();

    push(1);
    if (!current->state.keep_usertime) {
        /*
         * This is the first time this context has executed usertime:
         * we must track its execution time from now on.
         */
        psched->usertime_initial = utime;
        current->state.keep_usertime = true;
    }
    make_int(op, current->state.usertime_total + utime -
             psched->usertime_initial);
    return 0;
}

/* ------ Internal procedures ------ */

/* Create a context. */
private int
context_create(gs_scheduler_t * psched, gs_context_t ** ppctx,
               const gs_dual_memory_t * dmem,
               const gs_context_state_t *i_ctx_p, bool copy_state)
{
    /*
     * Contexts are always created at the outermost save level, so they do
     * not need to be allocated in stable memory for the sake of
     * save/restore.  However, context_reclaim needs to be able to test
     * whether a given context belongs to a given local VM, and allocating
     * contexts in stable local VM avoids the need to scan multiple save
     * levels when making this test.
     */
    gs_memory_t *mem = gs_memory_stable((gs_memory_t *)dmem->space_local);
    gs_context_t *pctx;
    int code;
    long ctx_index;
    gs_context_t **pte;

    pctx = gs_alloc_struct(mem, gs_context_t, &st_context, "context_create");
    if (pctx == 0)
        return_error(e_VMerror);
    if (copy_state) {
        pctx->state = *i_ctx_p;
    } else {
        gs_context_state_t *pctx_st = &pctx->state;

        code = context_state_alloc(&pctx_st, systemdict, dmem);
        if (code < 0) {
            gs_free_object(mem, pctx, "context_create");
            return code;
        }
    }
    ctx_index = gs_next_ids(mem, 1);
    pctx->scheduler = psched;
    pctx->status = cs_active;
    pctx->index = ctx_index;
    pctx->detach = false;
    pctx->saved_local_vm = false;
    pctx->visible = true;
    pctx->next_index = 0;
    pctx->joiner_index = 0;
    pte = &psched->table[ctx_index % CTX_TABLE_SIZE];
    pctx->table_next = *pte;
    *pte = pctx;
    *ppctx = pctx;
    if (gs_debug_c('\'') | gs_debug_c('"'))
        dlprintf2("[']create %ld at 0x%lx\n", ctx_index, (ulong) pctx);
    return 0;
}

/* Check a context ID.  Note that we do not check for context validity. */
private int
context_param(const gs_scheduler_t * psched, os_ptr op, gs_context_t ** ppctx)
{
    gs_context_t *pctx;

    check_type(*op, t_integer);
    pctx = index_context(psched, op->value.intval);
    if (pctx == 0)
        return_error(e_invalidcontext);
    *ppctx = pctx;
    return 0;
}

/* Read the usertime as a single value. */
private long
context_usertime(void)
{
    long secs_ns[2];

    gp_get_usertime(secs_ns);
    return secs_ns[0] * 1000 + secs_ns[1] / 1000000;
}
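/*
 * Worked example (added commentary): if gp_get_usertime reports
 * secs_ns = {12, 345678901}, i.e. 12 s and 345678901 ns, the result is
 * 12 * 1000 + 345678901 / 1000000 = 12000 + 345 = 12345 milliseconds.
 */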
/* Destroy a context. */
private void
context_destroy(gs_context_t * pctx)
{
    gs_ref_memory_t *mem = pctx->state.memory.space_local;
    gs_scheduler_t *psched = pctx->scheduler;
    gs_context_t **ppctx = &psched->table[pctx->index % CTX_TABLE_SIZE];

    while (*ppctx != pctx)
        ppctx = &(*ppctx)->table_next;
    *ppctx = (*ppctx)->table_next;
    if (gs_debug_c('\'') | gs_debug_c('"'))
        dlprintf3("[']destroy %ld at 0x%lx, status = %d\n",
                  pctx->index, (ulong) pctx, pctx->status);
    if (!context_state_free(&pctx->state))
        gs_free_object((gs_memory_t *) mem, pctx, "context_destroy");
}

/* Copy the top elements of one stack to another. */
/* Note that this does not push the elements: */
/* the destination stack must have enough space preallocated. */
private void
stack_copy(ref_stack_t * to, const ref_stack_t * from, uint count,
           uint from_index)
{
    long i;

    for (i = (long)count - 1; i >= 0; --i)
        *ref_stack_index(to, i) = *ref_stack_index(from, i + from_index);
}

/* Acquire a lock.  Return 0 if acquired, o_reschedule if not. */
private int
lock_acquire(os_ptr op, gs_context_t * pctx)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);

    if (plock->holder_index == 0) {
        plock->holder_index = pctx->index;
        plock->scheduler = pctx->scheduler;
        return 0;
    }
    add_last(pctx->scheduler, &plock->waiting, pctx);
    return o_reschedule;
}

/* Release a lock.  Return 0 if OK, e_invalidcontext if not. */
private int
lock_release(ref * op)
{
    gs_lock_t *plock = r_ptr(op, gs_lock_t);
    gs_scheduler_t *psched = plock->scheduler;
    gs_context_t *pctx = index_context(psched, plock->holder_index);

    if (pctx != 0 && pctx == psched->current) {
        plock->holder_index = 0;
        activate_waiting(psched, &plock->waiting);
        return 0;
    }
    return_error(e_invalidcontext);
}

/* ------ Initialization procedure ------ */

/* We need to split the table because of the 16-element limit. */
const op_def zcontext1_op_defs[] = {
    {"0condition", zcondition},
    {"0currentcontext", zcurrentcontext},
    {"1detach", zdetach},
    {"2.fork", zfork},
    {"1join", zjoin},
    {"4.localfork", zlocalfork},
    {"0lock", zlock},
    {"2monitor", zmonitor},
    {"1notify", znotify},
    {"2wait", zwait},
    {"0yield", zyield},
		/* Note that the following replace prior definitions */
		/* in the indicated files: */
    {"0usertime", zusertime_context},	/* zmisc.c */
    op_def_end(0)
};
const op_def zcontext2_op_defs[] = {
		/* Internal operators */
    {"0%fork_done", fork_done},
    {"1%finish_join", finish_join},
    {"0%monitor_cleanup", monitor_cleanup},
    {"0%monitor_release", monitor_release},
    {"2%await_lock", await_lock},
    op_def_end(zcontext_init)
};