gdevprna.c

  1. /* Copyright (C) 1998, 1999 Aladdin Enterprises. All rights reserved.
  2. This file is part of AFPL Ghostscript.
  3. AFPL Ghostscript is distributed with NO WARRANTY OF ANY KIND. No author or
  4. distributor accepts any responsibility for the consequences of using it, or
  5. for whether it serves any particular purpose or works at all, unless he or
  6. she says so in writing. Refer to the Aladdin Free Public License (the
  7. "License") for full details.
  8. Every copy of AFPL Ghostscript must include a copy of the License, normally
  9. in a plain ASCII text file named PUBLIC. The License grants you the right
  10. to copy, modify and redistribute AFPL Ghostscript, but only under certain
  11. conditions described in the License. Among other things, the License
  12. requires that the copyright notice and this notice be preserved on all
  13. copies.
  14. */
  15. /*$Id: gdevprna.c,v 1.2 2000/09/19 19:00:21 lpd Exp $ */
  16. /* Generic asynchronous printer driver support */
  17. /* Initial version 2/1/98 by John Desrosiers (soho@crl.com) */
  18. /* Revised 8/7/98 by L. Peter Deutsch (ghost@aladdin.com) for */
  19. /* memory manager changes */
  20. /* 12/1/98 soho@crl.com - Removed unnecessary flush & reopen in */
  21. /* gdev_prn_async_write_get_hardware_params */
  22. #include "gdevprna.h"
  23. #include "gsalloc.h"
  24. #include "gsdevice.h"
  25. #include "gsmemlok.h"
  26. #include "gsmemret.h"
  27. #include "gsnogc.h"
  28. #include "gxcldev.h"
  29. #include "gxclpath.h"
  30. #include "gxpageq.h"
  31. #include "gzht.h" /* for gx_ht_cache_default_bits */
  32. /* ----------------- Constants ----------------------- */
  33. /*
  34. * Fixed overhead (in bytes) of the memory the renderer runs in (+ driver-spec'd variable bytes):
  35. * empirical & still very subject to change.
  36. */
  37. #define RendererAllocationOverheadBytes 503000 /* minimum is 503,000 as of 4/26/99 */
  38. #ifdef DEBUG
  39. /* 196000 is pretty much the minimum, given 16K phys memfile blocks */
  40. /*# define DebugBandlistMemorySize 196000*/ /* comment out to disable fixed (debug) bandlist size */
  41. #endif /* defined(DEBUG) */
  42. /* ---------------- Standard device procedures ---------------- */
  43. private dev_proc_close_device(gdev_prn_async_write_close_device);
  44. private dev_proc_output_page(gdev_prn_async_write_output_page);
  45. private dev_proc_put_params(gdev_prn_async_write_put_params);
  46. private dev_proc_get_hardware_params(gdev_prn_async_write_get_hardware_params);
  47. private dev_proc_put_params(gdev_prn_async_render_put_params);
  48. /* ---------------- Forward Declarations ---------------------- */
  49. private void gdev_prn_dealloc(P1(gx_device_printer *));
  50. private proc_free_up_bandlist_memory(gdev_prn_async_write_free_up_bandlist_memory);
  51. private int flush_page(P2(gx_device_printer *, bool));
  52. private int reopen_clist_after_flush(P1(gx_device_printer *));
  53. private void reinit_printer_into_printera(P1(gx_device_printer * const));
  54. private int alloc_bandlist_memory(P2(gs_memory_t **, gs_memory_t *));
  55. private void free_bandlist_memory(P1(gs_memory_t *));
  56. private int alloc_render_memory(P3(gs_memory_t **, gs_memory_t *, long));
  57. private void free_render_memory(P1(gs_memory_t *));
  58. private gs_memory_recover_status_t
  59. prna_mem_recover(P2(gs_memory_retrying_t *rmem, void *proc_data));
  60. /* ------ Open/close ------ */
  61. /*
  62. * Open this printer device in ASYNC (overlapped) mode.
  63. * This routine must always be called by the concrete device's xx_open routine
  64. * in lieu of gdev_prn_open.
  65. */
  66. int
  67. gdev_prn_async_write_open(gx_device_printer * pwdev, int max_raster,
  68. int min_band_height, int max_src_image_row)
  69. {
  70. gx_device *const pdev = (gx_device *) pwdev;
  71. int code;
  72. bool writer_is_open = false;
  73. gx_device_clist_writer *const pcwdev =
  74. &((gx_device_clist *) pwdev)->writer;
  75. gx_device_clist_reader *pcrdev = 0;
  76. gx_device_printer *prdev = 0;
  77. gs_memory_t *render_memory = 0; /* renderer's mem allocator */
  78. pwdev->page_queue = 0;
  79. pwdev->bandlist_memory = 0;
  80. pwdev->async_renderer = 0;
  81. /* allocate & init render memory */
  82. /* The big memory consumers are: */
  83. /* - the buffer used to read images from the command list */
  84. /* - buffer used by gx_real_default_strip_copy_rop() */
  85. /* - line pointer tables for memory devices used in plane extraction */
  86. /* - the halftone cache */
  87. /* - the band rendering buffer */
  88. /* The * 2's in the next statement are a ****** HACK ****** to deal with */
  89. /* sandbars in the memory manager. */
  90. if ((code = alloc_render_memory(&render_memory,
  91. &gs_memory_default, RendererAllocationOverheadBytes + max_raster
  92. /* the first * 2 is not a hack */
  93. + (max_raster + sizeof(void *) * 2) * min_band_height
  94. + max_src_image_row + gx_ht_cache_default_bits() * 2)) < 0)
  95. goto open_err;
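/*
 * Rough worked example of the request above (hypothetical numbers, for
 * orientation only): for a 1-bit-deep device 5100 pixels wide, max_raster
 * is about 640 bytes; with min_band_height = 32 and max_src_image_row =
 * 2400, the allocation comes to roughly
 *     503000 + 640 + (640 + 2 * sizeof(void *)) * 32 + 2400
 *         + 2 * gx_ht_cache_default_bits()
 * i.e. a little over half a megabyte plus twice the default halftone
 * cache size.
 */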
  96. /* Alloc & init bandlist allocators */
  97. /* Bandlist mem is threadsafe & common to rdr/wtr, so it's used */
  98. /* for page queue & cmd list buffers. */
  99. if ((code = alloc_bandlist_memory
  100. (&pwdev->bandlist_memory, &gs_memory_default)) < 0)
  101. goto open_err;
  102. /* Dictate banding parameters for both renderer & writer */
  103. /* Protect from user change, since user changing these won't be */
  104. /* detected, ergo the necessary close/reallocate/open wouldn't happen. */
  105. pwdev->space_params.banding_type = BandingAlways;
  106. pwdev->space_params.params_are_read_only = true;
  107. /* Make a copy of the device for use as the rendering device before opening the writer */
  108. code = gs_copydevice((gx_device **) & prdev, pdev, render_memory);
  109. pcrdev = &((gx_device_clist *) prdev)->reader;
  110. if (code < 0)
  111. goto open_err;
  112. /* -------------- Open cmd list WRITER instance of device ------- */
  113. /* --------------------------------------------------------------- */
  114. /* This is wrong, because it causes the same thing in the renderer */
  115. pwdev->OpenOutputFile = 0; /* Don't open output file in writer */
  116. /* Hack: set this vector to tell gdev_prn_open to allocate for async rendering */
  117. pwdev->free_up_bandlist_memory = &gdev_prn_async_write_free_up_bandlist_memory;
  118. /* prevent clist writer from queuing path graphics & force it to split images */
  119. pwdev->clist_disable_mask |= clist_disable_fill_path |
  120. clist_disable_stroke_path | clist_disable_complex_clip |
  121. clist_disable_nonrect_hl_image | clist_disable_pass_thru_params;
  122. if ((code = gdev_prn_open(pdev)) >= 0) {
  123. writer_is_open = true;
  124. /* set up constant async-specific fields in device */
  125. reinit_printer_into_printera(pwdev);
  126. /* keep ptr to renderer device */
  127. pwdev->async_renderer = prdev;
  128. /* Allocate the page queue, then initialize it */
  129. /* Use bandlist memory since it's shared between rdr & wtr */
  130. if ((pwdev->page_queue = gx_page_queue_alloc(pwdev->bandlist_memory)) == 0)
  131. code = gs_note_error(gs_error_VMerror);
  132. else
  133. /* Allocate from clist allocator since it is thread-safe */
  134. code = gx_page_queue_init(pwdev->page_queue, pwdev->bandlist_memory);
  135. }
  136. /* ------------ Open cmd list RENDERER instance of device ------- */
  137. /* --------------------------------------------------------------- */
  138. if (code >= 0) {
  139. gx_semaphore_t *open_semaphore;
  140. /* Force writer's actual band params into reader's requested params */
  141. prdev->space_params.band = pcwdev->page_info.band_params;
  142. /* copydevice has already set up prdev->memory = render_memory */
  143. /* prdev->bandlist_memory = pwdev->bandlist_memory; */
  144. prdev->buffer_memory = prdev->memory;
  145. /* enable renderer to accept changes to params computed by writer */
  146. prdev->space_params.params_are_read_only = false;
  147. /* page queue is common to both devices */
  148. prdev->page_queue = pwdev->page_queue;
  149. /* Start renderer thread & wait for its successful open of device */
  150. if (!(open_semaphore = gx_semaphore_alloc(&gs_memory_default)))
  151. code = gs_note_error(gs_error_VMerror);
  152. else {
  153. gdev_prn_start_render_params thread_params;
  154. thread_params.writer_device = pwdev;
  155. thread_params.open_semaphore = open_semaphore;
  156. thread_params.open_code = 0;
  157. code = (*pwdev->printer_procs.start_render_thread)
  158. (&thread_params);
  159. if (code >= 0)
  160. gx_semaphore_wait(open_semaphore);
  161. code = thread_params.open_code;
  162. gx_semaphore_free(open_semaphore);
  163. }
  164. }
  165. /* ----- Set the recovery procedure for the mem allocator ----- */
  166. if (code >= 0) {
  167. gs_memory_retrying_set_recover(
  168. (gs_memory_retrying_t *)&gs_memory_default,
  169. prna_mem_recover,
  170. (void *)pcwdev
  171. );
  172. }
  173. /* --------------------- Wrap up --------------------------------- */
  174. /* --------------------------------------------------------------- */
  175. if (code < 0) {
  176. open_err:
  177. /* error mop-up */
  178. if (render_memory && !prdev)
  179. free_render_memory(render_memory);
  180. gdev_prn_dealloc(pwdev);
  181. if (writer_is_open) {
  182. gdev_prn_close(pdev);
  183. pwdev->free_up_bandlist_memory = 0;
  184. }
  185. }
  186. return code;
  187. }
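/*
 * For orientation only: a concrete async driver's open procedure is
 * expected to call gdev_prn_async_write_open (above) in place of
 * gdev_prn_open.  A minimal sketch -- all xx_* names are hypothetical
 * placeholders, not part of this file -- might look like:
 *
 *	private int
 *	xx_open(gx_device *pdev)
 *	{
 *	    gx_device_printer *const ppdev = (gx_device_printer *)pdev;
 *
 *	    return gdev_prn_async_write_open(ppdev,
 *					     xx_max_raster(ppdev),
 *					     xx_min_band_height(ppdev),
 *					     xx_max_src_image_row(ppdev));
 *	}
 */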
  188. /* This procedure is called from within the memory allocator when regular */
  189. /* mallocs fail -- it tries to free up pages from the queue */
  190. /* and returns a status code indicating whether any more can be freed. */
  191. private gs_memory_recover_status_t
  192. prna_mem_recover(gs_memory_retrying_t *rmem, void *proc_data)
  193. {
  194. int pages_remain = 0;
  195. gx_device_clist_writer *cldev = proc_data;
  196. if (cldev->free_up_bandlist_memory != NULL)
  197. pages_remain =
  198. (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, false );
  199. return (pages_remain > 0) ? RECOVER_STATUS_RETRY_OK : RECOVER_STATUS_NO_RETRY;
  200. }
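/*
 * Sketch of the intended interaction (an illustration of the idea only, not
 * the retrying allocator's actual code): when an allocation fails, the
 * retrying wrapper calls the recovery procedure installed by
 * gdev_prn_async_write_open and retries only while the procedure reports
 * that more pages can still be freed, roughly:
 *
 *	do {
 *	    ptr = try_the_allocation();	/+ hypothetical placeholder +/
 *	} while (ptr == 0 &&
 *		 (*recover_proc)(rmem, proc_data) == RECOVER_STATUS_RETRY_OK);
 */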
  201. /* (Re)set printer device fields which get trampled by gdevprn_open & put_params */
  202. private void
  203. reinit_printer_into_printera(
  204. gx_device_printer * const pdev /* printer to convert */
  205. )
  206. {
  207. /* Change some of the procedure vector to point at async procedures */
  208. /* Originals were already saved by gdev_prn_open */
  209. if (dev_proc(pdev, close_device) == gdev_prn_close)
  210. set_dev_proc(pdev, close_device, gdev_prn_async_write_close_device);
  211. set_dev_proc(pdev, output_page, gdev_prn_async_write_output_page);
  212. set_dev_proc(pdev, put_params, gdev_prn_async_write_put_params);
  213. set_dev_proc(pdev, get_xfont_procs, gx_default_get_xfont_procs);
  214. set_dev_proc(pdev, get_xfont_device, gx_default_get_xfont_device);
  215. set_dev_proc(pdev, get_hardware_params, gdev_prn_async_write_get_hardware_params);
  216. /* clist writer calls this if it runs out of memory & wants to retry */
  217. pdev->free_up_bandlist_memory = &gdev_prn_async_write_free_up_bandlist_memory;
  218. }
  219. /* Generic closing for the writer device. */
  220. private int
  221. gdev_prn_async_write_close_device(gx_device * pdev)
  222. {
  223. gx_device_printer *const pwdev = (gx_device_printer *) pdev;
  224. /* Signal render thread to close & terminate when done */
  225. gx_page_queue_add_page(pwdev->page_queue,
  226. GX_PAGE_QUEUE_ACTION_TERMINATE, 0, 0);
  227. /* Wait for renderer to finish all pages & terminate req */
  228. gx_page_queue_wait_until_empty(pwdev->page_queue);
  229. /* Cascade down to original close rtn */
  230. gdev_prn_close(pdev);
  231. pwdev->free_up_bandlist_memory = 0;
  232. /* Deallocate dynamic stuff */
  233. gdev_prn_dealloc(pwdev);
  234. return 0;
  235. }
  236. /* Deallocate dynamic memory attached to device. Aware of a possibly incomplete open */
  237. private void
  238. gdev_prn_dealloc(gx_device_printer * pwdev)
  239. {
  240. gx_device_printer *const prdev = pwdev->async_renderer;
  241. /* Delete renderer device & its memory allocator */
  242. if (prdev) {
  243. gs_memory_t *render_alloc = prdev->memory;
  244. gs_free_object(render_alloc, prdev, "gdev_prn_dealloc");
  245. free_render_memory(render_alloc);
  246. }
  247. /* Free page queue */
  248. if (pwdev->page_queue) {
  249. gx_page_queue_dnit(pwdev->page_queue);
  250. gs_free_object(pwdev->bandlist_memory, pwdev->page_queue,
  251. "gdev_prn_dealloc");
  252. pwdev->page_queue = 0;
  253. }
  254. /* Free memory bandlist allocators */
  255. if (pwdev->bandlist_memory)
  256. free_bandlist_memory(pwdev->bandlist_memory);
  257. }
  258. /* Open the render portion of a printer device in ASYNC (overlapped) mode.
  259. * This routine is always called by the concrete device's xx_open_render_device
  260. * in lieu of gdev_prn_open.
  261. */
  262. int
  263. gdev_prn_async_render_open(gx_device_printer * prdev)
  264. {
  265. gx_device *const pdev = (gx_device *) prdev;
  266. prdev->is_async_renderer = true;
  267. return gdev_prn_open(pdev);
  268. }
  269. /* Generic closing for the rendering device. */
  270. int
  271. gdev_prn_async_render_close_device(gx_device_printer * prdev)
  272. {
  273. gx_device *const pdev = (gx_device *) prdev;
  274. return gdev_prn_close(pdev);
  275. }
  276. /* (Re)set renderer device fields which get trampled by gdevprn_open & put_params */
  277. private void
  278. reinit_printer_into_renderer(
  279. gx_device_printer * const pdev /* printer to convert */
  280. )
  281. {
  282. set_dev_proc(pdev, put_params, gdev_prn_async_render_put_params);
  283. }
  284. /* ---------- Start rasterizer thread ------------ */
  285. /*
  286. * Must be called by async device driver implementation (see gdevprna.h
  287. * under "Synchronizing the Instances"). This is the rendering loop, which
  288. * requires its own thread for as long as the device is open. This proc only
  289. * returns after the device is closed, or if the open failed. NB that an
  290. * open error leaves things in a state where the writer thread will not be
  291. * able to close since it's expecting the renderer to acknowledge its
  292. * requests before the writer can close. Ergo, if this routine fails you'll
  293. * crash unless the caller fixes the problem & successfully retries this.
  294. */
  295. int /* rets 0 ok, -ve error code if open failed */
  296. gdev_prn_async_render_thread(
  297. gdev_prn_start_render_params * params
  298. )
  299. {
  300. gx_device_printer *const pwdev = params->writer_device;
  301. gx_device_printer *const prdev = pwdev->async_renderer;
  302. gx_page_queue_entry_t *entry;
  303. int code;
  304. /* Open the device; use the async open unless the user overrode open_render_device */
  305. if (prdev->printer_procs.open_render_device ==
  306. gx_default_open_render_device)
  307. code = gdev_prn_async_render_open(prdev);
  308. else
  309. code = (*prdev->printer_procs.open_render_device) (prdev);
  310. reinit_printer_into_renderer(prdev);
  311. /* The cmd list logic assumes reader's & writer's tile caches are same size */
  312. if (code >= 0 &&
  313. ((gx_device_clist *) pwdev)->writer.page_tile_cache_size !=
  314. ((gx_device_clist *) prdev)->writer.page_tile_cache_size) {
  315. gdev_prn_async_render_close_device(prdev);
  316. code = gs_note_error(gs_error_VMerror);
  317. }
  318. params->open_code = code;
  319. gx_semaphore_signal(params->open_semaphore);
  320. if (code < 0)
  321. return code;
  322. /* fake open, since not called by gs_opendevice */
  323. prdev->is_open = true;
  324. /* Successful open */
  325. while ((entry = gx_page_queue_start_dequeue(prdev->page_queue))
  326. && entry->action != GX_PAGE_QUEUE_ACTION_TERMINATE) {
  327. /* Force printer open again if it mysteriously closed. */
  328. /* This shouldn't ever happen, but... */
  329. if (!prdev->is_open) {
  330. if (prdev->printer_procs.open_render_device ==
  331. gx_default_open_render_device)
  332. code = gdev_prn_async_render_open(prdev);
  333. else
  334. code = (*prdev->printer_procs.open_render_device) (prdev);
  335. reinit_printer_into_renderer(prdev);
  336. if (code >= 0) {
  337. prdev->is_open = true;
  338. gdev_prn_output_page((gx_device *) prdev, 0, true);
  339. }
  340. }
  341. if (prdev->is_open) {
  342. /* Force retrieved entry onto render device */
  343. ((gx_device_clist *) prdev)->common.page_info = entry->page_info;
  344. /* Set up device geometry */
  345. if (clist_setup_params((gx_device *) prdev) >= 0)
  346. /* Do this again, since setup_params may have trashed it */
  347. ((gx_device_clist *) prdev)->common.page_info = entry->page_info;
  348. /* Call appropriate renderer routine to deal w/buffer */
  349. /* Ignore status, since we don't know how to deal w/errors! */
  350. switch (entry->action) {
  351. case GX_PAGE_QUEUE_ACTION_FULL_PAGE:
  352. (*dev_proc(prdev, output_page))((gx_device *) prdev,
  353. entry->num_copies, true);
  354. break;
  355. case GX_PAGE_QUEUE_ACTION_PARTIAL_PAGE:
  356. case GX_PAGE_QUEUE_ACTION_COPY_PAGE:
  357. (*dev_proc(prdev, output_page))((gx_device *) prdev,
  358. entry->num_copies, false);
  359. break;
  360. }
  361. /*
  362. * gx_page_queue_finish_dequeue will close and free the band
  363. * list files, so we don't need to call clist_close_output_file.
  364. */
  365. }
  366. /* Finalize dequeue & free retrieved queue entry */
  367. gx_page_queue_finish_dequeue(entry);
  368. }
  369. /* Close the device; use the async close unless the user overrode close_render_device. */
  370. /* Ignore the close status, since a negative return from this routine means the open failed */
  371. if (prdev->printer_procs.close_render_device ==
  372. gx_default_close_render_device)
  373. gdev_prn_async_render_close_device(prdev);
  374. else
  375. (*prdev->printer_procs.close_render_device)(prdev);
  376. /* undo fake open, since not called by gs_closedevice */
  377. prdev->is_open = false;
  378. /* Now that device is closed, acknowledge gx_page_queue_terminate */
  379. gx_page_queue_finish_dequeue(entry);
  380. return 0;
  381. }
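/*
 * The printer_procs.start_render_thread procedure supplied by the concrete
 * driver is expected to spawn a thread whose body is simply a call to
 * gdev_prn_async_render_thread above.  A minimal sketch -- the thread-creation
 * call and the xx_* names are hypothetical stand-ins for whatever platform
 * primitive the driver actually uses:
 *
 *	private void
 *	xx_render_thread_body(void *params)
 *	{
 *	    gdev_prn_async_render_thread(
 *		(gdev_prn_start_render_params *)params);
 *	}
 *
 *	private int
 *	xx_start_render_thread(gdev_prn_start_render_params *params)
 *	{
 *	    return xx_create_thread(xx_render_thread_body, params);
 *	}
 */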
  382. /* ------ Get/put parameters ------ */
  383. /* Put parameters. */
  384. private int
  385. gdev_prn_async_write_put_params(gx_device * pdev, gs_param_list * plist)
  386. {
  387. gx_device_clist_writer *const pclwdev =
  388. &((gx_device_clist *) pdev)->writer;
  389. gx_device_printer *const pwdev = (gx_device_printer *) pdev;
  390. gdev_prn_space_params save_sp = pwdev->space_params;
  391. int save_height = pwdev->height;
  392. int save_width = pwdev->width;
  393. int code, ecode;
  394. if (!pwdev->is_open)
  395. return (*pwdev->orig_procs.put_params) (pdev, plist);
  396. /*
  397. * First, cascade to real device's put_params.
  398. * If that put_params made any changes that require re-opening
  399. * the device, just flush the page; the parameter block at the
  400. * head of the next page will reflect the changes just made.
  401. * If the put_params requires no re-open, just slip it into the
  402. * stream in the command buffer. This way, the
  403. * writer device should parallel the renderer status at the same point
  404. * in their respective executions.
  405. *
  406. * NB. that all this works only because we take the position that
  407. * put_params can make no change that actually affects hardware's state
  408. * before the final output_page on the RASTERIZER.
  409. */
  410. /* Call original procedure, but "closed" to prevent closing device */
  411. pwdev->is_open = false; /* prevent put_params from closing device */
  412. code = (*pwdev->orig_procs.put_params) (pdev, plist);
  413. pwdev->is_open = true;
  414. pwdev->OpenOutputFile = 0; /* This is wrong, because it causes the same thing in the renderer */
  415. /* Flush device or emit to command list, depending if device changed geometry */
  416. if (memcmp(&pwdev->space_params, &save_sp, sizeof(save_sp)) != 0 ||
  417. pwdev->width != save_width || pwdev->height != save_height
  418. ) {
  419. int pageq_remaining;
  420. int new_width = pwdev->width;
  421. int new_height = pwdev->height;
  422. gdev_prn_space_params new_sp = pwdev->space_params;
  423. /* Need to start a new page, reallocate clist memory */
  424. pwdev->width = save_width;
  425. pwdev->height = save_height;
  426. pwdev->space_params = save_sp;
  427. /* First, get rid of any pending partial pages */
  428. code = flush_page(pwdev, false);
  429. /* Free and reallocate the printer memory. */
  430. pageq_remaining = 1; /* assume there are pages left in queue */
  431. do {
  432. ecode =
  433. gdev_prn_reallocate_memory(pdev,
  434. &new_sp, new_width, new_height);
  435. if (ecode >= 0)
  436. break; /* managed to recover enough memory */
  437. if (!pdev->is_open) {
  438. /* Disaster! Device was forced closed, which async drivers */
  439. /* aren't supposed to do. */
  440. gdev_prn_async_write_close_device(pdev);
  441. return ecode; /* the caller is supposed to know the device could be closed now */
  442. }
  443. pclwdev->error_is_retryable = (ecode == gs_error_VMerror);
  444. }
  445. while (pageq_remaining >= 1 &&
  446. (pageq_remaining = ecode =
  447. clist_VMerror_recover(pclwdev, ecode)) >= 0);
  448. if (ecode < 0) {
  449. gdev_prn_free_memory(pdev);
  450. pclwdev->is_open = false;
  451. code = ecode;
  452. }
  453. } else if (code >= 0) {
  454. do
  455. if ((ecode = cmd_put_params(pclwdev, plist)) >= 0)
  456. break;
  457. while ((ecode = clist_VMerror_recover(pclwdev, ecode)) >= 0);
  458. if (ecode < 0 && pclwdev->error_is_retryable &&
  459. pclwdev->driver_call_nesting == 0
  460. )
  461. ecode = clist_VMerror_recover_flush(pclwdev, ecode);
  462. if (ecode < 0)
  463. code = ecode;
  464. }
  465. /* Reset fields that got trashed by gdev_prn_put_params and/or gdev_prn_open */
  466. reinit_printer_into_printera(pwdev);
  467. return code;
  468. }
  469. /* Get hardware-detected params. Drain page queue, then call renderer version */
  470. private int
  471. gdev_prn_async_write_get_hardware_params(gx_device * pdev, gs_param_list * plist)
  472. {
  473. gx_device_printer *const pwdev = (gx_device_printer *) pdev;
  474. gx_device_printer *const prdev = pwdev->async_renderer;
  475. if (!pwdev->is_open || !prdev)
  476. /* if not open, just use device's get hw params */
  477. return (dev_proc(pwdev, get_hardware_params))(pdev, plist);
  478. else {
  479. /* wait for empty pipeline */
  480. gx_page_queue_wait_until_empty(pwdev->page_queue);
  481. /* get reader's h/w params, now that writer & reader are sync'ed */
  482. return (dev_proc(prdev, get_hardware_params))
  483. ((gx_device *) prdev, plist);
  484. }
  485. }
  486. /* Put parameters on RENDERER. */
  487. private int /* returns -ve err code only if FATAL error (can't keep rendering) */
  488. gdev_prn_async_render_put_params(gx_device * pdev, gs_param_list * plist)
  489. {
  490. gx_device_printer *const prdev = (gx_device_printer *) pdev;
  491. bool save_is_open = prdev->is_open;
  492. /* put_params from clist are guaranteed to never re-init device */
  493. /* They're also pretty much guaranteed to always succeed */
  494. (*prdev->orig_procs.put_params) (pdev, plist);
  495. /* If device closed itself, try to open & clear it */
  496. if (!prdev->is_open && save_is_open) {
  497. int code;
  498. if (prdev->printer_procs.open_render_device ==
  499. gx_default_open_render_device)
  500. code = gdev_prn_async_render_open(prdev);
  501. else
  502. code = (*prdev->printer_procs.open_render_device) (prdev);
  503. reinit_printer_into_renderer(prdev);
  504. if (code >= 0)
  505. /****** CLEAR PAGE SOMEHOW ******/;
  506. else
  507. return code; /* this'll cause clist to stop processing this band! */
  508. }
  509. return 0; /* return this unless FATAL status */
  510. }
  511. /* ------ Others ------ */
  512. /* Output page: the finished band list is added to the page queue for later rasterizing */
  513. private int
  514. gdev_prn_async_write_output_page(gx_device * pdev, int num_copies, int flush)
  515. {
  516. gx_device_printer *const pwdev = (gx_device_printer *) pdev;
  517. gx_device_clist_writer *const pcwdev =
  518. &((gx_device_clist *) pdev)->writer;
  519. int flush_code;
  520. int add_code;
  521. int open_code;
  522. int one_last_time = 1;
  523. /* do NOT close files before sending to page queue */
  524. flush_code = clist_end_page(pcwdev);
  525. add_code = gx_page_queue_add_page(pwdev->page_queue,
  526. (flush ? GX_PAGE_QUEUE_ACTION_FULL_PAGE :
  527. GX_PAGE_QUEUE_ACTION_COPY_PAGE),
  528. &pcwdev->page_info, num_copies);
  529. if (flush && (flush_code >= 0) && (add_code >= 0)) {
  530. /* This page is finished */
  531. gx_finish_output_page(pdev, num_copies, flush);
  532. }
  533. /* Open new band files to take the place of ones added to page queue */
  534. while ((open_code = (*gs_clist_device_procs.open_device)
  535. ((gx_device *) pdev)) == gs_error_VMerror) {
  536. /* Open failed; try again after a page gets rendered */
  537. if (!gx_page_queue_wait_one_page(pwdev->page_queue)
  538. && one_last_time-- <= 0)
  539. break;
  540. }
  541. return
  542. (flush_code < 0 ? flush_code : open_code < 0 ? open_code :
  543. add_code < 0 ? add_code : 0);
  544. }
  545. /* Free up bandlist memory: wait until the rasterizer has run enough to free some memory */
  546. private int /* -ve err, 0 if no pages remain to rasterize, 1 if more pages to go */
  547. gdev_prn_async_write_free_up_bandlist_memory(gx_device * pdev, bool flush_current)
  548. {
  549. gx_device_printer *const pwdev = (gx_device_printer *) pdev;
  550. if (flush_current) {
  551. int code = flush_page(pwdev, true);
  552. if (code < 0)
  553. return code;
  554. }
  555. return gx_page_queue_wait_one_page(pwdev->page_queue);
  556. }
  557. /* -------- Utility Routines --------- */
  558. /* Flush out any partial pages accumulated in device */
  559. /* LEAVE DEVICE in a state where it must be re-opened/re-init'd */
  560. private int /* ret 0 ok no flush, -ve error code */
  561. flush_page(
  562. gx_device_printer * pwdev, /* async writer device to flush */
  563. bool partial /* true if only partial page */
  564. )
  565. {
  566. gx_device_clist *const pcldev = (gx_device_clist *) pwdev;
  567. gx_device_clist_writer *const pcwdev = &pcldev->writer;
  568. int flush_code = 0;
  569. int add_code = 0;
  570. /* do NOT close files before sending to page queue */
  571. flush_code = clist_end_page(pcwdev);
  572. add_code = gx_page_queue_add_page(pwdev->page_queue,
  573. (partial ? GX_PAGE_QUEUE_ACTION_PARTIAL_PAGE :
  574. GX_PAGE_QUEUE_ACTION_FULL_PAGE),
  575. &pcwdev->page_info, 0);
  576. /* Device no longer has BANDFILES, so it must be re-init'd by caller */
  577. pcwdev->page_info.bfile = pcwdev->page_info.cfile = 0;
  578. /* Return the worse of the two status codes. */
  579. if (flush_code < 0)
  580. return flush_code;
  581. return add_code;
  582. }
  583. /* Flush any pending partial pages, re-open device */
  584. private int
  585. reopen_clist_after_flush(
  586. gx_device_printer * pwdev /* async writer device to flush */
  587. )
  588. {
  589. int open_code;
  590. int one_last_time = 1;
  591. /* Open new band files to take the place of ones added to page queue */
  592. while ((open_code = (*gs_clist_device_procs.open_device)
  593. ((gx_device *) pwdev)) == gs_error_VMerror) {
  594. /* Open failed; try again after a page gets rendered */
  595. if (!gx_page_queue_wait_one_page(pwdev->page_queue)
  596. && one_last_time-- <= 0)
  597. break;
  598. }
  599. return open_code;
  600. }
  601. /*
  602. * The bandlist does allocations on the writer's thread & deallocations on
  603. * the reader's thread, so it needs to have mutual exclusion from itself, as
  604. * well as from other memory allocators since the reader can run at the same
  605. * time as the interpreter. The bandlist allocator therefore consists of
  606. * a monitor-locking wrapper around either a direct heap allocator or (for
  607. * testing) a fixed-limit allocator.
  608. */
  609. /* Create a bandlist allocator. */
  610. private int
  611. alloc_bandlist_memory(gs_memory_t ** final_allocator,
  612. gs_memory_t * base_allocator)
  613. {
  614. gs_memory_t *data_allocator = 0;
  615. gs_memory_locked_t *locked_allocator = 0;
  616. int code = 0;
  617. #if defined(DEBUG) && defined(DebugBandlistMemorySize)
  618. code = alloc_render_memory(&data_allocator, base_allocator,
  619. DebugBandlistMemorySize);
  620. if (code < 0)
  621. return code;
  622. #else
  623. data_allocator = (gs_memory_t *)gs_malloc_memory_init();
  624. if (!data_allocator)
  625. return_error(gs_error_VMerror);
  626. #endif
  627. locked_allocator = (gs_memory_locked_t *)
  628. gs_alloc_bytes_immovable(data_allocator, sizeof(gs_memory_locked_t),
  629. "alloc_bandlist_memory(locked allocator)");
  630. if (!locked_allocator)
  631. goto alloc_err;
  632. code = gs_memory_locked_init(locked_allocator, data_allocator);
  633. if (code < 0)
  634. goto alloc_err;
  635. *final_allocator = (gs_memory_t *)locked_allocator;
  636. return 0;
  637. alloc_err:
  638. if (locked_allocator)
  639. free_bandlist_memory((gs_memory_t *)locked_allocator);
  640. else if (data_allocator)
  641. gs_memory_free_all(data_allocator, FREE_ALL_EVERYTHING,
  642. "alloc_bandlist_memory(data allocator)");
  643. return (code < 0 ? code : gs_note_error(gs_error_VMerror));
  644. }
  645. /* Free a bandlist allocator. */
  646. private void
  647. free_bandlist_memory(gs_memory_t *bandlist_allocator)
  648. {
  649. gs_memory_locked_t *const lmem = (gs_memory_locked_t *)bandlist_allocator;
  650. gs_memory_t *data_mem = gs_memory_locked_target(lmem);
  651. gs_memory_free_all(bandlist_allocator,
  652. FREE_ALL_STRUCTURES | FREE_ALL_ALLOCATOR,
  653. "free_bandlist_memory(locked allocator)");
  654. if (data_mem)
  655. gs_memory_free_all(data_mem, FREE_ALL_EVERYTHING,
  656. "free_bandlist_memory(data allocator)");
  657. }
  658. /* Create an allocator with a fixed memory limit. */
  659. private int
  660. alloc_render_memory(gs_memory_t **final_allocator,
  661. gs_memory_t *base_allocator, long space)
  662. {
  663. gs_ref_memory_t *rmem =
  664. ialloc_alloc_state((gs_raw_memory_t *)base_allocator, space);
  665. vm_spaces spaces;
  666. int i, code;
  667. if (rmem == 0)
  668. return_error(gs_error_VMerror);
  669. code = ialloc_add_chunk(rmem, space, "alloc_render_memory");
  670. if (code < 0) {
  671. gs_memory_free_all((gs_memory_t *)rmem, FREE_ALL_EVERYTHING,
  672. "alloc_render_memory");
  673. return code;
  674. }
  675. *final_allocator = (gs_memory_t *)rmem;
  676. /* Call the reclaim procedure to delete the string marking tables */
  677. /* Only need this once since no other chunks will ever exist */
  678. for ( i = 0; i < countof(spaces_indexed); ++i )
  679. spaces_indexed[i] = 0;
  680. space_local = space_global = (gs_ref_memory_t *)rmem;
  681. spaces.vm_reclaim = gs_nogc_reclaim; /* no real GC on this chunk */
  682. GS_RECLAIM(&spaces, false);
  683. return 0;
  684. }
  685. /* Free an allocator with a fixed memory limit. */
  686. private void
  687. free_render_memory(gs_memory_t *render_allocator)
  688. {
  689. if (render_allocator)
  690. gs_memory_free_all(render_allocator, FREE_ALL_EVERYTHING,
  691. "free_render_memory");
  692. }