/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
 */

/* #include "bzlib_private.h" */

#if BZIP2_SPEED >= 5
# define ALWAYS_INLINE_5 ALWAYS_INLINE
#else
# define ALWAYS_INLINE_5 /*nothing*/
#endif

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}
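/*
 * Invariant of the writers below (a sketch, read off the code): bsBuff
 * holds bsLive valid bits in its most-significant end, bits below the
 * live region are zero, and whole bytes are flushed from the top
 * (bsBuff >> 24) into the output at posZ.
 */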
/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		*s->posZ++ = (uint8_t)(s->bsBuff >> 24);
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}
/*---------------------------------------------------*/
static
/* Helps only on level 5, on other levels it hurts. Why? */
ALWAYS_INLINE_5
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		*s->posZ++ = (uint8_t)(s->bsBuff >> 24);
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
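/*
 * Example, for illustration only: with bsLive = 3 and n = 4, v lands at
 * bit positions 28..25 (shift count 32 - 3 - 4 = 25), directly below the
 * three already-buffered bits, and bsLive becomes 7. The flush loop keeps
 * bsLive <= 7, so the shift count stays non-negative for any n <= 24;
 * callers here never pass n larger than 24 (origPtr).
 */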
/* Same with n == 16: */
static
ALWAYS_INLINE_5
void bsW16(EState* s, uint32_t v)
{
	while (s->bsLive >= 8) {
		*s->posZ++ = (uint8_t)(s->bsBuff >> 24);
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (16 - s->bsLive));
	s->bsLive += 16;
}

/* Same with n == 1: */
static
ALWAYS_INLINE /* one callsite */
void bsW1_1(EState* s)
{
	/* need space for only 1 bit, no need for loop freeing > 8 bits */
	if (s->bsLive >= 8) {
		*s->posZ++ = (uint8_t)(s->bsBuff >> 24);
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (1 << (31 - s->bsLive));
	s->bsLive += 1;
}
static
ALWAYS_INLINE_5
void bsW1_0(EState* s)
{
	/* need space for only 1 bit, no need for loop freeing > 8 bits */
	if (s->bsLive >= 8) {
		*s->posZ++ = (uint8_t)(s->bsBuff >> 24);
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	//s->bsBuff |= (0 << (31 - s->bsLive));
	s->bsLive += 1;
}
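/*
 * Note: bsW1_0() can emit a zero bit just by bumping bsLive, because the
 * invariant above keeps every bit below the live region zero already
 * (the commented-out OR is a no-op).
 */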
/*---------------------------------------------------*/
static ALWAYS_INLINE
void bsPutU16(EState* s, unsigned u)
{
	bsW16(s, u);
}

/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	//bsW(s, 32, u); // can't use: may try "uint32 << -n"
	bsW16(s, (u >> 16) & 0xffff);
	bsW16(s, u & 0xffff);
}
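/*
 * Why two 16-bit halves: in bsW() the shift count is 32 - bsLive - n,
 * which for n == 32 goes negative whenever bsLive > 0 (undefined
 * behavior in C), hence the split above.
 */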
/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	unsigned cnt = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = cnt;
			cnt++;
		}
	}
	s->nInUse = cnt;
}
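/*
 * makeMaps_e() compacts the alphabet: unseqToSeq[] maps each byte value
 * that actually occurs in the block to a dense code in 0..nInUse-1, so
 * the MTF stage below only has to handle nInUse symbols.
 */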
/*---------------------------------------------------*/
/*
 * This bit of code is performance-critical.
 * On 32bit x86, gcc-6.3.0 was observed to spill ryy_j to stack,
 * resulting in abysmal performance (x3 slowdown).
 * Forcing it into a separate function alleviates register pressure,
 * and spillage no longer happens.
 * Other versions of gcc do not exhibit this problem, but out-of-line code
 * seems to be helping them too (code is both smaller and faster).
 * Therefore NOINLINE is enabled for the entire 32bit x86 arch for now,
 * without a check for gcc version.
 */
static
#if defined __i386__
NOINLINE
#endif
int inner_loop(uint8_t *yy, uint8_t ll_i)
{
	register uint8_t rtmp;
	register uint8_t* ryy_j;

	rtmp = yy[1];
	yy[1] = yy[0];
	ryy_j = &(yy[1]);
	while (ll_i != rtmp) {
		register uint8_t rtmp2;
		ryy_j++;
		rtmp2 = rtmp;
		rtmp = *ryy_j;
		*ryy_j = rtmp2;
	}
	yy[0] = rtmp;
	return ryy_j - &(yy[0]);
}
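/*
 * inner_loop() is one move-to-front step: it bubbles ll_i toward the front
 * of yy[], shifting the symbols ahead of it back by one, and returns ll_i's
 * former position (which the caller offsets by +1 to form the MTF code).
 */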
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int i;
	int zPend;
	int32_t wr;

	/*
	 * After sorting (eg, here),
	 * s->arr1[0 .. s->nblock-1] holds sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 *
	 * Because there are never more MTF values than block values,
	 * ptr values in this area are overwritten with MTF values
	 * only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr = s->ptr;

	makeMaps_e(s);

	wr = 0;
	zPend = 0;
	for (i = 0; i <= s->nInUse+1; i++)
		s->mtfFreq[i] = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;
	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i = ll_i; /* gcc 4.3.1 thinks it may be used w/o init */
		int32_t j;

		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[s->block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

		if (yy[0] == ll_i) {
			zPend++;
			continue;
		}

		if (zPend > 0) {
 process_zPend:
			zPend--;
			while (1) {
#if 0
				if (zPend & 1) {
					s->mtfv[wr] = BZ_RUNB; wr++;
					s->mtfFreq[BZ_RUNB]++;
				} else {
					s->mtfv[wr] = BZ_RUNA; wr++;
					s->mtfFreq[BZ_RUNA]++;
				}
#else /* same as above, since BZ_RUNA is 0 and BZ_RUNB is 1 */
				unsigned run = zPend & 1;
				s->mtfv[wr] = run;
				wr++;
				s->mtfFreq[run]++;
#endif
				zPend -= 2;
				if (zPend < 0)
					break;
				zPend = (unsigned)zPend / 2;
				/* bbox: unsigned div is easier */
			}
			if (i < 0) /* came via "goto process_zPend"? exit */
				goto end;
			zPend = 0;
		}
		j = inner_loop(yy, ll_i);
		s->mtfv[wr] = j+1;
		wr++;
		s->mtfFreq[j+1]++;
	}

	i = -1;
	if (zPend > 0)
		goto process_zPend; /* "process it and come back here" */
 end:
	s->mtfv[wr] = s->nInUse+1;
	wr++;
	s->mtfFreq[s->nInUse+1]++;

	s->nMTF = wr;
}
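/*
 * How the zero-run loop above plays out (derived from the code, shown for
 * illustration): a run of r MTF zeroes is written least-significant digit
 * first as a bijective base-2 numeral whose k-th digit is RUNA (worth 1)
 * or RUNB (worth 2) at weight 2^k, e.g.
 *   r=1 -> RUNA;  r=2 -> RUNB;  r=3 -> RUNA RUNA;  r=4 -> RUNB RUNA.
 * No terminator is needed: ordinary MTF codes are position+1 with
 * position >= 1, so they start at 2, leaving codes 0 (RUNA) and 1 (RUNB)
 * unambiguous.
 */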
/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t t, i;
	unsigned iter;
	unsigned gs;
	int32_t alphaSize;
	unsigned nSelectors, selCtr;
	int32_t nGroups;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	unsigned /*uint16_t*/ cost[BZ_N_GROUPS];
	uint16_t* mtfv = s->mtfv;

	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++) {
		unsigned v;
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;
	}

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	//    1..199  = 2
	//  200..599  = 3
	//  600..1199 = 4
	// 1200..2399 = 5
	// 2400..     = 6
	nGroups = 2;
	nGroups += (s->nMTF >= 200);
	nGroups += (s->nMTF >= 600);
	nGroups += (s->nMTF >= 1200);
	nGroups += (s->nMTF >= 2400);

	/*--- Generate an initial set of coding tables ---*/
	{
		unsigned nPart, remF;

		nPart = nGroups;
		remF = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			unsigned v;
			unsigned ge;
			unsigned tFreq, aFreq;

			tFreq = remF / nPart;
			ge = gs;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize) {
				aFreq += s->mtfFreq[ge++];
			}
			ge--;

			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}

			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
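	/*
	 * The initial tables above split the alphabet into nGroups contiguous
	 * ranges of roughly equal total frequency; each table is cheap
	 * (BZ_LESSER_ICOST) inside its own range and expensive
	 * (BZ_GREATER_ICOST) elsewhere, giving the refinement iterations
	 * below a sensible starting point.
	 */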
	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++) {
			unsigned v;
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;
		}
#if BZIP2_SPEED >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			unsigned v;
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
		nSelectors = 0;
		gs = 0;
		while (1) {
			unsigned ge;
			unsigned bt, bc;

			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if BZIP2_SPEED >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					unsigned /*uint16_t*/ icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* 1% faster compress. +800 bytes */
#if BZIP2_SPEED >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}
	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			unsigned j;
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}
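	/*
	 * The loop above is the same move-to-front transform as inner_loop(),
	 * open-coded over the (at most 6) table indices. Runs of the same
	 * selector become runs of zeroes, which the unary coding further down
	 * transmits as a single '0' bit each.
	 */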
	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		unsigned minLen = 32; //todo: s->len[t][0];
		unsigned maxLen = 0; //todo: s->len[t][0];
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 0])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 4])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 8])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 0])
					| *(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}
		bsW16(s, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				unsigned j;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW16(s, v16);
			}
			inUse16 <<= 1;
		}
	}
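	/*
	 * The mapping table just sent is a two-level bitmap: one 16-bit word
	 * flags which 16-value chunks of the 0..255 byte space are in use,
	 * then one 16-bit word per flagged chunk gives the per-value bits.
	 * The OR-of-words trick above only answers "is any byte in this chunk
	 * nonzero?", which assumes inUse[] is an array of 0/1 bytes.
	 */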
	/*--- Now the selectors. ---*/
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		unsigned j;
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW1_1(s);
		bsW1_0(s);
	}

	/*--- Now the coding tables. ---*/
	for (t = 0; t < nGroups; t++) {
		unsigned curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }
			bsW1_0(s);
		}
	}
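	/*
	 * The tables above go out delta-coded: a 5-bit starting length, then
	 * per symbol a run of "10" (increment) / "11" (decrement) pairs until
	 * curr matches that symbol's length, closed by a single '0' bit.
	 * E.g. lengths 5,5,6 with start 5 encode as "0", "0", "10 0".
	 */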
	/*--- And finally, the block data proper ---*/
	selCtr = 0;
	gs = 0;
	while (1) {
		unsigned ge;

		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is a bit bigger, but moves the indexing multiply out of the loop */
			uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
#undef code
#undef rfreq
#undef len_pack
}
/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	int32_t origPtr = origPtr; /* self-init silences "may be used uninitialized", as above */

	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->posZ = s->zbits; // was: s->numZ = 0;
		origPtr = BZ2_blockSort(s);
	}
	s->zbits = &((uint8_t*)s->arr2)[s->nblock];
	s->posZ = s->zbits;
	s->state_out_pos = s->zbits;

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW1_0(s);

		bsW(s, 24, origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}
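/*
 * Stream layout recap, as emitted above: "BZh" plus a level digit; then per
 * block the magic 0x314159265359 (pi, decimal digits in hex nibbles), block
 * CRC, the randomised bit, origPtr, the mapping table, selectors, coding
 * tables, and the Huffman-coded MTF data; the trailer is 0x177245385090
 * (sqrt(pi), same digit trick) followed by the combined CRC.
 */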
/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/