xz_dec_bcj.c (14 KB)
  1. /*
  2. * Branch/Call/Jump (BCJ) filter decoders
  3. *
  4. * Authors: Lasse Collin <lasse.collin@tukaani.org>
  5. * Igor Pavlov <http://7-zip.org/>
  6. *
  7. * This file has been put into the public domain.
  8. * You can do whatever you want with this file.
  9. */
  10. #include "xz_private.h"
  11. /*
  12. * The rest of the file is inside this ifdef. It makes things a little more
  13. * convenient when building without support for any BCJ filters.
  14. */
  15. #ifdef XZ_DEC_BCJ
/* Decoder state for one BCJ filter instance. */
struct xz_dec_bcj {
	/* Type of the BCJ filter being used */
	enum {
		BCJ_X86 = 4,		/* x86 or x86-64 */
		BCJ_POWERPC = 5,	/* Big endian only */
		BCJ_IA64 = 6,		/* Big or little endian */
		BCJ_ARM = 7,		/* Little endian only */
		BCJ_ARMTHUMB = 8,	/* Little endian only */
		BCJ_SPARC = 9		/* Big or little endian */
	} type;

	/*
	 * Return value of the next filter in the chain. We need to preserve
	 * this information across calls, because we must not call the next
	 * filter anymore once it has returned XZ_STREAM_END.
	 */
	enum xz_ret ret;

	/* True if we are operating in single-call mode. */
	bool single_call;

	/*
	 * Absolute position relative to the beginning of the uncompressed
	 * data (in a single .xz Block). We care only about the lowest 32
	 * bits so this doesn't need to be uint64_t even with big files.
	 */
	uint32_t pos;

	/* x86 filter state (persists across calls; see bcj_x86()) */
	uint32_t x86_prev_mask;

	/* Temporary space to hold the variables from struct xz_buf */
	uint8_t *out;
	size_t out_pos;
	size_t out_size;

	struct {
		/* Amount of already filtered data in the beginning of buf */
		size_t filtered;

		/* Total amount of data currently stored in buf */
		size_t size;

		/*
		 * Buffer to hold a mix of filtered and unfiltered data. This
		 * needs to be big enough to hold Alignment + 2 * Look-ahead:
		 *
		 * Type		Alignment	Look-ahead
		 * x86		    1		  4
		 * PowerPC	    4		  0
		 * IA-64	   16		  0
		 * ARM		    4		  0
		 * ARM-Thumb	    2		  2
		 * SPARC	    4		  0
		 */
		uint8_t buf[16];
	} temp;
};
  66. #ifdef XZ_DEC_X86
  67. /*
  68. * This is used to test the most significant byte of a memory address
  69. * in an x86 instruction.
  70. */
  71. static inline int bcj_x86_test_msbyte(uint8_t b)
  72. {
  73. return b == 0x00 || b == 0xFF;
  74. }
/*
 * x86 / x86-64 filter. Bytes 0xE8 and 0xE9 (matched together via the 0xFE
 * mask) are followed by a 32-bit little endian displacement, which this
 * function converts in place. The prev_pos/prev_mask state tracks candidate
 * bytes seen within the last few positions so that decisions match the ones
 * the encoder made; prev_mask is preserved across calls in s->x86_prev_mask.
 * Returns the number of bytes of buf that were fully processed.
 */
static noinline_for_stack size_t XZ_FUNC bcj_x86(
		struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
	/*
	 * Indexed by the low three bits of prev_mask: whether a candidate
	 * with this recent-match pattern may still be converted.
	 */
	static const bool mask_to_allowed_status[8]
		= { true, true, true, false, true, false, false, false };

	/* Byte offset (from the end of the displacement) to test for each mask */
	static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };

	size_t i;
	/* Position of the previous 0xE8/0xE9 byte; (size_t)-1 = none yet */
	size_t prev_pos = (size_t)-1;
	/* Filter state carried over from the previous call */
	uint32_t prev_mask = s->x86_prev_mask;
	uint32_t src;
	uint32_t dest;
	uint32_t j;
	uint8_t b;

	/* Each match needs four look-ahead bytes after the opcode byte. */
	if (size <= 4)
		return 0;

	size -= 4;
	for (i = 0; i < size; ++i) {
		if ((buf[i] & 0xFE) != 0xE8)
			continue;

		/* Distance from the previous candidate byte */
		prev_pos = i - prev_pos;
		if (prev_pos > 3) {
			prev_mask = 0;
		} else {
			prev_mask = (prev_mask << (prev_pos - 1)) & 7;
			if (prev_mask != 0) {
				b = buf[i + 4 - mask_to_bit_num[prev_mask]];
				if (!mask_to_allowed_status[prev_mask]
						|| bcj_x86_test_msbyte(b)) {
					/* Skip conversion; remember this match. */
					prev_pos = i;
					prev_mask = (prev_mask << 1) | 1;
					continue;
				}
			}
		}

		prev_pos = i;

		if (bcj_x86_test_msbyte(buf[i + 4])) {
			src = get_unaligned_le32(buf + i + 1);
			while (true) {
				/* +5 = opcode byte plus 32-bit displacement */
				dest = src - (s->pos + (uint32_t)i + 5);
				if (prev_mask == 0)
					break;

				j = mask_to_bit_num[prev_mask] * 8;
				b = (uint8_t)(dest >> (24 - j));
				if (!bcj_x86_test_msbyte(b))
					break;

				src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
			}

			/* Sign-extend bit 24 of the result over the top byte. */
			dest &= 0x01FFFFFF;
			dest |= (uint32_t)0 - (dest & 0x01000000);
			put_unaligned_le32(dest, buf + i + 1);
			i += 4;
		} else {
			prev_mask = (prev_mask << 1) | 1;
		}
	}

	/* Save the mask state for the next call. */
	prev_pos = i - prev_pos;
	s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
	return i;
}
  134. #endif
  135. #ifdef XZ_DEC_POWERPC
  136. static noinline_for_stack size_t XZ_FUNC bcj_powerpc(
  137. struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  138. {
  139. size_t i;
  140. uint32_t instr;
  141. for (i = 0; i + 4 <= size; i += 4) {
  142. instr = get_unaligned_be32(buf + i);
  143. if ((instr & 0xFC000003) == 0x48000001) {
  144. instr &= 0x03FFFFFC;
  145. instr -= s->pos + (uint32_t)i;
  146. instr &= 0x03FFFFFC;
  147. instr |= 0x48000001;
  148. put_unaligned_be32(instr, buf + i);
  149. }
  150. }
  151. return i;
  152. }
  153. #endif
  154. #ifdef XZ_DEC_IA64
/*
 * IA-64 filter. Processes the data in 16-byte bundles; the lowest five bits
 * of the first byte of each bundle select an entry of branch_table, whose
 * bits tell which of the three 41-bit instruction slots should have their
 * address field converted. Returns the number of bytes processed (a
 * multiple of 16).
 */
static noinline_for_stack size_t XZ_FUNC bcj_ia64(
		struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
	/* Indexed by the lowest five bits of the first byte of the bundle */
	static const uint8_t branch_table[32] = {
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		4, 4, 6, 6, 0, 0, 7, 7,
		4, 4, 0, 0, 4, 4, 0, 0
	};

	/*
	 * The local variables take a little bit stack space, but it's less
	 * than what LZMA2 decoder takes, so it doesn't make sense to reduce
	 * stack usage here without doing that for the LZMA2 decoder too.
	 */

	/* Loop counters */
	size_t i;
	size_t j;

	/* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
	uint32_t slot;

	/* Bitwise offset of the instruction indicated by slot */
	uint32_t bit_pos;

	/* bit_pos split into byte and bit parts */
	uint32_t byte_pos;
	uint32_t bit_res;

	/* Address part of an instruction */
	uint32_t addr;

	/* Mask used to detect which instructions to convert */
	uint32_t mask;

	/* 41-bit instruction stored somewhere in the lowest 48 bits */
	uint64_t instr;

	/* Instruction normalized with bit_res for easier manipulation */
	uint64_t norm;

	for (i = 0; i + 16 <= size; i += 16) {
		mask = branch_table[buf[i] & 0x1F];
		for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
			if (((mask >> slot) & 1) == 0)
				continue;

			/* Load the six bytes that contain this slot. */
			byte_pos = bit_pos >> 3;
			bit_res = bit_pos & 7;
			instr = 0;
			for (j = 0; j < 6; ++j)
				instr |= (uint64_t)(buf[i + j + byte_pos])
						<< (8 * j);

			norm = instr >> bit_res;

			/* Only convert if the opcode/qualifier bits match. */
			if (((norm >> 37) & 0x0F) == 0x05
					&& ((norm >> 9) & 0x07) == 0) {
				/* 21-bit address: 20 low bits plus a sign bit */
				addr = (norm >> 13) & 0x0FFFFF;
				addr |= ((uint32_t)(norm >> 36) & 1) << 20;
				addr <<= 4;
				addr -= s->pos + (uint32_t)i;
				addr >>= 4;

				/* Write the converted address field back. */
				norm &= ~((uint64_t)0x8FFFFF << 13);
				norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
				norm |= (uint64_t)(addr & 0x100000)
						<< (36 - 20);

				instr &= (1 << bit_res) - 1;
				instr |= norm << bit_res;

				for (j = 0; j < 6; j++)
					buf[i + j + byte_pos]
						= (uint8_t)(instr >> (8 * j));
			}
		}
	}

	return i;
}
  220. #endif
  221. #ifdef XZ_DEC_ARM
  222. static noinline_for_stack size_t XZ_FUNC bcj_arm(
  223. struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  224. {
  225. size_t i;
  226. uint32_t addr;
  227. for (i = 0; i + 4 <= size; i += 4) {
  228. if (buf[i + 3] == 0xEB) {
  229. addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
  230. | ((uint32_t)buf[i + 2] << 16);
  231. addr <<= 2;
  232. addr -= s->pos + (uint32_t)i + 8;
  233. addr >>= 2;
  234. buf[i] = (uint8_t)addr;
  235. buf[i + 1] = (uint8_t)(addr >> 8);
  236. buf[i + 2] = (uint8_t)(addr >> 16);
  237. }
  238. }
  239. return i;
  240. }
  241. #endif
  242. #ifdef XZ_DEC_ARMTHUMB
  243. static noinline_for_stack size_t XZ_FUNC bcj_armthumb(
  244. struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  245. {
  246. size_t i;
  247. uint32_t addr;
  248. for (i = 0; i + 4 <= size; i += 2) {
  249. if ((buf[i + 1] & 0xF8) == 0xF0
  250. && (buf[i + 3] & 0xF8) == 0xF8) {
  251. addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
  252. | ((uint32_t)buf[i] << 11)
  253. | (((uint32_t)buf[i + 3] & 0x07) << 8)
  254. | (uint32_t)buf[i + 2];
  255. addr <<= 1;
  256. addr -= s->pos + (uint32_t)i + 4;
  257. addr >>= 1;
  258. buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
  259. buf[i] = (uint8_t)(addr >> 11);
  260. buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
  261. buf[i + 2] = (uint8_t)addr;
  262. i += 2;
  263. }
  264. }
  265. return i;
  266. }
  267. #endif
  268. #ifdef XZ_DEC_SPARC
  269. static noinline_for_stack size_t XZ_FUNC bcj_sparc(
  270. struct xz_dec_bcj *s, uint8_t *buf, size_t size)
  271. {
  272. size_t i;
  273. uint32_t instr;
  274. for (i = 0; i + 4 <= size; i += 4) {
  275. instr = get_unaligned_be32(buf + i);
  276. if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
  277. instr <<= 2;
  278. instr -= s->pos + (uint32_t)i;
  279. instr >>= 2;
  280. instr = ((uint32_t)0x40000000 - (instr & 0x400000))
  281. | 0x40000000 | (instr & 0x3FFFFF);
  282. put_unaligned_be32(instr, buf + i);
  283. }
  284. }
  285. return i;
  286. }
  287. #endif
  288. /*
  289. * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
  290. * of data that got filtered.
  291. *
  292. * NOTE: This is implemented as a switch statement to avoid using function
  293. * pointers, which could be problematic in the kernel boot code, which must
  294. * avoid pointers to static data (at least on x86).
  295. */
  296. static void XZ_FUNC bcj_apply(struct xz_dec_bcj *s,
  297. uint8_t *buf, size_t *pos, size_t size)
  298. {
  299. size_t filtered;
  300. buf += *pos;
  301. size -= *pos;
  302. switch (s->type) {
  303. #ifdef XZ_DEC_X86
  304. case BCJ_X86:
  305. filtered = bcj_x86(s, buf, size);
  306. break;
  307. #endif
  308. #ifdef XZ_DEC_POWERPC
  309. case BCJ_POWERPC:
  310. filtered = bcj_powerpc(s, buf, size);
  311. break;
  312. #endif
  313. #ifdef XZ_DEC_IA64
  314. case BCJ_IA64:
  315. filtered = bcj_ia64(s, buf, size);
  316. break;
  317. #endif
  318. #ifdef XZ_DEC_ARM
  319. case BCJ_ARM:
  320. filtered = bcj_arm(s, buf, size);
  321. break;
  322. #endif
  323. #ifdef XZ_DEC_ARMTHUMB
  324. case BCJ_ARMTHUMB:
  325. filtered = bcj_armthumb(s, buf, size);
  326. break;
  327. #endif
  328. #ifdef XZ_DEC_SPARC
  329. case BCJ_SPARC:
  330. filtered = bcj_sparc(s, buf, size);
  331. break;
  332. #endif
  333. default:
  334. /* Never reached but silence compiler warnings. */
  335. filtered = 0;
  336. break;
  337. }
  338. *pos += filtered;
  339. s->pos += filtered;
  340. }
  341. /*
  342. * Flush pending filtered data from temp to the output buffer.
  343. * Move the remaining mixture of possibly filtered and unfiltered
  344. * data to the beginning of temp.
  345. */
  346. static void XZ_FUNC bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
  347. {
  348. size_t copy_size;
  349. copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
  350. memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
  351. b->out_pos += copy_size;
  352. s->temp.filtered -= copy_size;
  353. s->temp.size -= copy_size;
  354. memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
  355. }
/*
 * The BCJ filter functions are primitive in sense that they process the
 * data in chunks of 1-16 bytes. To hide this issue, this function does
 * some buffering.
 */
XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_bcj_run(struct xz_dec_bcj *s,
		struct xz_dec_lzma2 *lzma2, struct xz_buf *b)
{
	size_t out_start;

	/*
	 * Flush pending already filtered data to the output buffer. Return
	 * immediately if we couldn't flush everything, or if the next
	 * filter in the chain had already returned XZ_STREAM_END.
	 */
	if (s->temp.filtered > 0) {
		bcj_flush(s, b);
		if (s->temp.filtered > 0)
			return XZ_OK;

		if (s->ret == XZ_STREAM_END)
			return XZ_STREAM_END;
	}

	/*
	 * If we have more output space than what is currently pending in
	 * temp, copy the unfiltered data from temp to the output buffer
	 * and try to fill the output buffer by decoding more data from the
	 * next filter in the chain. Apply the BCJ filter on the new data
	 * in the output buffer. If everything cannot be filtered, copy it
	 * to temp and rewind the output buffer position accordingly.
	 *
	 * This needs to be always run when temp.size == 0 to handle a special
	 * case where the output buffer is full and the next filter has no
	 * more output coming but hasn't returned XZ_STREAM_END yet.
	 */
	if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
		out_start = b->out_pos;
		memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
		b->out_pos += s->temp.size;

		s->ret = xz_dec_lzma2_run(lzma2, b);
		if (s->ret != XZ_STREAM_END
				&& (s->ret != XZ_OK || s->single_call))
			return s->ret;

		bcj_apply(s, b->out, &out_start, b->out_pos);

		/*
		 * As an exception, if the next filter returned XZ_STREAM_END,
		 * we can do that too, since the last few bytes that remain
		 * unfiltered are meant to remain unfiltered.
		 */
		if (s->ret == XZ_STREAM_END)
			return XZ_STREAM_END;

		/* Move the not-yet-filterable tail into temp. */
		s->temp.size = b->out_pos - out_start;
		b->out_pos -= s->temp.size;
		memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);

		/*
		 * If there wasn't enough input to the next filter to fill
		 * the output buffer with unfiltered data, there's no point
		 * to try decoding more data to temp.
		 */
		if (b->out_pos + s->temp.size < b->out_size)
			return XZ_OK;
	}

	/*
	 * We have unfiltered data in temp. If the output buffer isn't full
	 * yet, try to fill the temp buffer by decoding more data from the
	 * next filter. Apply the BCJ filter on temp. Then we hopefully can
	 * fill the actual output buffer by copying filtered data from temp.
	 * A mix of filtered and unfiltered data may be left in temp; it will
	 * be taken care on the next call to this function.
	 */
	if (b->out_pos < b->out_size) {
		/* Make b->out{,_pos,_size} temporarily point to s->temp. */
		s->out = b->out;
		s->out_pos = b->out_pos;
		s->out_size = b->out_size;
		b->out = s->temp.buf;
		b->out_pos = s->temp.size;
		b->out_size = sizeof(s->temp.buf);

		s->ret = xz_dec_lzma2_run(lzma2, b);

		/* Restore the real output buffer. */
		s->temp.size = b->out_pos;
		b->out = s->out;
		b->out_pos = s->out_pos;
		b->out_size = s->out_size;

		if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
			return s->ret;

		bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);

		/*
		 * If the next filter returned XZ_STREAM_END, we mark that
		 * everything is filtered, since the last unfiltered bytes
		 * of the stream are meant to be left as is.
		 */
		if (s->ret == XZ_STREAM_END)
			s->temp.filtered = s->temp.size;

		bcj_flush(s, b);
		if (s->temp.filtered > 0)
			return XZ_OK;
	}

	return s->ret;
}
  453. XZ_EXTERN struct xz_dec_bcj * XZ_FUNC xz_dec_bcj_create(bool single_call)
  454. {
  455. struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
  456. if (s != NULL)
  457. s->single_call = single_call;
  458. return s;
  459. }
  460. XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_bcj_reset(
  461. struct xz_dec_bcj *s, uint8_t id)
  462. {
  463. switch (id) {
  464. #ifdef XZ_DEC_X86
  465. case BCJ_X86:
  466. #endif
  467. #ifdef XZ_DEC_POWERPC
  468. case BCJ_POWERPC:
  469. #endif
  470. #ifdef XZ_DEC_IA64
  471. case BCJ_IA64:
  472. #endif
  473. #ifdef XZ_DEC_ARM
  474. case BCJ_ARM:
  475. #endif
  476. #ifdef XZ_DEC_ARMTHUMB
  477. case BCJ_ARMTHUMB:
  478. #endif
  479. #ifdef XZ_DEC_SPARC
  480. case BCJ_SPARC:
  481. #endif
  482. break;
  483. default:
  484. /* Unsupported Filter ID */
  485. return XZ_OPTIONS_ERROR;
  486. }
  487. s->type = id;
  488. s->ret = XZ_OK;
  489. s->pos = 0;
  490. s->x86_prev_mask = 0;
  491. s->temp.filtered = 0;
  492. s->temp.size = 0;
  493. return XZ_OK;
  494. }
  495. #endif