addmulmod.s

# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y01
# qhasm: reg128 y23
# qhasm: reg128 y4
# qhasm: reg128 _5y01
# qhasm: reg128 _5y23
# qhasm: reg128 _5y4
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 c4
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: enter crypto_onetimeauth_poly1305_neon2_addmulmod
.align 2
.global _crypto_onetimeauth_poly1305_neon2_addmulmod
.global crypto_onetimeauth_poly1305_neon2_addmulmod
.type _crypto_onetimeauth_poly1305_neon2_addmulmod STT_FUNC
.type crypto_onetimeauth_poly1305_neon2_addmulmod STT_FUNC
_crypto_onetimeauth_poly1305_neon2_addmulmod:
crypto_onetimeauth_poly1305_neon2_addmulmod:
sub sp,sp,#0
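# Outline: this routine computes out = (x + c) * y mod 2^130 - 5 for two
# independent value sets in parallel, one per 32-bit lane position.
# input_0 (r0) is the output pointer, input_1 (r1) points to x, input_2 (r2)
# to y, and input_3 (r3) to c.  Each value is held as five 26-bit limbs;
# every reg128 below packs one limb pair, with the limbs of the two parallel
# values interleaved in adjacent 32-bit lanes.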
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#1,#0xffffffff
# asm 2: vmov.i64 >mask=q0,#0xffffffff
vmov.i64 q0,#0xffffffff
# qhasm: y01 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y01=reg128#2%bot->y01=reg128#2%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y01=d2->y01=d3},[<input_2=r2,: 128]!
vld1.8 {d2-d3},[r2,: 128]!
# qhasm: 4x _5y01 = y01 << 2
# asm 1: vshl.i32 >_5y01=reg128#3,<y01=reg128#2,#2
# asm 2: vshl.i32 >_5y01=q2,<y01=q1,#2
vshl.i32 q2,q1,#2
# qhasm: y23 aligned= mem128[input_2];input_2+=16
# asm 1: vld1.8 {>y23=reg128#4%bot->y23=reg128#4%top},[<input_2=int32#3,: 128]!
# asm 2: vld1.8 {>y23=d6->y23=d7},[<input_2=r2,: 128]!
vld1.8 {d6-d7},[r2,: 128]!
# qhasm: 4x _5y23 = y23 << 2
# asm 1: vshl.i32 >_5y23=reg128#9,<y23=reg128#4,#2
# asm 2: vshl.i32 >_5y23=q8,<y23=q3,#2
vshl.i32 q8,q3,#2
# qhasm: y4 aligned= mem64[input_2]y4[1]
# asm 1: vld1.8 {<y4=reg128#10%bot},[<input_2=int32#3,: 64]
# asm 2: vld1.8 {<y4=d18},[<input_2=r2,: 64]
vld1.8 {d18},[r2,: 64]
# qhasm: 4x _5y4 = y4 << 2
# asm 1: vshl.i32 >_5y4=reg128#11,<y4=reg128#10,#2
# asm 2: vshl.i32 >_5y4=q10,<y4=q9,#2
vshl.i32 q10,q9,#2
# qhasm: x01 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x01=reg128#12%bot->x01=reg128#12%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x01=d22->x01=d23},[<input_1=r1,: 128]!
vld1.8 {d22-d23},[r1,: 128]!
# qhasm: 4x _5y01 += y01
# asm 1: vadd.i32 >_5y01=reg128#3,<_5y01=reg128#3,<y01=reg128#2
# asm 2: vadd.i32 >_5y01=q2,<_5y01=q2,<y01=q1
vadd.i32 q2,q2,q1
# qhasm: x23 aligned= mem128[input_1];input_1+=16
# asm 1: vld1.8 {>x23=reg128#13%bot->x23=reg128#13%top},[<input_1=int32#2,: 128]!
# asm 2: vld1.8 {>x23=d24->x23=d25},[<input_1=r1,: 128]!
vld1.8 {d24-d25},[r1,: 128]!
# qhasm: 4x _5y23 += y23
# asm 1: vadd.i32 >_5y23=reg128#9,<_5y23=reg128#9,<y23=reg128#4
# asm 2: vadd.i32 >_5y23=q8,<_5y23=q8,<y23=q3
vadd.i32 q8,q8,q3
# qhasm: 4x _5y4 += y4
# asm 1: vadd.i32 >_5y4=reg128#11,<_5y4=reg128#11,<y4=reg128#10
# asm 2: vadd.i32 >_5y4=q10,<_5y4=q10,<y4=q9
vadd.i32 q10,q10,q9
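# _5y01, _5y23 and _5y4 now hold 5*y, computed as (y << 2) + y.  They are
# used in the multiplication below for product terms whose weight reaches
# 2^130, since 2^130 == 5 (mod 2^130 - 5).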
# qhasm: c01 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c01=reg128#14%bot->c01=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c01=d26->c01=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x01 += c01
# asm 1: vadd.i32 >x01=reg128#12,<x01=reg128#12,<c01=reg128#14
# asm 2: vadd.i32 >x01=q11,<x01=q11,<c01=q13
vadd.i32 q11,q11,q13
# qhasm: c23 aligned= mem128[input_3];input_3+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_3=int32#4,: 128]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_3=r3,: 128]!
vld1.8 {d26-d27},[r3,: 128]!
# qhasm: 4x x23 += c23
# asm 1: vadd.i32 >x23=reg128#13,<x23=reg128#13,<c23=reg128#14
# asm 2: vadd.i32 >x23=q12,<x23=q12,<c23=q13
vadd.i32 q12,q12,q13
# qhasm: x4 aligned= mem64[input_1]x4[1]
# asm 1: vld1.8 {<x4=reg128#14%bot},[<input_1=int32#2,: 64]
# asm 2: vld1.8 {<x4=d26},[<input_1=r1,: 64]
vld1.8 {d26},[r1,: 64]
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#1,<mask=reg128#1,#6
# asm 2: vshr.u64 >mask=q0,<mask=q0,#6
vshr.u64 q0,q0,#6
# qhasm: c4 aligned= mem64[input_3]c4[1]
# asm 1: vld1.8 {<c4=reg128#15%bot},[<input_3=int32#4,: 64]
# asm 2: vld1.8 {<c4=d28},[<input_3=r3,: 64]
vld1.8 {d28},[r3,: 64]
# qhasm: 4x x4 += c4
# asm 1: vadd.i32 >x4=reg128#14,<x4=reg128#14,<c4=reg128#15
# asm 2: vadd.i32 >x4=q13,<x4=q13,<c4=q14
vadd.i32 q13,q13,q14
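# x01/x23/x4 now hold x + c, and mask holds 0x3ffffff (0xffffffff >> 6) in
# each 64-bit lane, i.e. the 26-bit limb mask used during carry propagation.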
# qhasm: r0[0,1] = x01[0] unsigned* y01[0]; r0[2,3] = x01[1] unsigned* y01[1]
# asm 1: vmull.u32 >r0=reg128#15,<x01=reg128#12%bot,<y01=reg128#2%bot
# asm 2: vmull.u32 >r0=q14,<x01=d22,<y01=d2
vmull.u32 q14,d22,d2
# qhasm: r0[0,1] += x01[2] unsigned* _5y4[0]; r0[2,3] += x01[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x01=reg128#12%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r0=q14,<x01=d23,<_5y4=d20
vmlal.u32 q14,d23,d20
# qhasm: r0[0,1] += x23[0] unsigned* _5y23[2]; r0[2,3] += x23[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r0=q14,<x23=d24,<_5y23=d17
vmlal.u32 q14,d24,d17
# qhasm: r0[0,1] += x23[2] unsigned* _5y23[0]; r0[2,3] += x23[3] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r0=reg128#15,<x23=reg128#13%top,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r0=q14,<x23=d25,<_5y23=d16
vmlal.u32 q14,d25,d16
# qhasm: r0[0,1] += x4[0] unsigned* _5y01[2]; r0[2,3] += x4[1] unsigned* _5y01[3]
# asm 1: vmlal.u32 <r0=reg128#15,<x4=reg128#14%bot,<_5y01=reg128#3%top
# asm 2: vmlal.u32 <r0=q14,<x4=d26,<_5y01=d5
vmlal.u32 q14,d26,d5
# qhasm: r1[0,1] = x01[0] unsigned* y01[2]; r1[2,3] = x01[1] unsigned* y01[3]
# asm 1: vmull.u32 >r1=reg128#3,<x01=reg128#12%bot,<y01=reg128#2%top
# asm 2: vmull.u32 >r1=q2,<x01=d22,<y01=d3
vmull.u32 q2,d22,d3
# qhasm: r1[0,1] += x01[2] unsigned* y01[0]; r1[2,3] += x01[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x01=reg128#12%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r1=q2,<x01=d23,<y01=d2
vmlal.u32 q2,d23,d2
# qhasm: r1[0,1] += x23[0] unsigned* _5y4[0]; r1[2,3] += x23[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r1=q2,<x23=d24,<_5y4=d20
vmlal.u32 q2,d24,d20
# qhasm: r1[0,1] += x23[2] unsigned* _5y23[2]; r1[2,3] += x23[3] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r1=reg128#3,<x23=reg128#13%top,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r1=q2,<x23=d25,<_5y23=d17
vmlal.u32 q2,d25,d17
# qhasm: r1[0,1] += x4[0] unsigned* _5y23[0]; r1[2,3] += x4[1] unsigned* _5y23[1]
# asm 1: vmlal.u32 <r1=reg128#3,<x4=reg128#14%bot,<_5y23=reg128#9%bot
# asm 2: vmlal.u32 <r1=q2,<x4=d26,<_5y23=d16
vmlal.u32 q2,d26,d16
# qhasm: r2[0,1] = x01[0] unsigned* y23[0]; r2[2,3] = x01[1] unsigned* y23[1]
# asm 1: vmull.u32 >r2=reg128#16,<x01=reg128#12%bot,<y23=reg128#4%bot
# asm 2: vmull.u32 >r2=q15,<x01=d22,<y23=d6
vmull.u32 q15,d22,d6
# qhasm: r2[0,1] += x01[2] unsigned* y01[2]; r2[2,3] += x01[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x01=reg128#12%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r2=q15,<x01=d23,<y01=d3
vmlal.u32 q15,d23,d3
# qhasm: r2[0,1] += x23[0] unsigned* y01[0]; r2[2,3] += x23[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d24,<y01=d2
vmlal.u32 q15,d24,d2
# qhasm: r2[0,1] += x23[2] unsigned* _5y4[0]; r2[2,3] += x23[3] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r2=reg128#16,<x23=reg128#13%top,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r2=q15,<x23=d25,<_5y4=d20
vmlal.u32 q15,d25,d20
# qhasm: r2[0,1] += x4[0] unsigned* _5y23[2]; r2[2,3] += x4[1] unsigned* _5y23[3]
# asm 1: vmlal.u32 <r2=reg128#16,<x4=reg128#14%bot,<_5y23=reg128#9%top
# asm 2: vmlal.u32 <r2=q15,<x4=d26,<_5y23=d17
vmlal.u32 q15,d26,d17
# qhasm: r3[0,1] = x01[0] unsigned* y23[2]; r3[2,3] = x01[1] unsigned* y23[3]
# asm 1: vmull.u32 >r3=reg128#9,<x01=reg128#12%bot,<y23=reg128#4%top
# asm 2: vmull.u32 >r3=q8,<x01=d22,<y23=d7
vmull.u32 q8,d22,d7
# qhasm: r3[0,1] += x01[2] unsigned* y23[0]; r3[2,3] += x01[3] unsigned* y23[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x01=reg128#12%top,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r3=q8,<x01=d23,<y23=d6
vmlal.u32 q8,d23,d6
# qhasm: r3[0,1] += x23[0] unsigned* y01[2]; r3[2,3] += x23[1] unsigned* y01[3]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%bot,<y01=reg128#2%top
# asm 2: vmlal.u32 <r3=q8,<x23=d24,<y01=d3
vmlal.u32 q8,d24,d3
# qhasm: r3[0,1] += x23[2] unsigned* y01[0]; r3[2,3] += x23[3] unsigned* y01[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x23=reg128#13%top,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r3=q8,<x23=d25,<y01=d2
vmlal.u32 q8,d25,d2
# qhasm: r3[0,1] += x4[0] unsigned* _5y4[0]; r3[2,3] += x4[1] unsigned* _5y4[1]
# asm 1: vmlal.u32 <r3=reg128#9,<x4=reg128#14%bot,<_5y4=reg128#11%bot
# asm 2: vmlal.u32 <r3=q8,<x4=d26,<_5y4=d20
vmlal.u32 q8,d26,d20
# qhasm: r4[0,1] = x01[0] unsigned* y4[0]; r4[2,3] = x01[1] unsigned* y4[1]
# asm 1: vmull.u32 >r4=reg128#10,<x01=reg128#12%bot,<y4=reg128#10%bot
# asm 2: vmull.u32 >r4=q9,<x01=d22,<y4=d18
vmull.u32 q9,d22,d18
# qhasm: r4[0,1] += x01[2] unsigned* y23[2]; r4[2,3] += x01[3] unsigned* y23[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x01=reg128#12%top,<y23=reg128#4%top
# asm 2: vmlal.u32 <r4=q9,<x01=d23,<y23=d7
vmlal.u32 q9,d23,d7
# qhasm: r4[0,1] += x23[0] unsigned* y23[0]; r4[2,3] += x23[1] unsigned* y23[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%bot,<y23=reg128#4%bot
# asm 2: vmlal.u32 <r4=q9,<x23=d24,<y23=d6
vmlal.u32 q9,d24,d6
# qhasm: r4[0,1] += x23[2] unsigned* y01[2]; r4[2,3] += x23[3] unsigned* y01[3]
# asm 1: vmlal.u32 <r4=reg128#10,<x23=reg128#13%top,<y01=reg128#2%top
# asm 2: vmlal.u32 <r4=q9,<x23=d25,<y01=d3
vmlal.u32 q9,d25,d3
# qhasm: r4[0,1] += x4[0] unsigned* y01[0]; r4[2,3] += x4[1] unsigned* y01[1]
# asm 1: vmlal.u32 <r4=reg128#10,<x4=reg128#14%bot,<y01=reg128#2%bot
# asm 2: vmlal.u32 <r4=q9,<x4=d26,<y01=d2
vmlal.u32 q9,d26,d2
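# r0..r4 now hold the five 64-bit limb sums of (x + c) * y in radix 2^26,
# accumulated as a 5x5 schoolbook product (25 vmull/vmlal terms).  Any term
# whose combined limb index would be 5 or more is folded back in by using
# the precomputed 5*y limbs instead of y.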
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#2,<r0=reg128#15,#26
# asm 2: vshr.u64 >t1=q1,<r0=q14,#26
vshr.u64 q1,q14,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#15,<mask=reg128#1
# asm 2: vand >r0=q3,<r0=q14,<mask=q0
vand q3,q14,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#3,<t1=reg128#2
# asm 2: vadd.i64 >r1=q1,<r1=q2,<t1=q1
vadd.i64 q1,q2,q1
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#3,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q2,<r3=q8,#26
vshr.u64 q2,q8,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#9,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q8,<r3=q8,<mask=q0
vand q8,q8,q0
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#3,<r4=reg128#10,<t4=reg128#3
# asm 2: vadd.i64 >r4=q2,<r4=q9,<t4=q2
vadd.i64 q2,q9,q2
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#2,#26
# asm 2: vshr.u64 >t2=q9,<r1=q1,#26
vshr.u64 q9,q1,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#2,<r1=reg128#2,<mask=reg128#1
# asm 2: vand >r1=q1,<r1=q1,<mask=q0
vand q1,q1,q0
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#11,<r4=reg128#3,#26
# asm 2: vshr.u64 >t0=q10,<r4=q2,#26
vshr.u64 q10,q2,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#10,<r2=reg128#16,<t2=reg128#10
# asm 2: vadd.i64 >r2=q9,<r2=q15,<t2=q9
vadd.i64 q9,q15,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#3,<r4=reg128#3,<mask=reg128#1
# asm 2: vand >r4=q2,<r4=q2,<mask=q0
vand q2,q2,q0
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#11,<t0=reg128#11,#2
# asm 2: vshl.i64 >t0=q10,<t0=q10,#2
vshl.i64 q10,q10,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#12,<r2=reg128#10,#26
# asm 2: vshr.u64 >t3=q11,<r2=q9,#26
vshr.u64 q11,q9,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#11
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q10
vadd.i64 q3,q3,q10
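# The carry out of limb 4 (t0 = r4 >> 26) wraps around into limb 0
# multiplied by 5: it was added once above and once more after the shift
# left by 2, so r0 has absorbed t0 + 4*t0 = 5*t0, again using 2^130 == 5.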
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#10,<mask=reg128#1
# asm 2: vand >x23=q9,<r2=q9,<mask=q0
vand q9,q9,q0
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#9,<r3=reg128#9,<t3=reg128#12
# asm 2: vadd.i64 >r3=q8,<r3=q8,<t3=q11
vadd.i64 q8,q8,q11
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#11,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q10,<r0=q3,#26
vshr.u64 q10,q3,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#4,<r0=reg128#4,<mask=reg128#1
# asm 2: vand >x01=q3,<r0=q3,<mask=q0
vand q3,q3,q0
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#2,<r1=reg128#2,<t1=reg128#11
# asm 2: vadd.i64 >r1=q1,<r1=q1,<t1=q10
vadd.i64 q1,q1,q10
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#11,<r3=reg128#9,#26
# asm 2: vshr.u64 >t4=q10,<r3=q8,#26
vshr.u64 q10,q8,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#4%bot,<x01=reg128#4%top
# asm 2: vtrn.32 <x01=d6,<x01=d7
vtrn.32 d6,d7
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#1,<r3=reg128#9,<mask=reg128#1
# asm 2: vand >r3=q0,<r3=q8,<mask=q0
vand q0,q8,q0
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#2%bot,<r1=reg128#2%top
# asm 2: vtrn.32 <r1=d2,<r1=d3
vtrn.32 d2,d3
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#3,<r4=reg128#3,<t4=reg128#11
# asm 2: vadd.i64 >x4=q2,<r4=q2,<t4=q10
vadd.i64 q2,q2,q10
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#1%bot,<r3=reg128#1%top
# asm 2: vtrn.32 <r3=d0,<r3=d1
vtrn.32 d0,d1
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#4%top,<r1=reg128#2%bot,<r1=reg128#2%bot,#0
# asm 2: vext.32 <x01=d7,<r1=d2,<r1=d2,#0
vext.32 d7,d2,d2,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#1%bot,<r3=reg128#1%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d0,<r3=d0,#0
vext.32 d19,d0,d0,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#3%bot,<x4=reg128#3%top
# asm 2: vtrn.32 <x4=d4,<x4=d5
vtrn.32 d4,d5
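# The vtrn/vext shuffles above narrow the 64-bit limb sums back to 32-bit
# lanes and re-pack them into the same interleaved layout as the inputs;
# the stores below write limbs 0-1, 2-3 and 4 of the result through input_0.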
# qhasm: mem128[input_0] aligned= x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#4%bot-<x01=reg128#4%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x01=d6-<x01=d7},[<input_0=r0,: 128]!
vst1.8 {d6-d7},[r0,: 128]!
# qhasm: mem128[input_0] aligned= x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0,: 128]!
vst1.8 {d18-d19},[r0,: 128]!
# qhasm: mem64[input_0] aligned= x4[0]
# asm 1: vst1.8 <x4=reg128#3%bot,[<input_0=int32#1,: 64]
# asm 2: vst1.8 <x4=d4,[<input_0=r0,: 64]
vst1.8 d4,[r0,: 64]
# qhasm: return
add sp,sp,#0
bx lr