blocks.s
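# Poly1305 block processing in ARM NEON, generated by qhasm; the "# qhasm:"
# lines are the source statements, the "# asm 1/2:" lines the register
# assignments. Reading the code below (the signature is not spelled out here):
#   input_0 (r0): accumulator, five 26-bit limbs in x01/x23/x4, loaded at
#                 entry and stored back at ._end
#   input_1 (r1): precomputed key limbs y0/y12/y34 and z0/z12/z34,
#                 presumably r and r^2 for the two-block main loop
#   input_2 (r2): message bytes
#   input_3 (r3): message length; the unprocessed remainder (at most 32
#                 bytes) is returned in r0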
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y0
# qhasm: reg128 y12
# qhasm: reg128 y34
# qhasm: reg128 5y12
# qhasm: reg128 5y34
# qhasm: stack128 y0_stack
# qhasm: stack128 y12_stack
# qhasm: stack128 y34_stack
# qhasm: stack128 5y12_stack
# qhasm: stack128 5y34_stack
# qhasm: reg128 z0
# qhasm: reg128 z12
# qhasm: reg128 z34
# qhasm: reg128 5z12
# qhasm: reg128 5z34
# qhasm: stack128 z0_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z34_stack
# qhasm: stack128 5z12_stack
# qhasm: stack128 5z34_stack
# qhasm: stack128 two24
# qhasm: int32 ptr
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 d01
# qhasm: reg128 d23
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: reg128 u0
# qhasm: reg128 u1
# qhasm: reg128 u2
# qhasm: reg128 u3
# qhasm: reg128 u4
# qhasm: reg128 v01
# qhasm: reg128 mid
# qhasm: reg128 v23
# qhasm: reg128 v4
# qhasm: int32 len
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global _crypto_onetimeauth_poly1305_neon2_blocks
.global crypto_onetimeauth_poly1305_neon2_blocks
.type _crypto_onetimeauth_poly1305_neon2_blocks STT_FUNC
.type crypto_onetimeauth_poly1305_neon2_blocks STT_FUNC
_crypto_onetimeauth_poly1305_neon2_blocks:
crypto_onetimeauth_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
and sp,sp,#0xffffffe0
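# note: the caller's sp is kept in r12; 192 bytes of scratch are reserved
# and sp is masked down to a 32-byte boundary so that the "[...,: 128]"
# aligned NEON stores and loads on this frame are legal.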
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3
# qhasm: new y0
# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!
# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8
# qhasm: new z0
# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!
# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!
# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff
# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!
# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!
# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]
# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6
# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7
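# note: mask is now 0xffffffff >> 6 = 0x03ffffff in each 64-bit lane, the
# 26-bit limb mask used throughout the carry chains below; u4 is
# 0xff >> 7 = 1, about to be shifted up to 2^24.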
  154. # qhasm: 4x 5y12 = y12 << 2
  155. # asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
  156. # asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
  157. vshl.i32 q11,q1,#2
  158. # qhasm: 4x 5y34 = y34 << 2
  159. # asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
  160. # asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
  161. vshl.i32 q12,q2,#2
  162. # qhasm: 4x 5y12 += y12
  163. # asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
  164. # asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
  165. vadd.i32 q11,q11,q1
  166. # qhasm: 4x 5y34 += y34
  167. # asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
  168. # asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
  169. vadd.i32 q12,q12,q2
  170. # qhasm: 2x u4 <<= 24
  171. # asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
  172. # asm 2: vshl.i64 >u4=q7,<u4=q7,#24
  173. vshl.i64 q7,q7,#24
  174. # qhasm: 4x 5z12 = z12 << 2
  175. # asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
  176. # asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
  177. vshl.i32 q13,q4,#2
  178. # qhasm: 4x 5z34 = z34 << 2
  179. # asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
  180. # asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
  181. vshl.i32 q14,q5,#2
  182. # qhasm: 4x 5z12 += z12
  183. # asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
  184. # asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
  185. vadd.i32 q13,q13,q4
  186. # qhasm: 4x 5z34 += z34
  187. # asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
  188. # asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
  189. vadd.i32 q14,q14,q5
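# note: 5y = (y << 2) + y and 5z = (z << 2) + z. The factor 5 comes from
# reducing modulo p = 2^130 - 5: in the limb products, terms of weight
# 2^130 and above wrap around as 5 times the corresponding low-weight
# term, so the upper key limbs are kept pre-multiplied by 5.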
  190. # qhasm: new two24
  191. # qhasm: new y0_stack
  192. # qhasm: new y12_stack
  193. # qhasm: new y34_stack
  194. # qhasm: new 5y12_stack
  195. # qhasm: new 5y34_stack
  196. # qhasm: new z0_stack
  197. # qhasm: new z12_stack
  198. # qhasm: new z34_stack
  199. # qhasm: new 5z12_stack
  200. # qhasm: new 5z34_stack
  201. # qhasm: ptr = &two24
  202. # asm 1: lea >ptr=int32#2,<two24=stack128#1
  203. # asm 2: lea >ptr=r1,<two24=[sp,#0]
  204. add r1,sp,#0
  205. # qhasm: mem128[ptr] aligned= u4
  206. # asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
  207. # asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
  208. vst1.8 {d14-d15},[r1,: 128]
  209. # qhasm: r4 = u4
  210. # asm 1: vmov >r4=reg128#16,<u4=reg128#8
  211. # asm 2: vmov >r4=q15,<u4=q7
  212. vmov q15,q7
  213. # qhasm: r0 = u4
  214. # asm 1: vmov >r0=reg128#8,<u4=reg128#8
  215. # asm 2: vmov >r0=q7,<u4=q7
  216. vmov q7,q7
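# note: u4 now holds 2^24 in each 64-bit lane (0xff >> 7 << 24); it is
# cached in two24 and seeds r4 (and r0). In radix 2^26, limb 4 covers bits
# 104..129, so the "append 1" bit 2^128 of a full 16-byte Poly1305 block
# sits at bit 24 of limb 4, which is what this constant appears to encode.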
  217. # qhasm: ptr = &y0_stack
  218. # asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
  219. # asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
  220. add r1,sp,#16
  221. # qhasm: mem128[ptr] aligned= y0
  222. # asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
  223. # asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
  224. vst1.8 {d0-d1},[r1,: 128]
  225. # qhasm: ptr = &y12_stack
  226. # asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
  227. # asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
  228. add r1,sp,#32
  229. # qhasm: mem128[ptr] aligned= y12
  230. # asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
  231. # asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
  232. vst1.8 {d2-d3},[r1,: 128]
  233. # qhasm: ptr = &y34_stack
  234. # asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
  235. # asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
  236. add r1,sp,#48
  237. # qhasm: mem128[ptr] aligned= y34
  238. # asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
  239. # asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
  240. vst1.8 {d4-d5},[r1,: 128]
  241. # qhasm: ptr = &z0_stack
  242. # asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
  243. # asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
  244. add r1,sp,#96
  245. # qhasm: mem128[ptr] aligned= z0
  246. # asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
  247. # asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
  248. vst1.8 {d6-d7},[r1,: 128]
  249. # qhasm: ptr = &z12_stack
  250. # asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
  251. # asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
  252. add r1,sp,#112
  253. # qhasm: mem128[ptr] aligned= z12
  254. # asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
  255. # asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
  256. vst1.8 {d8-d9},[r1,: 128]
  257. # qhasm: ptr = &z34_stack
  258. # asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
  259. # asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
  260. add r1,sp,#128
  261. # qhasm: mem128[ptr] aligned= z34
  262. # asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
  263. # asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
  264. vst1.8 {d10-d11},[r1,: 128]
  265. # qhasm: ptr = &5y12_stack
  266. # asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
  267. # asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
  268. add r1,sp,#64
  269. # qhasm: mem128[ptr] aligned= 5y12
  270. # asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
  271. # asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
  272. vst1.8 {d22-d23},[r1,: 128]
  273. # qhasm: ptr = &5y34_stack
  274. # asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
  275. # asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
  276. add r1,sp,#80
  277. # qhasm: mem128[ptr] aligned= 5y34
  278. # asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
  279. # asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
  280. vst1.8 {d24-d25},[r1,: 128]
  281. # qhasm: ptr = &5z12_stack
  282. # asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
  283. # asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
  284. add r1,sp,#144
  285. # qhasm: mem128[ptr] aligned= 5z12
  286. # asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
  287. # asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
  288. vst1.8 {d26-d27},[r1,: 128]
  289. # qhasm: ptr = &5z34_stack
  290. # asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
  291. # asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
  292. add r1,sp,#160
  293. # qhasm: mem128[ptr] aligned= 5z34
  294. # asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
  295. # asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
  296. vst1.8 {d28-d29},[r1,: 128]
  297. # qhasm: unsigned>? len - 64
  298. # asm 1: cmp <len=int32#4,#64
  299. # asm 2: cmp <len=r3,#64
  300. cmp r3,#64
  301. # qhasm: goto below64bytes if !unsigned>
  302. bls ._below64bytes
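# note: len is compared against 64 because mainloop2 consumes 64 message
# bytes per iteration; at 64 bytes or fewer, control skips ahead to the
# 32-byte loop at ._below64bytes.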
  303. # qhasm: input_2 += 32
  304. # asm 1: add >input_2=int32#2,<input_2=int32#3,#32
  305. # asm 2: add >input_2=r1,<input_2=r2,#32
  306. add r1,r2,#32
  307. # qhasm: mainloop2:
  308. ._mainloop2:
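# note: each pass absorbs four 16-byte blocks. The vmlal chains multiply
# the running state x01/x23/x4 by the z limbs while the freshly loaded
# block pair (v01/v23/v4, carved out of d01/d23) is multiplied by the y
# limbs: the usual two-way split h = (h + c)*k1 + d*k2 over stored key
# powers. This file does not say which of y/z is r and which is r^2.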
  309. # qhasm: c01 = mem128[input_2];input_2+=16
  310. # asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
  311. # asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
  312. vld1.8 {d0-d1},[r1]!
  313. # qhasm: c23 = mem128[input_2];input_2+=16
  314. # asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
  315. # asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
  316. vld1.8 {d2-d3},[r1]!
  317. # qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
  318. # asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
  319. # asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
  320. vmlal.u32 q15,d16,d11
  321. # qhasm: ptr = &z12_stack
  322. # asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
  323. # asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
  324. add r2,sp,#112
  325. # qhasm: z12 aligned= mem128[ptr]
  326. # asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
  327. # asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
  328. vld1.8 {d4-d5},[r2,: 128]
  329. # qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
  330. # asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
  331. # asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
  332. vmlal.u32 q15,d17,d10
  333. # qhasm: ptr = &z0_stack
  334. # asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
  335. # asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
  336. add r2,sp,#96
  337. # qhasm: z0 aligned= mem128[ptr]
  338. # asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
  339. # asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
  340. vld1.8 {d6-d7},[r2,: 128]
  341. # qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
  342. # asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
  343. # asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
  344. vmlal.u32 q15,d18,d5
  345. # qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
  346. # asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
  347. # asm 2: vtrn.32 <c01=d1,<c23=d3
  348. vtrn.32 d1,d3
  349. # qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
  350. # asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
  351. # asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
  352. vmlal.u32 q15,d19,d4
  353. # qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
  354. # asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
  355. # asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
  356. vmlal.u32 q15,d20,d6
  357. # qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
  358. # asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
  359. # asm 2: vshll.u32 >r3=q4,<c23=d3,#18
  360. vshll.u32 q4,d3,#18
  361. # qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
  362. # asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
  363. # asm 2: vtrn.32 <c01=d0,<c23=d2
  364. vtrn.32 d0,d2
  365. # qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
  366. # asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
  367. # asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
  368. vmlal.u32 q4,d16,d10
  369. # qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
  370. # asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
  371. # asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
  372. vmlal.u32 q4,d17,d5
  373. # qhasm: r0 = r0[1]c01[0]r0[2,3]
  374. # asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
  375. # asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
  376. vext.32 d14,d14,d0,#1
  377. # qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
  378. # asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
  379. # asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
  380. vmlal.u32 q4,d18,d4
  381. # qhasm: input_2 -= 64
  382. # asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
  383. # asm 2: sub >input_2=r1,<input_2=r1,#64
  384. sub r1,r1,#64
  385. # qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
  386. # asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
  387. # asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
  388. vmlal.u32 q4,d19,d6
  389. # qhasm: ptr = &5z34_stack
  390. # asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
  391. # asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
  392. add r2,sp,#160
  393. # qhasm: 5z34 aligned= mem128[ptr]
  394. # asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
  395. # asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
  396. vld1.8 {d10-d11},[r2,: 128]
  397. # qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
  398. # asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
  399. # asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
  400. vmlal.u32 q4,d20,d11
  401. # qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
  402. # asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
  403. # asm 2: vrev64.i32 >r0=q7,<r0=q7
  404. vrev64.i32 q7,q7
  405. # qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
  406. # asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
  407. # asm 2: vshll.u32 >r2=q13,<c01=d1,#12
  408. vshll.u32 q13,d1,#12
  409. # qhasm: d01 = mem128[input_2];input_2+=16
  410. # asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
  411. # asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
  412. vld1.8 {d22-d23},[r1]!
  413. # qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
  414. # asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
  415. # asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
  416. vmlal.u32 q13,d16,d5
  417. # qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
  418. # asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
  419. # asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
  420. vmlal.u32 q13,d17,d4
  421. # qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
  422. # asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
  423. # asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
  424. vmlal.u32 q13,d18,d6
  425. # qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
  426. # asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
  427. # asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
  428. vmlal.u32 q13,d19,d11
  429. # qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
  430. # asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
  431. # asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
  432. vmlal.u32 q13,d20,d10
  433. # qhasm: r0 = r0[0,1]c01[1]r0[2]
  434. # asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
  435. # asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
  436. vext.32 d15,d0,d15,#1
  437. # qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
  438. # asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
  439. # asm 2: vshll.u32 >r1=q14,<c23=d2,#6
  440. vshll.u32 q14,d2,#6
  441. # qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
  442. # asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
  443. # asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
  444. vmlal.u32 q14,d16,d4
  445. # qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
  446. # asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
  447. # asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
  448. vmlal.u32 q14,d17,d6
  449. # qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
  450. # asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
  451. # asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
  452. vmlal.u32 q14,d18,d11
  453. # qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
  454. # asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
  455. # asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
  456. vmlal.u32 q14,d19,d10
  457. # qhasm: ptr = &5z12_stack
  458. # asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
  459. # asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
  460. add r2,sp,#144
  461. # qhasm: 5z12 aligned= mem128[ptr]
  462. # asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
  463. # asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
  464. vld1.8 {d0-d1},[r2,: 128]
  465. # qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
  466. # asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
  467. # asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
  468. vmlal.u32 q14,d20,d1
  469. # qhasm: d23 = mem128[input_2];input_2+=16
  470. # asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
  471. # asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
  472. vld1.8 {d2-d3},[r1]!
  473. # qhasm: input_2 += 32
  474. # asm 1: add >input_2=int32#2,<input_2=int32#2,#32
  475. # asm 2: add >input_2=r1,<input_2=r1,#32
  476. add r1,r1,#32
  477. # qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
  478. # asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
  479. # asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
  480. vmlal.u32 q7,d20,d0
  481. # qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
  482. # asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
  483. # asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
  484. vmlal.u32 q7,d18,d10
  485. # qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
  486. # asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
  487. # asm 2: vswp <d23=d2,<d01=d23
  488. vswp d2,d23
  489. # qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
  490. # asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
  491. # asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
  492. vmlal.u32 q7,d19,d1
  493. # qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
  494. # asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
  495. # asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
  496. vmlal.u32 q7,d16,d6
  497. # qhasm: new mid
  498. # qhasm: 2x v4 = d23 unsigned>> 40
  499. # asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
  500. # asm 2: vshr.u64 >v4=q3,<d23=q1,#40
  501. vshr.u64 q3,q1,#40
  502. # qhasm: mid = d01[1]d23[0] mid[2,3]
  503. # asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
  504. # asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
  505. vext.32 d0,d22,d2,#1
  506. # qhasm: new v23
  507. # qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
  508. # asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
  509. # asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
  510. vshrn.u64 d19,q1,#14
  511. # qhasm: mid = mid[0,1] d01[3]d23[2]
  512. # asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
  513. # asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
  514. vext.32 d1,d23,d3,#1
  515. # qhasm: new v01
  516. # qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
  517. # asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
  518. # asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
  519. vshrn.u64 d21,q11,#26
  520. # qhasm: v01 = d01[1]d01[0] v01[2,3]
  521. # asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
  522. # asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
  523. vext.32 d20,d22,d22,#1
  524. # qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
  525. # asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
  526. # asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
  527. vmlal.u32 q7,d17,d11
  528. # qhasm: v01 = v01[1]d01[2] v01[2,3]
  529. # asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
  530. # asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
  531. vext.32 d20,d20,d23,#1
  532. # qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
  533. # asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
  534. # asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
  535. vshrn.u64 d18,q0,#20
  536. # qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
  537. # asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
  538. # asm 2: vtrn.32 <v4=d6,<v4=d7
  539. vtrn.32 d6,d7
  540. # qhasm: 4x v01 &= 0x03ffffff
  541. # asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
  542. # asm 2: vand.i32 <v01=q10,#0x03ffffff
  543. vand.i32 q10,#0x03ffffff
  544. # qhasm: ptr = &y34_stack
  545. # asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
  546. # asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
  547. add r2,sp,#48
  548. # qhasm: y34 aligned= mem128[ptr]
  549. # asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
  550. # asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
  551. vld1.8 {d4-d5},[r2,: 128]
  552. # qhasm: 4x v23 &= 0x03ffffff
  553. # asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
  554. # asm 2: vand.i32 <v23=q9,#0x03ffffff
  555. vand.i32 q9,#0x03ffffff
  556. # qhasm: ptr = &y12_stack
  557. # asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
  558. # asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
  559. add r2,sp,#32
  560. # qhasm: y12 aligned= mem128[ptr]
  561. # asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
  562. # asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
  563. vld1.8 {d2-d3},[r2,: 128]
  564. # qhasm: 4x v4 |= 0x01000000
  565. # asm 1: vorr.i32 <v4=reg128#4,#0x01000000
  566. # asm 2: vorr.i32 <v4=q3,#0x01000000
  567. vorr.i32 q3,#0x01000000
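# note: v01/v23/v4 now hold the radix-2^26 limbs of the d block pair,
# extracted with vshrn/vext and masked to 0x03ffffff; vorr #0x01000000
# sets bit 24 of limb 4, i.e. the implicit 2^128 "append 1" bit of each
# full 16-byte block.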
  568. # qhasm: ptr = &y0_stack
  569. # asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
  570. # asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
  571. add r2,sp,#16
  572. # qhasm: y0 aligned= mem128[ptr]
  573. # asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
  574. # asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
  575. vld1.8 {d0-d1},[r2,: 128]
  576. # qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
  577. # asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
  578. # asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
  579. vmlal.u32 q15,d20,d5
  580. # qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
  581. # asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
  582. # asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
  583. vmlal.u32 q15,d21,d4
  584. # qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
  585. # asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
  586. # asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
  587. vmlal.u32 q15,d18,d3
  588. # qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
  589. # asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
  590. # asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
  591. vmlal.u32 q15,d19,d2
  592. # qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
  593. # asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
  594. # asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
  595. vmlal.u32 q15,d6,d0
  596. # qhasm: ptr = &5y34_stack
  597. # asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
  598. # asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
  599. add r2,sp,#80
  600. # qhasm: 5y34 aligned= mem128[ptr]
  601. # asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
  602. # asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
  603. vld1.8 {d24-d25},[r2,: 128]
  604. # qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
  605. # asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
  606. # asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
  607. vmlal.u32 q4,d20,d4
  608. # qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
  609. # asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
  610. # asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
  611. vmlal.u32 q4,d21,d3
  612. # qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
  613. # asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
  614. # asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
  615. vmlal.u32 q4,d18,d2
  616. # qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
  617. # asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
  618. # asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
  619. vmlal.u32 q4,d19,d0
  620. # qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
  621. # asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
  622. # asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
  623. vmlal.u32 q4,d6,d25
  624. # qhasm: ptr = &5y12_stack
  625. # asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
  626. # asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
  627. add r2,sp,#64
  628. # qhasm: 5y12 aligned= mem128[ptr]
  629. # asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
  630. # asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
  631. vld1.8 {d22-d23},[r2,: 128]
  632. # qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
  633. # asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
  634. # asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
  635. vmlal.u32 q7,d6,d22
  636. # qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
  637. # asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
  638. # asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
  639. vmlal.u32 q7,d18,d24
  640. # qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
  641. # asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
  642. # asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
  643. vmlal.u32 q7,d19,d23
  644. # qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
  645. # asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
  646. # asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
  647. vmlal.u32 q7,d20,d0
  648. # qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
  649. # asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
  650. # asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
  651. vmlal.u32 q7,d21,d25
  652. # qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
  653. # asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
  654. # asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
  655. vmlal.u32 q14,d20,d2
  656. # qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
  657. # asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
  658. # asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
  659. vmlal.u32 q14,d21,d0
  660. # qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
  661. # asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
  662. # asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
  663. vmlal.u32 q14,d18,d25
  664. # qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
  665. # asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
  666. # asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
  667. vmlal.u32 q14,d19,d24
  668. # qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
  669. # asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
  670. # asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
  671. vmlal.u32 q14,d6,d23
  672. # qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
  673. # asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
  674. # asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
  675. vmlal.u32 q13,d20,d3
  676. # qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
  677. # asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
  678. # asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
  679. vmlal.u32 q13,d21,d2
  680. # qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
  681. # asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
  682. # asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
  683. vmlal.u32 q13,d18,d0
  684. # qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
  685. # asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
  686. # asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
  687. vmlal.u32 q13,d19,d25
  688. # qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
  689. # asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
  690. # asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
  691. vmlal.u32 q13,d6,d24
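# note: end of the multiply-accumulate phase; r0..r4 now hold 64-bit lane
# sums well above 26 bits. What follows is the standard Poly1305 carry
# chain: t = limb >> 26, limb &= 0x03ffffff, next limb += t, with the
# carry out of limb 4 folded back into limb 0 multiplied by 5.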
  692. # qhasm: ptr = &two24
  693. # asm 1: lea >ptr=int32#3,<two24=stack128#1
  694. # asm 2: lea >ptr=r2,<two24=[sp,#0]
  695. add r2,sp,#0
  696. # qhasm: 2x t1 = r0 unsigned>> 26
  697. # asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
  698. # asm 2: vshr.u64 >t1=q3,<r0=q7,#26
  699. vshr.u64 q3,q7,#26
  700. # qhasm: len -= 64
  701. # asm 1: sub >len=int32#4,<len=int32#4,#64
  702. # asm 2: sub >len=r3,<len=r3,#64
  703. sub r3,r3,#64
  704. # qhasm: r0 &= mask
  705. # asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
  706. # asm 2: vand >r0=q5,<r0=q7,<mask=q6
  707. vand q5,q7,q6
  708. # qhasm: 2x r1 += t1
  709. # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
  710. # asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
  711. vadd.i64 q3,q14,q3
  712. # qhasm: 2x t4 = r3 unsigned>> 26
  713. # asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
  714. # asm 2: vshr.u64 >t4=q7,<r3=q4,#26
  715. vshr.u64 q7,q4,#26
  716. # qhasm: r3 &= mask
  717. # asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
  718. # asm 2: vand >r3=q4,<r3=q4,<mask=q6
  719. vand q4,q4,q6
  720. # qhasm: 2x x4 = r4 + t4
  721. # asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
  722. # asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
  723. vadd.i64 q7,q15,q7
  724. # qhasm: r4 aligned= mem128[ptr]
  725. # asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
  726. # asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
  727. vld1.8 {d30-d31},[r2,: 128]
  728. # qhasm: 2x t2 = r1 unsigned>> 26
  729. # asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
  730. # asm 2: vshr.u64 >t2=q8,<r1=q3,#26
  731. vshr.u64 q8,q3,#26
  732. # qhasm: r1 &= mask
  733. # asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
  734. # asm 2: vand >r1=q3,<r1=q3,<mask=q6
  735. vand q3,q3,q6
  736. # qhasm: 2x t0 = x4 unsigned>> 26
  737. # asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
  738. # asm 2: vshr.u64 >t0=q9,<x4=q7,#26
  739. vshr.u64 q9,q7,#26
  740. # qhasm: 2x r2 += t2
  741. # asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
  742. # asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
  743. vadd.i64 q8,q13,q8
  744. # qhasm: x4 &= mask
  745. # asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
  746. # asm 2: vand >x4=q10,<x4=q7,<mask=q6
  747. vand q10,q7,q6
  748. # qhasm: 2x x01 = r0 + t0
  749. # asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
  750. # asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
  751. vadd.i64 q5,q5,q9
  752. # qhasm: r0 aligned= mem128[ptr]
  753. # asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
  754. # asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
  755. vld1.8 {d14-d15},[r2,: 128]
  756. # qhasm: ptr = &z34_stack
  757. # asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
  758. # asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
  759. add r2,sp,#128
  760. # qhasm: 2x t0 <<= 2
  761. # asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
  762. # asm 2: vshl.i64 >t0=q9,<t0=q9,#2
  763. vshl.i64 q9,q9,#2
  764. # qhasm: 2x t3 = r2 unsigned>> 26
  765. # asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
  766. # asm 2: vshr.u64 >t3=q13,<r2=q8,#26
  767. vshr.u64 q13,q8,#26
  768. # qhasm: 2x x01 += t0
  769. # asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
  770. # asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
  771. vadd.i64 q14,q5,q9
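# note: x01 = r0 + t0 followed by t0 <<= 2 and x01 += t0 yields
# x01 = r0 + 5*t0: the carry t0 out of limb 4 re-enters limb 0 scaled by
# 5, since 2^130 = 5 (mod 2^130 - 5).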
  772. # qhasm: z34 aligned= mem128[ptr]
  773. # asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
  774. # asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
  775. vld1.8 {d10-d11},[r2,: 128]
  776. # qhasm: x23 = r2 & mask
  777. # asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
  778. # asm 2: vand >x23=q9,<r2=q8,<mask=q6
  779. vand q9,q8,q6
  780. # qhasm: 2x r3 += t3
  781. # asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
  782. # asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
  783. vadd.i64 q4,q4,q13
  784. # qhasm: input_2 += 32
  785. # asm 1: add >input_2=int32#2,<input_2=int32#2,#32
  786. # asm 2: add >input_2=r1,<input_2=r1,#32
  787. add r1,r1,#32
  788. # qhasm: 2x t1 = x01 unsigned>> 26
  789. # asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
  790. # asm 2: vshr.u64 >t1=q13,<x01=q14,#26
  791. vshr.u64 q13,q14,#26
  792. # qhasm: x23 = x23[0,2,1,3]
  793. # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
  794. # asm 2: vtrn.32 <x23=d18,<x23=d19
  795. vtrn.32 d18,d19
  796. # qhasm: x01 = x01 & mask
  797. # asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
  798. # asm 2: vand >x01=q8,<x01=q14,<mask=q6
  799. vand q8,q14,q6
  800. # qhasm: 2x r1 += t1
  801. # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
  802. # asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
  803. vadd.i64 q3,q3,q13
  804. # qhasm: 2x t4 = r3 unsigned>> 26
  805. # asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
  806. # asm 2: vshr.u64 >t4=q13,<r3=q4,#26
  807. vshr.u64 q13,q4,#26
  808. # qhasm: x01 = x01[0,2,1,3]
  809. # asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
  810. # asm 2: vtrn.32 <x01=d16,<x01=d17
  811. vtrn.32 d16,d17
  812. # qhasm: r3 &= mask
  813. # asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
  814. # asm 2: vand >r3=q4,<r3=q4,<mask=q6
  815. vand q4,q4,q6
  816. # qhasm: r1 = r1[0,2,1,3]
  817. # asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
  818. # asm 2: vtrn.32 <r1=d6,<r1=d7
  819. vtrn.32 d6,d7
  820. # qhasm: 2x x4 += t4
  821. # asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
  822. # asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
  823. vadd.i64 q10,q10,q13
  824. # qhasm: r3 = r3[0,2,1,3]
  825. # asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
  826. # asm 2: vtrn.32 <r3=d8,<r3=d9
  827. vtrn.32 d8,d9
  828. # qhasm: x01 = x01[0,1] r1[0,1]
  829. # asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
  830. # asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
  831. vext.32 d17,d6,d6,#0
  832. # qhasm: x23 = x23[0,1] r3[0,1]
  833. # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
  834. # asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
  835. vext.32 d19,d8,d8,#0
  836. # qhasm: x4 = x4[0,2,1,3]
  837. # asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
  838. # asm 2: vtrn.32 <x4=d20,<x4=d21
  839. vtrn.32 d20,d21
  840. # qhasm: unsigned>? len - 64
  841. # asm 1: cmp <len=int32#4,#64
  842. # asm 2: cmp <len=r3,#64
  843. cmp r3,#64
  844. # qhasm: goto mainloop2 if unsigned>
  845. bhi ._mainloop2
  846. # qhasm: input_2 -= 32
  847. # asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
  848. # asm 2: sub >input_2=r2,<input_2=r1,#32
  849. sub r2,r1,#32
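# note: loop again while more than 64 bytes remain; on exit input_2 is
# rewound by the 32-byte lead it was given before the loop, so the
# 32-byte loop below sees the first unprocessed block pair.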
  850. # qhasm: below64bytes:
  851. ._below64bytes:
  852. # qhasm: unsigned>? len - 32
  853. # asm 1: cmp <len=int32#4,#32
  854. # asm 2: cmp <len=r3,#32
  855. cmp r3,#32
  856. # qhasm: goto end if !unsigned>
  857. bls ._end
  858. # qhasm: mainloop:
  859. ._mainloop:
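# note: this loop absorbs 32 bytes (one pair of 16-byte blocks, split
# across the two 64-bit lanes) per pass: r4 and the r0 lanes are reseeded
# from two24/u4, c01/c23 are split into limbs with the same
# shift-by-6*i word trick used above, and a full carry chain runs before
# the conditional branch back.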
  860. # qhasm: new r0
  861. # qhasm: ptr = &two24
  862. # asm 1: lea >ptr=int32#2,<two24=stack128#1
  863. # asm 2: lea >ptr=r1,<two24=[sp,#0]
  864. add r1,sp,#0
  865. # qhasm: r4 aligned= mem128[ptr]
  866. # asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
  867. # asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
  868. vld1.8 {d8-d9},[r1,: 128]
  869. # qhasm: u4 aligned= mem128[ptr]
  870. # asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
  871. # asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
  872. vld1.8 {d10-d11},[r1,: 128]
  873. # qhasm: c01 = mem128[input_2];input_2+=16
  874. # asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
  875. # asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
  876. vld1.8 {d14-d15},[r2]!
  877. # qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
  878. # asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
  879. # asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
  880. vmlal.u32 q4,d16,d5
  881. # qhasm: c23 = mem128[input_2];input_2+=16
  882. # asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
  883. # asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
  884. vld1.8 {d26-d27},[r2]!
  885. # qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
  886. # asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
  887. # asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
  888. vmlal.u32 q4,d17,d4
  889. # qhasm: r0 = u4[1]c01[0]r0[2,3]
  890. # asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
  891. # asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
  892. vext.32 d6,d10,d14,#1
  893. # qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
  894. # asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
  895. # asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
  896. vmlal.u32 q4,d18,d3
  897. # qhasm: r0 = r0[0,1]u4[1]c23[0]
  898. # asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
  899. # asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
  900. vext.32 d7,d10,d26,#1
  901. # qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
  902. # asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
  903. # asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
  904. vmlal.u32 q4,d19,d2
  905. # qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
  906. # asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
  907. # asm 2: vrev64.i32 >r0=q3,<r0=q3
  908. vrev64.i32 q3,q3
  909. # qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
  910. # asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
  911. # asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
  912. vmlal.u32 q4,d20,d0
  913. # qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
  914. # asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
  915. # asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
  916. vmlal.u32 q3,d20,d22
  917. # qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
  918. # asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
  919. # asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
  920. vmlal.u32 q3,d18,d24
  921. # qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
  922. # asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
  923. # asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
  924. vmlal.u32 q3,d19,d23
  925. # qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
  926. # asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
  927. # asm 2: vtrn.32 <c01=q7,<c23=q13
  928. vtrn.32 q7,q13
  929. # qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
  930. # asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
  931. # asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
  932. vmlal.u32 q3,d16,d0
  933. # qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
  934. # asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
  935. # asm 2: vshll.u32 >r3=q5,<c23=d27,#18
  936. vshll.u32 q5,d27,#18
  937. # qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
  938. # asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
  939. # asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
  940. vmlal.u32 q3,d17,d25
  941. # qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
  942. # asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
  943. # asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
  944. vmlal.u32 q5,d16,d4
  945. # qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
  946. # asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
  947. # asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
  948. vmlal.u32 q5,d17,d3
  949. # qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
  950. # asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
  951. # asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
  952. vmlal.u32 q5,d18,d2
  953. # qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
  954. # asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
  955. # asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
  956. vmlal.u32 q5,d19,d0
  957. # qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
  958. # asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
  959. # asm 2: vshll.u32 >r1=q13,<c23=d26,#6
  960. vshll.u32 q13,d26,#6
  961. # qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
  962. # asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
  963. # asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
  964. vmlal.u32 q5,d20,d25
  965. # qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
  966. # asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
  967. # asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
  968. vmlal.u32 q13,d16,d2
  969. # qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
  970. # asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
  971. # asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
  972. vmlal.u32 q13,d17,d0
  973. # qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
  974. # asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
  975. # asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
  976. vmlal.u32 q13,d18,d25
  977. # qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
  978. # asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
  979. # asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
  980. vmlal.u32 q13,d19,d24
  981. # qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
  982. # asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
  983. # asm 2: vshll.u32 >r2=q7,<c01=d15,#12
  984. vshll.u32 q7,d15,#12
  985. # qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
  986. # asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
  987. # asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
  988. vmlal.u32 q13,d20,d23
  989. # qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
  990. # asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
  991. # asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
  992. vmlal.u32 q7,d16,d3
  993. # qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
  994. # asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
  995. # asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
  996. vmlal.u32 q7,d17,d2
  997. # qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
  998. # asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
  999. # asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
  1000. vmlal.u32 q7,d18,d0
  1001. # qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
  1002. # asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
  1003. # asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
  1004. vmlal.u32 q7,d19,d25
  1005. # qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
  1006. # asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
  1007. # asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
  1008. vmlal.u32 q7,d20,d24
  1009. # qhasm: 2x t1 = r0 unsigned>> 26
  1010. # asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
  1011. # asm 2: vshr.u64 >t1=q8,<r0=q3,#26
  1012. vshr.u64 q8,q3,#26
  1013. # qhasm: r0 &= mask
  1014. # asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
  1015. # asm 2: vand >r0=q3,<r0=q3,<mask=q6
  1016. vand q3,q3,q6
  1017. # qhasm: 2x r1 += t1
  1018. # asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
  1019. # asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
  1020. vadd.i64 q8,q13,q8
  1021. # qhasm: 2x t4 = r3 unsigned>> 26
  1022. # asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
  1023. # asm 2: vshr.u64 >t4=q9,<r3=q5,#26
  1024. vshr.u64 q9,q5,#26
  1025. # qhasm: r3 &= mask
  1026. # asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
  1027. # asm 2: vand >r3=q5,<r3=q5,<mask=q6
  1028. vand q5,q5,q6
  1029. # qhasm: 2x r4 += t4
  1030. # asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
  1031. # asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
  1032. vadd.i64 q4,q4,q9
  1033. # qhasm: 2x t2 = r1 unsigned>> 26
  1034. # asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
  1035. # asm 2: vshr.u64 >t2=q9,<r1=q8,#26
  1036. vshr.u64 q9,q8,#26
  1037. # qhasm: r1 &= mask
  1038. # asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
  1039. # asm 2: vand >r1=q10,<r1=q8,<mask=q6
  1040. vand q10,q8,q6
  1041. # qhasm: 2x t0 = r4 unsigned>> 26
  1042. # asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
  1043. # asm 2: vshr.u64 >t0=q8,<r4=q4,#26
  1044. vshr.u64 q8,q4,#26
  1045. # qhasm: 2x r2 += t2
  1046. # asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
  1047. # asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
  1048. vadd.i64 q7,q7,q9
  1049. # qhasm: r4 &= mask
  1050. # asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
  1051. # asm 2: vand >r4=q4,<r4=q4,<mask=q6
  1052. vand q4,q4,q6
  1053. # qhasm: 2x r0 += t0
  1054. # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
  1055. # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
  1056. vadd.i64 q3,q3,q8
  1057. # qhasm: 2x t0 <<= 2
  1058. # asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
  1059. # asm 2: vshl.i64 >t0=q8,<t0=q8,#2
  1060. vshl.i64 q8,q8,#2
  1061. # qhasm: 2x t3 = r2 unsigned>> 26
  1062. # asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
  1063. # asm 2: vshr.u64 >t3=q13,<r2=q7,#26
  1064. vshr.u64 q13,q7,#26
  1065. # qhasm: 2x r0 += t0
  1066. # asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
  1067. # asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
  1068. vadd.i64 q3,q3,q8
  1069. # qhasm: x23 = r2 & mask
  1070. # asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
  1071. # asm 2: vand >x23=q9,<r2=q7,<mask=q6
  1072. vand q9,q7,q6
  1073. # qhasm: 2x r3 += t3
  1074. # asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
  1075. # asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
  1076. vadd.i64 q5,q5,q13
  1077. # qhasm: 2x t1 = r0 unsigned>> 26
  1078. # asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
  1079. # asm 2: vshr.u64 >t1=q7,<r0=q3,#26
  1080. vshr.u64 q7,q3,#26
  1081. # qhasm: x01 = r0 & mask
  1082. # asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
  1083. # asm 2: vand >x01=q8,<r0=q3,<mask=q6
  1084. vand q8,q3,q6
  1085. # qhasm: 2x r1 += t1
  1086. # asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
  1087. # asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
  1088. vadd.i64 q3,q10,q7
  1089. # qhasm: 2x t4 = r3 unsigned>> 26
  1090. # asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
  1091. # asm 2: vshr.u64 >t4=q7,<r3=q5,#26
  1092. vshr.u64 q7,q5,#26
  1093. # qhasm: r3 &= mask
  1094. # asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
  1095. # asm 2: vand >r3=q5,<r3=q5,<mask=q6
  1096. vand q5,q5,q6
  1097. # qhasm: 2x x4 = r4 + t4
  1098. # asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
  1099. # asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
  1100. vadd.i64 q10,q4,q7
  1101. # qhasm: len -= 32
  1102. # asm 1: sub >len=int32#4,<len=int32#4,#32
  1103. # asm 2: sub >len=r3,<len=r3,#32
  1104. sub r3,r3,#32
  1105. # qhasm: x01 = x01[0,2,1,3]
  1106. # asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
  1107. # asm 2: vtrn.32 <x01=d16,<x01=d17
  1108. vtrn.32 d16,d17
  1109. # qhasm: x23 = x23[0,2,1,3]
  1110. # asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
  1111. # asm 2: vtrn.32 <x23=d18,<x23=d19
  1112. vtrn.32 d18,d19
  1113. # qhasm: r1 = r1[0,2,1,3]
  1114. # asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
  1115. # asm 2: vtrn.32 <r1=d6,<r1=d7
  1116. vtrn.32 d6,d7
  1117. # qhasm: r3 = r3[0,2,1,3]
  1118. # asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
  1119. # asm 2: vtrn.32 <r3=d10,<r3=d11
  1120. vtrn.32 d10,d11
  1121. # qhasm: x4 = x4[0,2,1,3]
  1122. # asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
  1123. # asm 2: vtrn.32 <x4=d20,<x4=d21
  1124. vtrn.32 d20,d21
  1125. # qhasm: x01 = x01[0,1] r1[0,1]
  1126. # asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
  1127. # asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
  1128. vext.32 d17,d6,d6,#0
  1129. # qhasm: x23 = x23[0,1] r3[0,1]
  1130. # asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
  1131. # asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
  1132. vext.32 d19,d10,d10,#0
  1133. # qhasm: unsigned>? len - 32
  1134. # asm 1: cmp <len=int32#4,#32
  1135. # asm 2: cmp <len=r3,#32
  1136. cmp r3,#32
  1137. # qhasm: goto mainloop if unsigned>
  1138. bhi ._mainloop
  1139. # qhasm: end:
  1140. ._end:
  1141. # qhasm: mem128[input_0] = x01;input_0+=16
  1142. # asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
  1143. # asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
  1144. vst1.8 {d16-d17},[r0]!
  1145. # qhasm: mem128[input_0] = x23;input_0+=16
  1146. # asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
  1147. # asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
  1148. vst1.8 {d18-d19},[r0]!
  1149. # qhasm: mem64[input_0] = x4[0]
  1150. # asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
  1151. # asm 2: vst1.8 <x4=d20,[<input_0=r0]
  1152. vst1.8 d20,[r0]
  1153. # qhasm: len = len
  1154. # asm 1: mov >len=int32#1,<len=int32#4
  1155. # asm 2: mov >len=r0,<len=r3
  1156. mov r0,r3
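# note: the leftover byte count (at most 32 here) is returned in r0 per
# the ARM calling convention; the final partial block(s) are presumably
# handled by the caller.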
  1157. # qhasm: qpopreturn len
  1158. mov sp,r12
  1159. vpop {q4,q5,q6,q7}
  1160. bx lr
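# note: for reference, a scalar C sketch of the carry chain above, in the
# same order as the ._mainloop chain (names h0..h4 and the uint64_t widths
# are illustrative only, not taken from this file):
#
#   uint64_t c;
#   c = h0 >> 26; h0 &= 0x3ffffff; h1 += c;      /* limb 0 -> 1 */
#   c = h3 >> 26; h3 &= 0x3ffffff; h4 += c;      /* limb 3 -> 4 */
#   c = h1 >> 26; h1 &= 0x3ffffff; h2 += c;      /* limb 1 -> 2 */
#   c = h4 >> 26; h4 &= 0x3ffffff; h0 += 5 * c;  /* 2^130 = 5 mod p */
#   c = h2 >> 26; h2 &= 0x3ffffff; h3 += c;      /* limb 2 -> 3 */
#   c = h0 >> 26; h0 &= 0x3ffffff; h1 += c;      /* second pass 0 -> 1 */
#   c = h3 >> 26; h3 &= 0x3ffffff; h4 += c;      /* second pass 3 -> 4 */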