blocks.s
# qhasm: int32 input_0
# qhasm: int32 input_1
# qhasm: int32 input_2
# qhasm: int32 input_3
# qhasm: stack32 input_4
# qhasm: stack32 input_5
# qhasm: stack32 input_6
# qhasm: stack32 input_7
# qhasm: int32 caller_r4
# qhasm: int32 caller_r5
# qhasm: int32 caller_r6
# qhasm: int32 caller_r7
# qhasm: int32 caller_r8
# qhasm: int32 caller_r9
# qhasm: int32 caller_r10
# qhasm: int32 caller_r11
# qhasm: int32 caller_r12
# qhasm: int32 caller_r14
# qhasm: reg128 caller_q4
# qhasm: reg128 caller_q5
# qhasm: reg128 caller_q6
# qhasm: reg128 caller_q7
# qhasm: startcode
.fpu neon
.text
# qhasm: reg128 r0
# qhasm: reg128 r1
# qhasm: reg128 r2
# qhasm: reg128 r3
# qhasm: reg128 r4
# qhasm: reg128 x01
# qhasm: reg128 x23
# qhasm: reg128 x4
# qhasm: reg128 y0
# qhasm: reg128 y12
# qhasm: reg128 y34
# qhasm: reg128 5y12
# qhasm: reg128 5y34
# qhasm: stack128 y0_stack
# qhasm: stack128 y12_stack
# qhasm: stack128 y34_stack
# qhasm: stack128 5y12_stack
# qhasm: stack128 5y34_stack
# qhasm: reg128 z0
# qhasm: reg128 z12
# qhasm: reg128 z34
# qhasm: reg128 5z12
# qhasm: reg128 5z34
# qhasm: stack128 z0_stack
# qhasm: stack128 z12_stack
# qhasm: stack128 z34_stack
# qhasm: stack128 5z12_stack
# qhasm: stack128 5z34_stack
# qhasm: stack128 two24
# qhasm: int32 ptr
# qhasm: reg128 c01
# qhasm: reg128 c23
# qhasm: reg128 d01
# qhasm: reg128 d23
# qhasm: reg128 t0
# qhasm: reg128 t1
# qhasm: reg128 t2
# qhasm: reg128 t3
# qhasm: reg128 t4
# qhasm: reg128 mask
# qhasm: reg128 u0
# qhasm: reg128 u1
# qhasm: reg128 u2
# qhasm: reg128 u3
# qhasm: reg128 u4
# qhasm: reg128 v01
# qhasm: reg128 mid
# qhasm: reg128 v23
# qhasm: reg128 v4
# qhasm: int32 len
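# note: values are kept in radix 2^26: five 26-bit limbs per 130-bit
# value, two independent values per 128-bit NEON register. x01/x23/x4
# hold the accumulator, y*/z* hold precomputed key powers (presumably
# r^2 and r for the two-way vectorization), and 5y*/5z* cache the same
# limbs premultiplied by 5 for reduction mod 2^130-5.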
# qhasm: qpushenter crypto_onetimeauth_poly1305_neon2_blocks
.align 4
.global _crypto_onetimeauth_poly1305_neon2_blocks
.global crypto_onetimeauth_poly1305_neon2_blocks
_crypto_onetimeauth_poly1305_neon2_blocks:
crypto_onetimeauth_poly1305_neon2_blocks:
vpush {q4,q5,q6,q7}
mov r12,sp
sub sp,sp,#192
and sp,sp,#0xffffffe0
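# note: the prologue saves the callee-saved NEON registers q4-q7, keeps
# the caller's sp in r12, and carves out a 192-byte, 32-byte-aligned
# scratch area for the stack128 spill slots declared above.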
# qhasm: len = input_3
# asm 1: mov >len=int32#4,<input_3=int32#4
# asm 2: mov >len=r3,<input_3=r3
mov r3,r3
# qhasm: new y0
# qhasm: y0 = mem64[input_1]y0[1]; input_1 += 8
# asm 1: vld1.8 {<y0=reg128#1%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<y0=d0},[<input_1=r1]!
vld1.8 {d0},[r1]!
# qhasm: y12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y12=d2->y12=d3},[<input_1=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: y34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>y34=d4->y34=d5},[<input_1=r1]!
vld1.8 {d4-d5},[r1]!
# qhasm: input_1 += 8
# asm 1: add >input_1=int32#2,<input_1=int32#2,#8
# asm 2: add >input_1=r1,<input_1=r1,#8
add r1,r1,#8
# qhasm: new z0
# qhasm: z0 = mem64[input_1]z0[1]; input_1 += 8
# asm 1: vld1.8 {<z0=reg128#4%bot},[<input_1=int32#2]!
# asm 2: vld1.8 {<z0=d6},[<input_1=r1]!
vld1.8 {d6},[r1]!
# qhasm: z12 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z12=reg128#5%bot->z12=reg128#5%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z12=d8->z12=d9},[<input_1=r1]!
vld1.8 {d8-d9},[r1]!
# qhasm: z34 = mem128[input_1]; input_1 += 16
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<input_1=int32#2]!
# asm 2: vld1.8 {>z34=d10->z34=d11},[<input_1=r1]!
vld1.8 {d10-d11},[r1]!
# qhasm: 2x mask = 0xffffffff
# asm 1: vmov.i64 >mask=reg128#7,#0xffffffff
# asm 2: vmov.i64 >mask=q6,#0xffffffff
vmov.i64 q6,#0xffffffff
# qhasm: 2x u4 = 0xff
# asm 1: vmov.i64 >u4=reg128#8,#0xff
# asm 2: vmov.i64 >u4=q7,#0xff
vmov.i64 q7,#0xff
# qhasm: x01 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x01=reg128#9%bot->x01=reg128#9%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x01=d16->x01=d17},[<input_0=r0,: 128]!
vld1.8 {d16-d17},[r0,: 128]!
# qhasm: x23 aligned= mem128[input_0];input_0+=16
# asm 1: vld1.8 {>x23=reg128#10%bot->x23=reg128#10%top},[<input_0=int32#1,: 128]!
# asm 2: vld1.8 {>x23=d18->x23=d19},[<input_0=r0,: 128]!
vld1.8 {d18-d19},[r0,: 128]!
# qhasm: x4 aligned= mem64[input_0]x4[1]
# asm 1: vld1.8 {<x4=reg128#11%bot},[<input_0=int32#1,: 64]
# asm 2: vld1.8 {<x4=d20},[<input_0=r0,: 64]
vld1.8 {d20},[r0,: 64]
# qhasm: input_0 -= 32
# asm 1: sub >input_0=int32#1,<input_0=int32#1,#32
# asm 2: sub >input_0=r0,<input_0=r0,#32
sub r0,r0,#32
# qhasm: 2x mask unsigned>>=6
# asm 1: vshr.u64 >mask=reg128#7,<mask=reg128#7,#6
# asm 2: vshr.u64 >mask=q6,<mask=q6,#6
vshr.u64 q6,q6,#6
# qhasm: 2x u4 unsigned>>= 7
# asm 1: vshr.u64 >u4=reg128#8,<u4=reg128#8,#7
# asm 2: vshr.u64 >u4=q7,<u4=q7,#7
vshr.u64 q7,q7,#7
# qhasm: 4x 5y12 = y12 << 2
# asm 1: vshl.i32 >5y12=reg128#12,<y12=reg128#2,#2
# asm 2: vshl.i32 >5y12=q11,<y12=q1,#2
vshl.i32 q11,q1,#2
# qhasm: 4x 5y34 = y34 << 2
# asm 1: vshl.i32 >5y34=reg128#13,<y34=reg128#3,#2
# asm 2: vshl.i32 >5y34=q12,<y34=q2,#2
vshl.i32 q12,q2,#2
# qhasm: 4x 5y12 += y12
# asm 1: vadd.i32 >5y12=reg128#12,<5y12=reg128#12,<y12=reg128#2
# asm 2: vadd.i32 >5y12=q11,<5y12=q11,<y12=q1
vadd.i32 q11,q11,q1
# qhasm: 4x 5y34 += y34
# asm 1: vadd.i32 >5y34=reg128#13,<5y34=reg128#13,<y34=reg128#3
# asm 2: vadd.i32 >5y34=q12,<5y34=q12,<y34=q2
vadd.i32 q12,q12,q2
# qhasm: 2x u4 <<= 24
# asm 1: vshl.i64 >u4=reg128#8,<u4=reg128#8,#24
# asm 2: vshl.i64 >u4=q7,<u4=q7,#24
vshl.i64 q7,q7,#24
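# note: mask is now 2^26-1 in each 64-bit lane (0xffffffff >> 6), and
# u4 is 2^24 (0xff >> 7, then << 24), i.e. the 2^128 message padding bit
# expressed in the top limb of the radix-2^26 representation.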
# qhasm: 4x 5z12 = z12 << 2
# asm 1: vshl.i32 >5z12=reg128#14,<z12=reg128#5,#2
# asm 2: vshl.i32 >5z12=q13,<z12=q4,#2
vshl.i32 q13,q4,#2
# qhasm: 4x 5z34 = z34 << 2
# asm 1: vshl.i32 >5z34=reg128#15,<z34=reg128#6,#2
# asm 2: vshl.i32 >5z34=q14,<z34=q5,#2
vshl.i32 q14,q5,#2
# qhasm: 4x 5z12 += z12
# asm 1: vadd.i32 >5z12=reg128#14,<5z12=reg128#14,<z12=reg128#5
# asm 2: vadd.i32 >5z12=q13,<5z12=q13,<z12=q4
vadd.i32 q13,q13,q4
# qhasm: 4x 5z34 += z34
# asm 1: vadd.i32 >5z34=reg128#15,<5z34=reg128#15,<z34=reg128#6
# asm 2: vadd.i32 >5z34=q14,<5z34=q14,<z34=q5
vadd.i32 q14,q14,q5
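# note: 5y12/5y34/5z12/5z34 are computed as (y << 2) + y = 5*y; keeping
# the high limbs premultiplied by 5 exploits 2^130 = 5 (mod 2^130-5)
# during the schoolbook multiplications below.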
# qhasm: new two24
# qhasm: new y0_stack
# qhasm: new y12_stack
# qhasm: new y34_stack
# qhasm: new 5y12_stack
# qhasm: new 5y34_stack
# qhasm: new z0_stack
# qhasm: new z12_stack
# qhasm: new z34_stack
# qhasm: new 5z12_stack
# qhasm: new 5z34_stack
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: mem128[ptr] aligned= u4
# asm 1: vst1.8 {<u4=reg128#8%bot-<u4=reg128#8%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<u4=d14-<u4=d15},[<ptr=r1,: 128]
vst1.8 {d14-d15},[r1,: 128]
# qhasm: r4 = u4
# asm 1: vmov >r4=reg128#16,<u4=reg128#8
# asm 2: vmov >r4=q15,<u4=q7
vmov q15,q7
# qhasm: r0 = u4
# asm 1: vmov >r0=reg128#8,<u4=reg128#8
# asm 2: vmov >r0=q7,<u4=q7
vmov q7,q7
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#2,<y0_stack=stack128#2
# asm 2: lea >ptr=r1,<y0_stack=[sp,#16]
add r1,sp,#16
# qhasm: mem128[ptr] aligned= y0
# asm 1: vst1.8 {<y0=reg128#1%bot-<y0=reg128#1%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y0=d0-<y0=d1},[<ptr=r1,: 128]
vst1.8 {d0-d1},[r1,: 128]
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#2,<y12_stack=stack128#3
# asm 2: lea >ptr=r1,<y12_stack=[sp,#32]
add r1,sp,#32
# qhasm: mem128[ptr] aligned= y12
# asm 1: vst1.8 {<y12=reg128#2%bot-<y12=reg128#2%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y12=d2-<y12=d3},[<ptr=r1,: 128]
vst1.8 {d2-d3},[r1,: 128]
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#2,<y34_stack=stack128#4
# asm 2: lea >ptr=r1,<y34_stack=[sp,#48]
add r1,sp,#48
# qhasm: mem128[ptr] aligned= y34
# asm 1: vst1.8 {<y34=reg128#3%bot-<y34=reg128#3%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<y34=d4-<y34=d5},[<ptr=r1,: 128]
vst1.8 {d4-d5},[r1,: 128]
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#2,<z0_stack=stack128#7
# asm 2: lea >ptr=r1,<z0_stack=[sp,#96]
add r1,sp,#96
# qhasm: mem128[ptr] aligned= z0
# asm 1: vst1.8 {<z0=reg128#4%bot-<z0=reg128#4%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z0=d6-<z0=d7},[<ptr=r1,: 128]
vst1.8 {d6-d7},[r1,: 128]
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#2,<z12_stack=stack128#8
# asm 2: lea >ptr=r1,<z12_stack=[sp,#112]
add r1,sp,#112
# qhasm: mem128[ptr] aligned= z12
# asm 1: vst1.8 {<z12=reg128#5%bot-<z12=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z12=d8-<z12=d9},[<ptr=r1,: 128]
vst1.8 {d8-d9},[r1,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#2,<z34_stack=stack128#9
# asm 2: lea >ptr=r1,<z34_stack=[sp,#128]
add r1,sp,#128
# qhasm: mem128[ptr] aligned= z34
# asm 1: vst1.8 {<z34=reg128#6%bot-<z34=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<z34=d10-<z34=d11},[<ptr=r1,: 128]
vst1.8 {d10-d11},[r1,: 128]
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#2,<5y12_stack=stack128#5
# asm 2: lea >ptr=r1,<5y12_stack=[sp,#64]
add r1,sp,#64
# qhasm: mem128[ptr] aligned= 5y12
# asm 1: vst1.8 {<5y12=reg128#12%bot-<5y12=reg128#12%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y12=d22-<5y12=d23},[<ptr=r1,: 128]
vst1.8 {d22-d23},[r1,: 128]
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#2,<5y34_stack=stack128#6
# asm 2: lea >ptr=r1,<5y34_stack=[sp,#80]
add r1,sp,#80
# qhasm: mem128[ptr] aligned= 5y34
# asm 1: vst1.8 {<5y34=reg128#13%bot-<5y34=reg128#13%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5y34=d24-<5y34=d25},[<ptr=r1,: 128]
vst1.8 {d24-d25},[r1,: 128]
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#2,<5z12_stack=stack128#10
# asm 2: lea >ptr=r1,<5z12_stack=[sp,#144]
add r1,sp,#144
# qhasm: mem128[ptr] aligned= 5z12
# asm 1: vst1.8 {<5z12=reg128#14%bot-<5z12=reg128#14%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z12=d26-<5z12=d27},[<ptr=r1,: 128]
vst1.8 {d26-d27},[r1,: 128]
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#2,<5z34_stack=stack128#11
# asm 2: lea >ptr=r1,<5z34_stack=[sp,#160]
add r1,sp,#160
# qhasm: mem128[ptr] aligned= 5z34
# asm 1: vst1.8 {<5z34=reg128#15%bot-<5z34=reg128#15%top},[<ptr=int32#2,: 128]
# asm 2: vst1.8 {<5z34=d28-<5z34=d29},[<ptr=r1,: 128]
vst1.8 {d28-d29},[r1,: 128]
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto below64bytes if !unsigned>
bls ._below64bytes
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#3,#32
# asm 2: add >input_2=r1,<input_2=r2,#32
add r1,r2,#32
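# note: mainloop2 consumes 64 message bytes (four 16-byte blocks) per
# iteration, two blocks per NEON lane; it is only entered while more
# than 64 bytes remain, so the trailing blocks always fall through to
# the 32-byte loop below.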
# qhasm: mainloop2:
._mainloop2:
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#1%bot->c01=reg128#1%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c01=d0->c01=d1},[<input_2=r1]!
vld1.8 {d0-d1},[r1]!
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#2%bot->c23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>c23=d2->c23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: r4[0,1] += x01[0] unsigned* z34[2]; r4[2,3] += x01[1] unsigned* z34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%bot,<z34=reg128#6%top
# asm 2: vmlal.u32 <r4=q15,<x01=d16,<z34=d11
vmlal.u32 q15,d16,d11
# qhasm: ptr = &z12_stack
# asm 1: lea >ptr=int32#3,<z12_stack=stack128#8
# asm 2: lea >ptr=r2,<z12_stack=[sp,#112]
add r2,sp,#112
# qhasm: z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>z12=reg128#3%bot->z12=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z12=d4->z12=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: r4[0,1] += x01[2] unsigned* z34[0]; r4[2,3] += x01[3] unsigned* z34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x01=reg128#9%top,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r4=q15,<x01=d17,<z34=d10
vmlal.u32 q15,d17,d10
# qhasm: ptr = &z0_stack
# asm 1: lea >ptr=int32#3,<z0_stack=stack128#7
# asm 2: lea >ptr=r2,<z0_stack=[sp,#96]
add r2,sp,#96
# qhasm: z0 aligned= mem128[ptr]
# asm 1: vld1.8 {>z0=reg128#4%bot->z0=reg128#4%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z0=d6->z0=d7},[<ptr=r2,: 128]
vld1.8 {d6-d7},[r2,: 128]
# qhasm: r4[0,1] += x23[0] unsigned* z12[2]; r4[2,3] += x23[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<x23=d18,<z12=d5
vmlal.u32 q15,d18,d5
# qhasm: c01 c23 = c01[0]c01[1]c01[2]c23[2]c23[0]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%top,<c23=reg128#2%top
# asm 2: vtrn.32 <c01=d1,<c23=d3
vtrn.32 d1,d3
# qhasm: r4[0,1] += x23[2] unsigned* z12[0]; r4[2,3] += x23[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x23=reg128#10%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<x23=d19,<z12=d4
vmlal.u32 q15,d19,d4
# qhasm: r4[0,1] += x4[0] unsigned* z0[0]; r4[2,3] += x4[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<x4=reg128#11%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r4=q15,<x4=d20,<z0=d6
vmlal.u32 q15,d20,d6
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#5,<c23=reg128#2%top,#18
# asm 2: vshll.u32 >r3=q4,<c23=d3,#18
vshll.u32 q4,d3,#18
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c01[3]c01[1]c23[1]c23[2]c23[3]
# asm 1: vtrn.32 <c01=reg128#1%bot,<c23=reg128#2%bot
# asm 2: vtrn.32 <c01=d0,<c23=d2
vtrn.32 d0,d2
# qhasm: r3[0,1] += x01[0] unsigned* z34[0]; r3[2,3] += x01[1] unsigned* z34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%bot,<z34=reg128#6%bot
# asm 2: vmlal.u32 <r3=q4,<x01=d16,<z34=d10
vmlal.u32 q4,d16,d10
# qhasm: r3[0,1] += x01[2] unsigned* z12[2]; r3[2,3] += x01[3] unsigned* z12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x01=reg128#9%top,<z12=reg128#3%top
# asm 2: vmlal.u32 <r3=q4,<x01=d17,<z12=d5
vmlal.u32 q4,d17,d5
# qhasm: r0 = r0[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#8%bot,<r0=reg128#8%bot,<c01=reg128#1%bot,#1
# asm 2: vext.32 <r0=d14,<r0=d14,<c01=d0,#1
vext.32 d14,d14,d0,#1
# qhasm: r3[0,1] += x23[0] unsigned* z12[0]; r3[2,3] += x23[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d18,<z12=d4
vmlal.u32 q4,d18,d4
# qhasm: input_2 -= 64
# asm 1: sub >input_2=int32#2,<input_2=int32#2,#64
# asm 2: sub >input_2=r1,<input_2=r1,#64
sub r1,r1,#64
# qhasm: r3[0,1] += x23[2] unsigned* z0[0]; r3[2,3] += x23[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<x23=reg128#10%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r3=q4,<x23=d19,<z0=d6
vmlal.u32 q4,d19,d6
# qhasm: ptr = &5z34_stack
# asm 1: lea >ptr=int32#3,<5z34_stack=stack128#11
# asm 2: lea >ptr=r2,<5z34_stack=[sp,#160]
add r2,sp,#160
# qhasm: 5z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z34=reg128#6%bot->5z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z34=d10->5z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: r3[0,1] += x4[0] unsigned* 5z34[2]; r3[2,3] += x4[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<x4=reg128#11%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r3=q4,<x4=d20,<5z34=d11
vmlal.u32 q4,d20,d11
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#8,<r0=reg128#8
# asm 2: vrev64.i32 >r0=q7,<r0=q7
vrev64.i32 q7,q7
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#14,<c01=reg128#1%top,#12
# asm 2: vshll.u32 >r2=q13,<c01=d1,#12
vshll.u32 q13,d1,#12
# qhasm: d01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d01=reg128#12%bot->d01=reg128#12%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d01=d22->d01=d23},[<input_2=r1]!
vld1.8 {d22-d23},[r1]!
# qhasm: r2[0,1] += x01[0] unsigned* z12[2]; r2[2,3] += x01[1] unsigned* z12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%bot,<z12=reg128#3%top
# asm 2: vmlal.u32 <r2=q13,<x01=d16,<z12=d5
vmlal.u32 q13,d16,d5
# qhasm: r2[0,1] += x01[2] unsigned* z12[0]; r2[2,3] += x01[3] unsigned* z12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x01=reg128#9%top,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r2=q13,<x01=d17,<z12=d4
vmlal.u32 q13,d17,d4
# qhasm: r2[0,1] += x23[0] unsigned* z0[0]; r2[2,3] += x23[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r2=q13,<x23=d18,<z0=d6
vmlal.u32 q13,d18,d6
# qhasm: r2[0,1] += x23[2] unsigned* 5z34[2]; r2[2,3] += x23[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<x23=reg128#10%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r2=q13,<x23=d19,<5z34=d11
vmlal.u32 q13,d19,d11
# qhasm: r2[0,1] += x4[0] unsigned* 5z34[0]; r2[2,3] += x4[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<x4=reg128#11%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r2=q13,<x4=d20,<5z34=d10
vmlal.u32 q13,d20,d10
# qhasm: r0 = r0[0,1]c01[1]r0[2]
# asm 1: vext.32 <r0=reg128#8%top,<c01=reg128#1%bot,<r0=reg128#8%top,#1
# asm 2: vext.32 <r0=d15,<c01=d0,<r0=d15,#1
vext.32 d15,d0,d15,#1
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#15,<c23=reg128#2%bot,#6
# asm 2: vshll.u32 >r1=q14,<c23=d2,#6
vshll.u32 q14,d2,#6
# qhasm: r1[0,1] += x01[0] unsigned* z12[0]; r1[2,3] += x01[1] unsigned* z12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%bot,<z12=reg128#3%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d16,<z12=d4
vmlal.u32 q14,d16,d4
# qhasm: r1[0,1] += x01[2] unsigned* z0[0]; r1[2,3] += x01[3] unsigned* z0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x01=reg128#9%top,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r1=q14,<x01=d17,<z0=d6
vmlal.u32 q14,d17,d6
# qhasm: r1[0,1] += x23[0] unsigned* 5z34[2]; r1[2,3] += x23[1] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%bot,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r1=q14,<x23=d18,<5z34=d11
vmlal.u32 q14,d18,d11
# qhasm: r1[0,1] += x23[2] unsigned* 5z34[0]; r1[2,3] += x23[3] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<x23=reg128#10%top,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r1=q14,<x23=d19,<5z34=d10
vmlal.u32 q14,d19,d10
# qhasm: ptr = &5z12_stack
# asm 1: lea >ptr=int32#3,<5z12_stack=stack128#10
# asm 2: lea >ptr=r2,<5z12_stack=[sp,#144]
add r2,sp,#144
# qhasm: 5z12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5z12=reg128#1%bot->5z12=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5z12=d0->5z12=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r1[0,1] += x4[0] unsigned* 5z12[2]; r1[2,3] += x4[1] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<x4=reg128#11%bot,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r1=q14,<x4=d20,<5z12=d1
vmlal.u32 q14,d20,d1
# qhasm: d23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>d23=reg128#2%bot->d23=reg128#2%top},[<input_2=int32#2]!
# asm 2: vld1.8 {>d23=d2->d23=d3},[<input_2=r1]!
vld1.8 {d2-d3},[r1]!
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: r0[0,1] += x4[0] unsigned* 5z12[0]; r0[2,3] += x4[1] unsigned* 5z12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x4=reg128#11%bot,<5z12=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<x4=d20,<5z12=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += x23[0] unsigned* 5z34[0]; r0[2,3] += x23[1] unsigned* 5z34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%bot,<5z34=reg128#6%bot
# asm 2: vmlal.u32 <r0=q7,<x23=d18,<5z34=d10
vmlal.u32 q7,d18,d10
# qhasm: d01 d23 = d01[0] d23[0] d01[1] d23[1]
# asm 1: vswp <d23=reg128#2%bot,<d01=reg128#12%top
# asm 2: vswp <d23=d2,<d01=d23
vswp d2,d23
# qhasm: r0[0,1] += x23[2] unsigned* 5z12[2]; r0[2,3] += x23[3] unsigned* 5z12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x23=reg128#10%top,<5z12=reg128#1%top
# asm 2: vmlal.u32 <r0=q7,<x23=d19,<5z12=d1
vmlal.u32 q7,d19,d1
# qhasm: r0[0,1] += x01[0] unsigned* z0[0]; r0[2,3] += x01[1] unsigned* z0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%bot,<z0=reg128#4%bot
# asm 2: vmlal.u32 <r0=q7,<x01=d16,<z0=d6
vmlal.u32 q7,d16,d6
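# note: d01/d23 hold the next two message blocks; the vext/vshrn/vtrn
# sequence below splits their 128-bit contents into 26-bit limbs v01,
# v23 and v4 while the multiply-accumulates above are still in flight.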
# qhasm: new mid
# qhasm: 2x v4 = d23 unsigned>> 40
# asm 1: vshr.u64 >v4=reg128#4,<d23=reg128#2,#40
# asm 2: vshr.u64 >v4=q3,<d23=q1,#40
vshr.u64 q3,q1,#40
# qhasm: mid = d01[1]d23[0] mid[2,3]
# asm 1: vext.32 <mid=reg128#1%bot,<d01=reg128#12%bot,<d23=reg128#2%bot,#1
# asm 2: vext.32 <mid=d0,<d01=d22,<d23=d2,#1
vext.32 d0,d22,d2,#1
# qhasm: new v23
# qhasm: v23[2] = d23[0,1] unsigned>> 14; v23[3] = d23[2,3] unsigned>> 14
# asm 1: vshrn.u64 <v23=reg128#10%top,<d23=reg128#2,#14
# asm 2: vshrn.u64 <v23=d19,<d23=q1,#14
vshrn.u64 d19,q1,#14
# qhasm: mid = mid[0,1] d01[3]d23[2]
# asm 1: vext.32 <mid=reg128#1%top,<d01=reg128#12%top,<d23=reg128#2%top,#1
# asm 2: vext.32 <mid=d1,<d01=d23,<d23=d3,#1
vext.32 d1,d23,d3,#1
# qhasm: new v01
# qhasm: v01[2] = d01[0,1] unsigned>> 26; v01[3] = d01[2,3] unsigned>> 26
# asm 1: vshrn.u64 <v01=reg128#11%top,<d01=reg128#12,#26
# asm 2: vshrn.u64 <v01=d21,<d01=q11,#26
vshrn.u64 d21,q11,#26
# qhasm: v01 = d01[1]d01[0] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<d01=reg128#12%bot,<d01=reg128#12%bot,#1
# asm 2: vext.32 <v01=d20,<d01=d22,<d01=d22,#1
vext.32 d20,d22,d22,#1
# qhasm: r0[0,1] += x01[2] unsigned* 5z34[2]; r0[2,3] += x01[3] unsigned* 5z34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<x01=reg128#9%top,<5z34=reg128#6%top
# asm 2: vmlal.u32 <r0=q7,<x01=d17,<5z34=d11
vmlal.u32 q7,d17,d11
# qhasm: v01 = v01[1]d01[2] v01[2,3]
# asm 1: vext.32 <v01=reg128#11%bot,<v01=reg128#11%bot,<d01=reg128#12%top,#1
# asm 2: vext.32 <v01=d20,<v01=d20,<d01=d23,#1
vext.32 d20,d20,d23,#1
# qhasm: v23[0] = mid[0,1] unsigned>> 20; v23[1] = mid[2,3] unsigned>> 20
# asm 1: vshrn.u64 <v23=reg128#10%bot,<mid=reg128#1,#20
# asm 2: vshrn.u64 <v23=d18,<mid=q0,#20
vshrn.u64 d18,q0,#20
# qhasm: v4 = v4[0]v4[2]v4[1]v4[3]
# asm 1: vtrn.32 <v4=reg128#4%bot,<v4=reg128#4%top
# asm 2: vtrn.32 <v4=d6,<v4=d7
vtrn.32 d6,d7
# qhasm: 4x v01 &= 0x03ffffff
# asm 1: vand.i32 <v01=reg128#11,#0x03ffffff
# asm 2: vand.i32 <v01=q10,#0x03ffffff
vand.i32 q10,#0x03ffffff
# qhasm: ptr = &y34_stack
# asm 1: lea >ptr=int32#3,<y34_stack=stack128#4
# asm 2: lea >ptr=r2,<y34_stack=[sp,#48]
add r2,sp,#48
# qhasm: y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>y34=reg128#3%bot->y34=reg128#3%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y34=d4->y34=d5},[<ptr=r2,: 128]
vld1.8 {d4-d5},[r2,: 128]
# qhasm: 4x v23 &= 0x03ffffff
# asm 1: vand.i32 <v23=reg128#10,#0x03ffffff
# asm 2: vand.i32 <v23=q9,#0x03ffffff
vand.i32 q9,#0x03ffffff
# qhasm: ptr = &y12_stack
# asm 1: lea >ptr=int32#3,<y12_stack=stack128#3
# asm 2: lea >ptr=r2,<y12_stack=[sp,#32]
add r2,sp,#32
# qhasm: y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>y12=reg128#2%bot->y12=reg128#2%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y12=d2->y12=d3},[<ptr=r2,: 128]
vld1.8 {d2-d3},[r2,: 128]
# qhasm: 4x v4 |= 0x01000000
# asm 1: vorr.i32 <v4=reg128#4,#0x01000000
# asm 2: vorr.i32 <v4=q3,#0x01000000
vorr.i32 q3,#0x01000000
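# note: masking with 0x03ffffff trims each extracted limb to 26 bits,
# and OR-ing 0x01000000 into v4 appends the 2^128 padding bit that
# poly1305 adds to every full 16-byte block.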
# qhasm: ptr = &y0_stack
# asm 1: lea >ptr=int32#3,<y0_stack=stack128#2
# asm 2: lea >ptr=r2,<y0_stack=[sp,#16]
add r2,sp,#16
# qhasm: y0 aligned= mem128[ptr]
# asm 1: vld1.8 {>y0=reg128#1%bot->y0=reg128#1%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>y0=d0->y0=d1},[<ptr=r2,: 128]
vld1.8 {d0-d1},[r2,: 128]
# qhasm: r4[0,1] += v01[0] unsigned* y34[2]; r4[2,3] += v01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q15,<v01=d20,<y34=d5
vmlal.u32 q15,d20,d5
# qhasm: r4[0,1] += v01[2] unsigned* y34[0]; r4[2,3] += v01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v01=reg128#11%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q15,<v01=d21,<y34=d4
vmlal.u32 q15,d21,d4
# qhasm: r4[0,1] += v23[0] unsigned* y12[2]; r4[2,3] += v23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q15,<v23=d18,<y12=d3
vmlal.u32 q15,d18,d3
# qhasm: r4[0,1] += v23[2] unsigned* y12[0]; r4[2,3] += v23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q15,<v23=d19,<y12=d2
vmlal.u32 q15,d19,d2
# qhasm: r4[0,1] += v4[0] unsigned* y0[0]; r4[2,3] += v4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#16,<v4=reg128#4%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q15,<v4=d6,<y0=d0
vmlal.u32 q15,d6,d0
# qhasm: ptr = &5y34_stack
# asm 1: lea >ptr=int32#3,<5y34_stack=stack128#6
# asm 2: lea >ptr=r2,<5y34_stack=[sp,#80]
add r2,sp,#80
# qhasm: 5y34 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y34=reg128#13%bot->5y34=reg128#13%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y34=d24->5y34=d25},[<ptr=r2,: 128]
vld1.8 {d24-d25},[r2,: 128]
# qhasm: r3[0,1] += v01[0] unsigned* y34[0]; r3[2,3] += v01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q4,<v01=d20,<y34=d4
vmlal.u32 q4,d20,d4
# qhasm: r3[0,1] += v01[2] unsigned* y12[2]; r3[2,3] += v01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v01=reg128#11%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q4,<v01=d21,<y12=d3
vmlal.u32 q4,d21,d3
# qhasm: r3[0,1] += v23[0] unsigned* y12[0]; r3[2,3] += v23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d18,<y12=d2
vmlal.u32 q4,d18,d2
# qhasm: r3[0,1] += v23[2] unsigned* y0[0]; r3[2,3] += v23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#5,<v23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q4,<v23=d19,<y0=d0
vmlal.u32 q4,d19,d0
# qhasm: r3[0,1] += v4[0] unsigned* 5y34[2]; r3[2,3] += v4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#5,<v4=reg128#4%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q4,<v4=d6,<5y34=d25
vmlal.u32 q4,d6,d25
# qhasm: ptr = &5y12_stack
# asm 1: lea >ptr=int32#3,<5y12_stack=stack128#5
# asm 2: lea >ptr=r2,<5y12_stack=[sp,#64]
add r2,sp,#64
# qhasm: 5y12 aligned= mem128[ptr]
# asm 1: vld1.8 {>5y12=reg128#12%bot->5y12=reg128#12%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>5y12=d22->5y12=d23},[<ptr=r2,: 128]
vld1.8 {d22-d23},[r2,: 128]
# qhasm: r0[0,1] += v4[0] unsigned* 5y12[0]; r0[2,3] += v4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v4=reg128#4%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q7,<v4=d6,<5y12=d22
vmlal.u32 q7,d6,d22
# qhasm: r0[0,1] += v23[0] unsigned* 5y34[0]; r0[2,3] += v23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q7,<v23=d18,<5y34=d24
vmlal.u32 q7,d18,d24
# qhasm: r0[0,1] += v23[2] unsigned* 5y12[2]; r0[2,3] += v23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q7,<v23=d19,<5y12=d23
vmlal.u32 q7,d19,d23
# qhasm: r0[0,1] += v01[0] unsigned* y0[0]; r0[2,3] += v01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q7,<v01=d20,<y0=d0
vmlal.u32 q7,d20,d0
# qhasm: r0[0,1] += v01[2] unsigned* 5y34[2]; r0[2,3] += v01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#8,<v01=reg128#11%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q7,<v01=d21,<5y34=d25
vmlal.u32 q7,d21,d25
# qhasm: r1[0,1] += v01[0] unsigned* y12[0]; r1[2,3] += v01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d20,<y12=d2
vmlal.u32 q14,d20,d2
# qhasm: r1[0,1] += v01[2] unsigned* y0[0]; r1[2,3] += v01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v01=reg128#11%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q14,<v01=d21,<y0=d0
vmlal.u32 q14,d21,d0
# qhasm: r1[0,1] += v23[0] unsigned* 5y34[2]; r1[2,3] += v23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q14,<v23=d18,<5y34=d25
vmlal.u32 q14,d18,d25
# qhasm: r1[0,1] += v23[2] unsigned* 5y34[0]; r1[2,3] += v23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#15,<v23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q14,<v23=d19,<5y34=d24
vmlal.u32 q14,d19,d24
# qhasm: r1[0,1] += v4[0] unsigned* 5y12[2]; r1[2,3] += v4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#15,<v4=reg128#4%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q14,<v4=d6,<5y12=d23
vmlal.u32 q14,d6,d23
# qhasm: r2[0,1] += v01[0] unsigned* y12[2]; r2[2,3] += v01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q13,<v01=d20,<y12=d3
vmlal.u32 q13,d20,d3
# qhasm: r2[0,1] += v01[2] unsigned* y12[0]; r2[2,3] += v01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v01=reg128#11%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q13,<v01=d21,<y12=d2
vmlal.u32 q13,d21,d2
# qhasm: r2[0,1] += v23[0] unsigned* y0[0]; r2[2,3] += v23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q13,<v23=d18,<y0=d0
vmlal.u32 q13,d18,d0
# qhasm: r2[0,1] += v23[2] unsigned* 5y34[2]; r2[2,3] += v23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#14,<v23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q13,<v23=d19,<5y34=d25
vmlal.u32 q13,d19,d25
# qhasm: r2[0,1] += v4[0] unsigned* 5y34[0]; r2[2,3] += v4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#14,<v4=reg128#4%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q13,<v4=d6,<5y34=d24
vmlal.u32 q13,d6,d24
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#3,<two24=stack128#1
# asm 2: lea >ptr=r2,<two24=[sp,#0]
add r2,sp,#0
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#4,<r0=reg128#8,#26
# asm 2: vshr.u64 >t1=q3,<r0=q7,#26
vshr.u64 q3,q7,#26
# qhasm: len -= 64
# asm 1: sub >len=int32#4,<len=int32#4,#64
# asm 2: sub >len=r3,<len=r3,#64
sub r3,r3,#64
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#6,<r0=reg128#8,<mask=reg128#7
# asm 2: vand >r0=q5,<r0=q7,<mask=q6
vand q5,q7,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#15,<t1=reg128#4
# asm 2: vadd.i64 >r1=q3,<r1=q14,<t1=q3
vadd.i64 q3,q14,q3
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q7,<r3=q4,#26
vshr.u64 q7,q4,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#8,<r4=reg128#16,<t4=reg128#8
# asm 2: vadd.i64 >x4=q7,<r4=q15,<t4=q7
vadd.i64 q7,q15,q7
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#16%bot->r4=reg128#16%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r4=d30->r4=d31},[<ptr=r2,: 128]
vld1.8 {d30-d31},[r2,: 128]
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#9,<r1=reg128#4,#26
# asm 2: vshr.u64 >t2=q8,<r1=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#4,<r1=reg128#4,<mask=reg128#7
# asm 2: vand >r1=q3,<r1=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x t0 = x4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#10,<x4=reg128#8,#26
# asm 2: vshr.u64 >t0=q9,<x4=q7,#26
vshr.u64 q9,q7,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#9,<r2=reg128#14,<t2=reg128#9
# asm 2: vadd.i64 >r2=q8,<r2=q13,<t2=q8
vadd.i64 q8,q13,q8
# qhasm: x4 &= mask
# asm 1: vand >x4=reg128#11,<x4=reg128#8,<mask=reg128#7
# asm 2: vand >x4=q10,<x4=q7,<mask=q6
vand q10,q7,q6
# qhasm: 2x x01 = r0 + t0
# asm 1: vadd.i64 >x01=reg128#6,<r0=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q5,<r0=q5,<t0=q9
vadd.i64 q5,q5,q9
# qhasm: r0 aligned= mem128[ptr]
# asm 1: vld1.8 {>r0=reg128#8%bot->r0=reg128#8%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>r0=d14->r0=d15},[<ptr=r2,: 128]
vld1.8 {d14-d15},[r2,: 128]
# qhasm: ptr = &z34_stack
# asm 1: lea >ptr=int32#3,<z34_stack=stack128#9
# asm 2: lea >ptr=r2,<z34_stack=[sp,#128]
add r2,sp,#128
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#10,<t0=reg128#10,#2
# asm 2: vshl.i64 >t0=q9,<t0=q9,#2
vshl.i64 q9,q9,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#9,#26
# asm 2: vshr.u64 >t3=q13,<r2=q8,#26
vshr.u64 q13,q8,#26
# qhasm: 2x x01 += t0
# asm 1: vadd.i64 >x01=reg128#15,<x01=reg128#6,<t0=reg128#10
# asm 2: vadd.i64 >x01=q14,<x01=q5,<t0=q9
vadd.i64 q14,q5,q9
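# note: x01 has now absorbed t0 once directly and once shifted left by
# 2, i.e. 5*t0 in total: the carry out of the top limb wraps back into
# limb 0 multiplied by 5, since 2^130 = 5 (mod 2^130-5).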
# qhasm: z34 aligned= mem128[ptr]
# asm 1: vld1.8 {>z34=reg128#6%bot->z34=reg128#6%top},[<ptr=int32#3,: 128]
# asm 2: vld1.8 {>z34=d10->z34=d11},[<ptr=r2,: 128]
vld1.8 {d10-d11},[r2,: 128]
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#9,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q8,<mask=q6
vand q9,q8,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#5,<r3=reg128#5,<t3=reg128#14
# asm 2: vadd.i64 >r3=q4,<r3=q4,<t3=q13
vadd.i64 q4,q4,q13
# qhasm: input_2 += 32
# asm 1: add >input_2=int32#2,<input_2=int32#2,#32
# asm 2: add >input_2=r1,<input_2=r1,#32
add r1,r1,#32
# qhasm: 2x t1 = x01 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#14,<x01=reg128#15,#26
# asm 2: vshr.u64 >t1=q13,<x01=q14,#26
vshr.u64 q13,q14,#26
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: x01 = x01 & mask
# asm 1: vand >x01=reg128#9,<x01=reg128#15,<mask=reg128#7
# asm 2: vand >x01=q8,<x01=q14,<mask=q6
vand q8,q14,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#4,<t1=reg128#14
# asm 2: vadd.i64 >r1=q3,<r1=q3,<t1=q13
vadd.i64 q3,q3,q13
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#14,<r3=reg128#5,#26
# asm 2: vshr.u64 >t4=q13,<r3=q4,#26
vshr.u64 q13,q4,#26
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#5,<r3=reg128#5,<mask=reg128#7
# asm 2: vand >r3=q4,<r3=q4,<mask=q6
vand q4,q4,q6
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: 2x x4 += t4
# asm 1: vadd.i64 >x4=reg128#11,<x4=reg128#11,<t4=reg128#14
# asm 2: vadd.i64 >x4=q10,<x4=q10,<t4=q13
vadd.i64 q10,q10,q13
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#5%bot,<r3=reg128#5%top
# asm 2: vtrn.32 <r3=d8,<r3=d9
vtrn.32 d8,d9
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#5%bot,<r3=reg128#5%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d8,<r3=d8,#0
vext.32 d19,d8,d8,#0
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: unsigned>? len - 64
# asm 1: cmp <len=int32#4,#64
# asm 2: cmp <len=r3,#64
cmp r3,#64
# qhasm: goto mainloop2 if unsigned>
bhi ._mainloop2
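# note: falling out of mainloop2 leaves at most 64 unprocessed bytes;
# input_2 ran 32 bytes ahead of the true message position throughout
# the loop, so it is rewound by 32 on exit.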
# qhasm: input_2 -= 32
# asm 1: sub >input_2=int32#3,<input_2=int32#2,#32
# asm 2: sub >input_2=r2,<input_2=r1,#32
sub r2,r1,#32
# qhasm: below64bytes:
._below64bytes:
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto end if !unsigned>
bls ._end
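# note: mainloop handles one 32-byte (two-block) step per iteration;
# like mainloop2 it only runs while more than 32 bytes remain, so the
# final block or two are presumably left for the caller's finalization.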
# qhasm: mainloop:
._mainloop:
# qhasm: new r0
# qhasm: ptr = &two24
# asm 1: lea >ptr=int32#2,<two24=stack128#1
# asm 2: lea >ptr=r1,<two24=[sp,#0]
add r1,sp,#0
# qhasm: r4 aligned= mem128[ptr]
# asm 1: vld1.8 {>r4=reg128#5%bot->r4=reg128#5%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>r4=d8->r4=d9},[<ptr=r1,: 128]
vld1.8 {d8-d9},[r1,: 128]
# qhasm: u4 aligned= mem128[ptr]
# asm 1: vld1.8 {>u4=reg128#6%bot->u4=reg128#6%top},[<ptr=int32#2,: 128]
# asm 2: vld1.8 {>u4=d10->u4=d11},[<ptr=r1,: 128]
vld1.8 {d10-d11},[r1,: 128]
# qhasm: c01 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c01=reg128#8%bot->c01=reg128#8%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c01=d14->c01=d15},[<input_2=r2]!
vld1.8 {d14-d15},[r2]!
# qhasm: r4[0,1] += x01[0] unsigned* y34[2]; r4[2,3] += x01[1] unsigned* y34[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%bot,<y34=reg128#3%top
# asm 2: vmlal.u32 <r4=q4,<x01=d16,<y34=d5
vmlal.u32 q4,d16,d5
# qhasm: c23 = mem128[input_2];input_2+=16
# asm 1: vld1.8 {>c23=reg128#14%bot->c23=reg128#14%top},[<input_2=int32#3]!
# asm 2: vld1.8 {>c23=d26->c23=d27},[<input_2=r2]!
vld1.8 {d26-d27},[r2]!
# qhasm: r4[0,1] += x01[2] unsigned* y34[0]; r4[2,3] += x01[3] unsigned* y34[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x01=reg128#9%top,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r4=q4,<x01=d17,<y34=d4
vmlal.u32 q4,d17,d4
# qhasm: r0 = u4[1]c01[0]r0[2,3]
# asm 1: vext.32 <r0=reg128#4%bot,<u4=reg128#6%bot,<c01=reg128#8%bot,#1
# asm 2: vext.32 <r0=d6,<u4=d10,<c01=d14,#1
vext.32 d6,d10,d14,#1
# qhasm: r4[0,1] += x23[0] unsigned* y12[2]; r4[2,3] += x23[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r4=q4,<x23=d18,<y12=d3
vmlal.u32 q4,d18,d3
# qhasm: r0 = r0[0,1]u4[1]c23[0]
# asm 1: vext.32 <r0=reg128#4%top,<u4=reg128#6%bot,<c23=reg128#14%bot,#1
# asm 2: vext.32 <r0=d7,<u4=d10,<c23=d26,#1
vext.32 d7,d10,d26,#1
# qhasm: r4[0,1] += x23[2] unsigned* y12[0]; r4[2,3] += x23[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x23=reg128#10%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r4=q4,<x23=d19,<y12=d2
vmlal.u32 q4,d19,d2
# qhasm: r0 = r0[1]r0[0]r0[3]r0[2]
# asm 1: vrev64.i32 >r0=reg128#4,<r0=reg128#4
# asm 2: vrev64.i32 >r0=q3,<r0=q3
vrev64.i32 q3,q3
# qhasm: r4[0,1] += x4[0] unsigned* y0[0]; r4[2,3] += x4[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r4=reg128#5,<x4=reg128#11%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r4=q4,<x4=d20,<y0=d0
vmlal.u32 q4,d20,d0
# qhasm: r0[0,1] += x4[0] unsigned* 5y12[0]; r0[2,3] += x4[1] unsigned* 5y12[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x4=reg128#11%bot,<5y12=reg128#12%bot
# asm 2: vmlal.u32 <r0=q3,<x4=d20,<5y12=d22
vmlal.u32 q3,d20,d22
# qhasm: r0[0,1] += x23[0] unsigned* 5y34[0]; r0[2,3] += x23[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r0=q3,<x23=d18,<5y34=d24
vmlal.u32 q3,d18,d24
# qhasm: r0[0,1] += x23[2] unsigned* 5y12[2]; r0[2,3] += x23[3] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x23=reg128#10%top,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r0=q3,<x23=d19,<5y12=d23
vmlal.u32 q3,d19,d23
# qhasm: c01 c23 = c01[0]c23[0]c01[2]c23[2]c01[1]c23[1]c01[3]c23[3]
# asm 1: vtrn.32 <c01=reg128#8,<c23=reg128#14
# asm 2: vtrn.32 <c01=q7,<c23=q13
vtrn.32 q7,q13
# qhasm: r0[0,1] += x01[0] unsigned* y0[0]; r0[2,3] += x01[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r0=q3,<x01=d16,<y0=d0
vmlal.u32 q3,d16,d0
# qhasm: r3[0,1] = c23[2]<<18; r3[2,3] = c23[3]<<18
# asm 1: vshll.u32 >r3=reg128#6,<c23=reg128#14%top,#18
# asm 2: vshll.u32 >r3=q5,<c23=d27,#18
vshll.u32 q5,d27,#18
# qhasm: r0[0,1] += x01[2] unsigned* 5y34[2]; r0[2,3] += x01[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r0=reg128#4,<x01=reg128#9%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r0=q3,<x01=d17,<5y34=d25
vmlal.u32 q3,d17,d25
# qhasm: r3[0,1] += x01[0] unsigned* y34[0]; r3[2,3] += x01[1] unsigned* y34[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%bot,<y34=reg128#3%bot
# asm 2: vmlal.u32 <r3=q5,<x01=d16,<y34=d4
vmlal.u32 q5,d16,d4
# qhasm: r3[0,1] += x01[2] unsigned* y12[2]; r3[2,3] += x01[3] unsigned* y12[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x01=reg128#9%top,<y12=reg128#2%top
# asm 2: vmlal.u32 <r3=q5,<x01=d17,<y12=d3
vmlal.u32 q5,d17,d3
# qhasm: r3[0,1] += x23[0] unsigned* y12[0]; r3[2,3] += x23[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d18,<y12=d2
vmlal.u32 q5,d18,d2
# qhasm: r3[0,1] += x23[2] unsigned* y0[0]; r3[2,3] += x23[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r3=reg128#6,<x23=reg128#10%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r3=q5,<x23=d19,<y0=d0
vmlal.u32 q5,d19,d0
# qhasm: r1[0,1] = c23[0]<<6; r1[2,3] = c23[1]<<6
# asm 1: vshll.u32 >r1=reg128#14,<c23=reg128#14%bot,#6
# asm 2: vshll.u32 >r1=q13,<c23=d26,#6
vshll.u32 q13,d26,#6
# qhasm: r3[0,1] += x4[0] unsigned* 5y34[2]; r3[2,3] += x4[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r3=reg128#6,<x4=reg128#11%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r3=q5,<x4=d20,<5y34=d25
vmlal.u32 q5,d20,d25
# qhasm: r1[0,1] += x01[0] unsigned* y12[0]; r1[2,3] += x01[1] unsigned* y12[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%bot,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d16,<y12=d2
vmlal.u32 q13,d16,d2
# qhasm: r1[0,1] += x01[2] unsigned* y0[0]; r1[2,3] += x01[3] unsigned* y0[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x01=reg128#9%top,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r1=q13,<x01=d17,<y0=d0
vmlal.u32 q13,d17,d0
# qhasm: r1[0,1] += x23[0] unsigned* 5y34[2]; r1[2,3] += x23[1] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%bot,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r1=q13,<x23=d18,<5y34=d25
vmlal.u32 q13,d18,d25
# qhasm: r1[0,1] += x23[2] unsigned* 5y34[0]; r1[2,3] += x23[3] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r1=reg128#14,<x23=reg128#10%top,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r1=q13,<x23=d19,<5y34=d24
vmlal.u32 q13,d19,d24
# qhasm: r2[0,1] = c01[2]<<12; r2[2,3] = c01[3]<<12
# asm 1: vshll.u32 >r2=reg128#8,<c01=reg128#8%top,#12
# asm 2: vshll.u32 >r2=q7,<c01=d15,#12
vshll.u32 q7,d15,#12
# qhasm: r1[0,1] += x4[0] unsigned* 5y12[2]; r1[2,3] += x4[1] unsigned* 5y12[3]
# asm 1: vmlal.u32 <r1=reg128#14,<x4=reg128#11%bot,<5y12=reg128#12%top
# asm 2: vmlal.u32 <r1=q13,<x4=d20,<5y12=d23
vmlal.u32 q13,d20,d23
# qhasm: r2[0,1] += x01[0] unsigned* y12[2]; r2[2,3] += x01[1] unsigned* y12[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%bot,<y12=reg128#2%top
# asm 2: vmlal.u32 <r2=q7,<x01=d16,<y12=d3
vmlal.u32 q7,d16,d3
# qhasm: r2[0,1] += x01[2] unsigned* y12[0]; r2[2,3] += x01[3] unsigned* y12[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x01=reg128#9%top,<y12=reg128#2%bot
# asm 2: vmlal.u32 <r2=q7,<x01=d17,<y12=d2
vmlal.u32 q7,d17,d2
# qhasm: r2[0,1] += x23[0] unsigned* y0[0]; r2[2,3] += x23[1] unsigned* y0[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%bot,<y0=reg128#1%bot
# asm 2: vmlal.u32 <r2=q7,<x23=d18,<y0=d0
vmlal.u32 q7,d18,d0
# qhasm: r2[0,1] += x23[2] unsigned* 5y34[2]; r2[2,3] += x23[3] unsigned* 5y34[3]
# asm 1: vmlal.u32 <r2=reg128#8,<x23=reg128#10%top,<5y34=reg128#13%top
# asm 2: vmlal.u32 <r2=q7,<x23=d19,<5y34=d25
vmlal.u32 q7,d19,d25
# qhasm: r2[0,1] += x4[0] unsigned* 5y34[0]; r2[2,3] += x4[1] unsigned* 5y34[1]
# asm 1: vmlal.u32 <r2=reg128#8,<x4=reg128#11%bot,<5y34=reg128#13%bot
# asm 2: vmlal.u32 <r2=q7,<x4=d20,<5y34=d24
vmlal.u32 q7,d20,d24
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#9,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q8,<r0=q3,#26
vshr.u64 q8,q3,#26
# qhasm: r0 &= mask
# asm 1: vand >r0=reg128#4,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >r0=q3,<r0=q3,<mask=q6
vand q3,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#9,<r1=reg128#14,<t1=reg128#9
# asm 2: vadd.i64 >r1=q8,<r1=q13,<t1=q8
vadd.i64 q8,q13,q8
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#10,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q9,<r3=q5,#26
vshr.u64 q9,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x r4 += t4
# asm 1: vadd.i64 >r4=reg128#5,<r4=reg128#5,<t4=reg128#10
# asm 2: vadd.i64 >r4=q4,<r4=q4,<t4=q9
vadd.i64 q4,q4,q9
# qhasm: 2x t2 = r1 unsigned>> 26
# asm 1: vshr.u64 >t2=reg128#10,<r1=reg128#9,#26
# asm 2: vshr.u64 >t2=q9,<r1=q8,#26
vshr.u64 q9,q8,#26
# qhasm: r1 &= mask
# asm 1: vand >r1=reg128#11,<r1=reg128#9,<mask=reg128#7
# asm 2: vand >r1=q10,<r1=q8,<mask=q6
vand q10,q8,q6
# qhasm: 2x t0 = r4 unsigned>> 26
# asm 1: vshr.u64 >t0=reg128#9,<r4=reg128#5,#26
# asm 2: vshr.u64 >t0=q8,<r4=q4,#26
vshr.u64 q8,q4,#26
# qhasm: 2x r2 += t2
# asm 1: vadd.i64 >r2=reg128#8,<r2=reg128#8,<t2=reg128#10
# asm 2: vadd.i64 >r2=q7,<r2=q7,<t2=q9
vadd.i64 q7,q7,q9
# qhasm: r4 &= mask
# asm 1: vand >r4=reg128#5,<r4=reg128#5,<mask=reg128#7
# asm 2: vand >r4=q4,<r4=q4,<mask=q6
vand q4,q4,q6
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
# qhasm: 2x t0 <<= 2
# asm 1: vshl.i64 >t0=reg128#9,<t0=reg128#9,#2
# asm 2: vshl.i64 >t0=q8,<t0=q8,#2
vshl.i64 q8,q8,#2
# qhasm: 2x t3 = r2 unsigned>> 26
# asm 1: vshr.u64 >t3=reg128#14,<r2=reg128#8,#26
# asm 2: vshr.u64 >t3=q13,<r2=q7,#26
vshr.u64 q13,q7,#26
# qhasm: 2x r0 += t0
# asm 1: vadd.i64 >r0=reg128#4,<r0=reg128#4,<t0=reg128#9
# asm 2: vadd.i64 >r0=q3,<r0=q3,<t0=q8
vadd.i64 q3,q3,q8
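# note: same wraparound as in mainloop2: r0 absorbs t0 and then t0 << 2,
# so the carry out of limb 4 re-enters limb 0 multiplied by 5.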
# qhasm: x23 = r2 & mask
# asm 1: vand >x23=reg128#10,<r2=reg128#8,<mask=reg128#7
# asm 2: vand >x23=q9,<r2=q7,<mask=q6
vand q9,q7,q6
# qhasm: 2x r3 += t3
# asm 1: vadd.i64 >r3=reg128#6,<r3=reg128#6,<t3=reg128#14
# asm 2: vadd.i64 >r3=q5,<r3=q5,<t3=q13
vadd.i64 q5,q5,q13
# qhasm: 2x t1 = r0 unsigned>> 26
# asm 1: vshr.u64 >t1=reg128#8,<r0=reg128#4,#26
# asm 2: vshr.u64 >t1=q7,<r0=q3,#26
vshr.u64 q7,q3,#26
# qhasm: x01 = r0 & mask
# asm 1: vand >x01=reg128#9,<r0=reg128#4,<mask=reg128#7
# asm 2: vand >x01=q8,<r0=q3,<mask=q6
vand q8,q3,q6
# qhasm: 2x r1 += t1
# asm 1: vadd.i64 >r1=reg128#4,<r1=reg128#11,<t1=reg128#8
# asm 2: vadd.i64 >r1=q3,<r1=q10,<t1=q7
vadd.i64 q3,q10,q7
# qhasm: 2x t4 = r3 unsigned>> 26
# asm 1: vshr.u64 >t4=reg128#8,<r3=reg128#6,#26
# asm 2: vshr.u64 >t4=q7,<r3=q5,#26
vshr.u64 q7,q5,#26
# qhasm: r3 &= mask
# asm 1: vand >r3=reg128#6,<r3=reg128#6,<mask=reg128#7
# asm 2: vand >r3=q5,<r3=q5,<mask=q6
vand q5,q5,q6
# qhasm: 2x x4 = r4 + t4
# asm 1: vadd.i64 >x4=reg128#11,<r4=reg128#5,<t4=reg128#8
# asm 2: vadd.i64 >x4=q10,<r4=q4,<t4=q7
vadd.i64 q10,q4,q7
# qhasm: len -= 32
# asm 1: sub >len=int32#4,<len=int32#4,#32
# asm 2: sub >len=r3,<len=r3,#32
sub r3,r3,#32
# qhasm: x01 = x01[0,2,1,3]
# asm 1: vtrn.32 <x01=reg128#9%bot,<x01=reg128#9%top
# asm 2: vtrn.32 <x01=d16,<x01=d17
vtrn.32 d16,d17
# qhasm: x23 = x23[0,2,1,3]
# asm 1: vtrn.32 <x23=reg128#10%bot,<x23=reg128#10%top
# asm 2: vtrn.32 <x23=d18,<x23=d19
vtrn.32 d18,d19
# qhasm: r1 = r1[0,2,1,3]
# asm 1: vtrn.32 <r1=reg128#4%bot,<r1=reg128#4%top
# asm 2: vtrn.32 <r1=d6,<r1=d7
vtrn.32 d6,d7
# qhasm: r3 = r3[0,2,1,3]
# asm 1: vtrn.32 <r3=reg128#6%bot,<r3=reg128#6%top
# asm 2: vtrn.32 <r3=d10,<r3=d11
vtrn.32 d10,d11
# qhasm: x4 = x4[0,2,1,3]
# asm 1: vtrn.32 <x4=reg128#11%bot,<x4=reg128#11%top
# asm 2: vtrn.32 <x4=d20,<x4=d21
vtrn.32 d20,d21
# qhasm: x01 = x01[0,1] r1[0,1]
# asm 1: vext.32 <x01=reg128#9%top,<r1=reg128#4%bot,<r1=reg128#4%bot,#0
# asm 2: vext.32 <x01=d17,<r1=d6,<r1=d6,#0
vext.32 d17,d6,d6,#0
# qhasm: x23 = x23[0,1] r3[0,1]
# asm 1: vext.32 <x23=reg128#10%top,<r3=reg128#6%bot,<r3=reg128#6%bot,#0
# asm 2: vext.32 <x23=d19,<r3=d10,<r3=d10,#0
vext.32 d19,d10,d10,#0
# qhasm: unsigned>? len - 32
# asm 1: cmp <len=int32#4,#32
# asm 2: cmp <len=r3,#32
cmp r3,#32
# qhasm: goto mainloop if unsigned>
bhi ._mainloop
# qhasm: end:
._end:
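# note: the reduced accumulator (limbs x01, x23, x4; two lanes per
# register) is written back to the state buffer at input_0 in the same
# layout it was loaded from.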
# qhasm: mem128[input_0] = x01;input_0+=16
# asm 1: vst1.8 {<x01=reg128#9%bot-<x01=reg128#9%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x01=d16-<x01=d17},[<input_0=r0]!
vst1.8 {d16-d17},[r0]!
# qhasm: mem128[input_0] = x23;input_0+=16
# asm 1: vst1.8 {<x23=reg128#10%bot-<x23=reg128#10%top},[<input_0=int32#1]!
# asm 2: vst1.8 {<x23=d18-<x23=d19},[<input_0=r0]!
vst1.8 {d18-d19},[r0]!
# qhasm: mem64[input_0] = x4[0]
# asm 1: vst1.8 <x4=reg128#11%bot,[<input_0=int32#1]
# asm 2: vst1.8 <x4=d20,[<input_0=r0]
vst1.8 d20,[r0]
# qhasm: len = len
# asm 1: mov >len=int32#1,<len=int32#4
# asm 2: mov >len=r0,<len=r3
mov r0,r3
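# note: the count of unprocessed trailing bytes (at most 32) is returned
# in r0 before the epilogue restores sp and q4-q7.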
# qhasm: qpopreturn len
mov sp,r12
vpop {q4,q5,q6,q7}
bx lr