@ armv4cpuid_ios.S — OpenSSL ARMv4/ARMv7 CPU capability probes (iOS / Mach-O build)
  1. #include "arm_arch.h"
  2. .text
  3. .code 32
  4. .align 5
  5. .globl _OPENSSL_atomic_add
  6. _OPENSSL_atomic_add:
  7. #if __ARM_ARCH__>=6
  8. Ladd: ldrex r2,[r0]
  9. add r3,r2,r1
  10. strex r2,r3,[r0]
  11. cmp r2,#0
  12. bne Ladd
  13. mov r0,r3
  14. bx lr
  15. #else
  16. stmdb sp!,{r4,r5,r6,lr}
  17. ldr r2,Lspinlock
  18. adr r3,Lspinlock
  19. mov r4,r0
  20. mov r5,r1
  21. add r6,r3,r2 @ &spinlock
  22. b .+8
  23. Lspin: bl sched_yield
  24. mov r0,#-1
  25. swp r0,r0,[r6]
  26. cmp r0,#0
  27. bne Lspin
  28. ldr r2,[r4]
  29. add r2,r2,r5
  30. str r2,[r4]
  31. str r0,[r6] @ release spinlock
  32. ldmia sp!,{r4,r5,r6,lr}
  33. tst lr,#1
  34. moveq pc,lr
  35. .word 0xe12fff1e @ bx lr
  36. #endif
  37. .globl _OPENSSL_cleanse
  38. _OPENSSL_cleanse:
  39. eor ip,ip,ip
  40. cmp r1,#7
  41. subhs r1,r1,#4
  42. bhs Lot
  43. cmp r1,#0
  44. beq Lcleanse_done
  45. Little:
  46. strb ip,[r0],#1
  47. subs r1,r1,#1
  48. bhi Little
  49. b Lcleanse_done
  50. Lot: tst r0,#3
  51. beq Laligned
  52. strb ip,[r0],#1
  53. sub r1,r1,#1
  54. b Lot
  55. Laligned:
  56. str ip,[r0],#4
  57. subs r1,r1,#4
  58. bhs Laligned
  59. adds r1,r1,#4
  60. bne Little
  61. Lcleanse_done:
  62. #if __ARM_ARCH__>=5
  63. bx lr
  64. #else
  65. tst lr,#1
  66. moveq pc,lr
  67. .word 0xe12fff1e @ bx lr
  68. #endif
  69. .align 5
  70. .globl __armv7_neon_probe
  71. __armv7_neon_probe:
  72. vorr q0,q0,q0
  73. bx lr
  74. .globl __armv7_tick
  75. __armv7_tick:
  76. #ifdef __APPLE__
  77. mrrc p15,0,r0,r1,c14 @ CNTPCT
  78. #else
  79. mrrc p15,1,r0,r1,c14 @ CNTVCT
  80. #endif
  81. bx lr
  82. .globl __armv8_aes_probe
  83. __armv8_aes_probe:
  84. .byte 0x00,0x03,0xb0,0xf3 @ aese.8 q0,q0
  85. bx lr
  86. .globl __armv8_sha1_probe
  87. __armv8_sha1_probe:
  88. .byte 0x40,0x0c,0x00,0xf2 @ sha1c.32 q0,q0,q0
  89. bx lr
  90. .globl __armv8_sha256_probe
  91. __armv8_sha256_probe:
  92. .byte 0x40,0x0c,0x00,0xf3 @ sha256h.32 q0,q0,q0
  93. bx lr
  94. .globl __armv8_pmull_probe
  95. __armv8_pmull_probe:
  96. .byte 0x00,0x0e,0xa0,0xf2 @ vmull.p64 q0,d0,d0
  97. bx lr
  98. .globl _OPENSSL_wipe_cpu
  99. _OPENSSL_wipe_cpu:
  100. ldr r0,LOPENSSL_armcap
  101. adr r1,LOPENSSL_armcap
  102. ldr r0,[r1,r0]
  103. #ifdef __APPLE__
  104. ldr r0,[r0]
  105. #endif
  106. eor r2,r2,r2
  107. eor r3,r3,r3
  108. eor ip,ip,ip
  109. tst r0,#1
  110. beq Lwipe_done
  111. veor q0, q0, q0
  112. veor q1, q1, q1
  113. veor q2, q2, q2
  114. veor q3, q3, q3
  115. veor q8, q8, q8
  116. veor q9, q9, q9
  117. veor q10, q10, q10
  118. veor q11, q11, q11
  119. veor q12, q12, q12
  120. veor q13, q13, q13
  121. veor q14, q14, q14
  122. veor q15, q15, q15
  123. Lwipe_done:
  124. mov r0,sp
  125. #if __ARM_ARCH__>=5
  126. bx lr
  127. #else
  128. tst lr,#1
  129. moveq pc,lr
  130. .word 0xe12fff1e @ bx lr
  131. #endif
  132. .globl _OPENSSL_instrument_bus
  133. _OPENSSL_instrument_bus:
  134. eor r0,r0,r0
  135. #if __ARM_ARCH__>=5
  136. bx lr
  137. #else
  138. tst lr,#1
  139. moveq pc,lr
  140. .word 0xe12fff1e @ bx lr
  141. #endif
  142. .globl _OPENSSL_instrument_bus2
  143. _OPENSSL_instrument_bus2:
  144. eor r0,r0,r0
  145. #if __ARM_ARCH__>=5
  146. bx lr
  147. #else
  148. tst lr,#1
  149. moveq pc,lr
  150. .word 0xe12fff1e @ bx lr
  151. #endif
  152. .align 5
  153. LOPENSSL_armcap:
  154. .word OPENSSL_armcap_P-.
  155. #if __ARM_ARCH__>=6
  156. .align 5
  157. #else
  158. Lspinlock:
  159. .word atomic_add_spinlock-Lspinlock
  160. .align 5
  161. .data
  162. .align 2
  163. atomic_add_spinlock:
  164. .word
  165. #endif
  166. .comm _OPENSSL_armcap_P,4
  167. .non_lazy_symbol_pointer
  168. OPENSSL_armcap_P:
  169. .indirect_symbol _OPENSSL_armcap_P
  170. .long 0
  171. .private_extern _OPENSSL_armcap_P