memmove.s

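/*
 * memmove/memcpy for MIPS, Plan 9 assembler.
 * memmove jumps straight into memcpy's body, so both entry
 * points share the same overlap-safe copy code.
 * The first argument (the destination, s1) arrives in R1;
 * the remaining arguments are read from the frame via FP,
 * and the result is returned in R1.
 */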
	TEXT	memmove(SB), $0
	JMP	move

	TEXT	memcpy(SB), $0
move:
	MOVW	R1, s1+0(FP)

	MOVW	n+8(FP), R3	/* R3 is count */
	MOVW	R1, R4		/* R4 is to-pointer */
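/*
 * a negative count is an error: SGT sets R5 when
 * 0 > count, and the load through R0 (always zero)
 * then faults deliberately.
 */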
	SGT	R0, R3, R5
	BEQ	R5, ok
	MOVW	(R0), R0	/* abort if negative count */
ok:
	MOVW	s2+4(FP), R5	/* R5 is from-pointer */
	ADDU	R3,R5, R7	/* R7 is end from-pointer */
	ADDU	R3,R4, R6	/* R6 is end to-pointer */

/*
 * easiest test is copy backwards if
 * destination has the higher memory address
 */
	SGT	$4,R3, R2
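/*
 * R2 = (count < 4); computed here so that both the
 * forward and backward paths can test it.
 */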
	SGTU	R4,R5, R1
	BNE	R1, back

/*
 * if not at least 4 chars,
 * don't even mess around.
 * 3 chars to guarantee any
 * rounding up to a word
 * boundary and 4 characters
 * to get at least maybe one
 * full word store.
 */
	BNE	R2, fout

/*
 * byte at a time to word align destination
 */
f1:
	AND	$3,R4, R1
	BEQ	R1, f2
	MOVB	0(R5), R8
	ADDU	$1, R5
	MOVB	R8, 0(R4)
	ADDU	$1, R4
	JMP	f1

/*
 * test if source is now word aligned
 */
f2:
	AND	$3, R5, R1
	BNE	R1, fun2
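/*
 * source and destination are now both word aligned
 */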
/*
 * turn R3 into to-end pointer-15
 * copy 16 at a time while there's room.
 * R6 is smaller than R7 --
 * there are problems if R7 is 0.
 */
	ADDU	$-15,R6, R3
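/*
 * loads and stores are interleaved so a register is not
 * stored in the instruction right after it is loaded,
 * covering the MIPS load delay slot.
 */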
f3:
	SGTU	R3,R4, R1
	BEQ	R1, f4
	MOVW	0(R5), R8
	MOVW	4(R5), R9
	MOVW	R8, 0(R4)
	MOVW	8(R5), R8
	MOVW	R9, 4(R4)
	MOVW	12(R5), R9
	ADDU	$16, R5
	MOVW	R8, 8(R4)
	MOVW	R9, 12(R4)
	ADDU	$16, R4
	JMP	f3

/*
 * turn R3 into to-end pointer-3
 * copy 4 at a time while there's room
 */
f4:
	ADDU	$-3,R6, R3
f5:
	SGTU	R3,R4, R1
	BEQ	R1, fout
	MOVW	0(R5), R8
	ADDU	$4, R5
	MOVW	R8, 0(R4)
	ADDU	$4, R4
	JMP	f5

/*
 * forward copy, unaligned
 * turn R3 into to-end pointer-15
 * copy 16 at a time while there's room.
 * R6 is smaller than R7 --
 * there are problems if R7 is 0.
 */
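/*
 * each MOVWL/MOVWR pair fetches one full word from an
 * unaligned source address (MIPS load-word-left/right);
 * the destination was word aligned above, so plain
 * word stores suffice.
 */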
fun2:
	ADDU	$-15,R6, R3
fun3:
	SGTU	R3,R4, R1
	BEQ	R1, fun4
	MOVWL	0(R5), R8
	MOVWR	3(R5), R8
	MOVWL	4(R5), R9
	MOVWR	7(R5), R9
	MOVW	R8, 0(R4)
	MOVWL	8(R5), R8
	MOVWR	11(R5), R8
	MOVW	R9, 4(R4)
	MOVWL	12(R5), R9
	MOVWR	15(R5), R9
	ADDU	$16, R5
	MOVW	R8, 8(R4)
	MOVW	R9, 12(R4)
	ADDU	$16, R4
	JMP	fun3

/*
 * turn R3 into to-end pointer-3
 * copy 4 at a time while there's room
 */
fun4:
	ADDU	$-3,R6, R3
fun5:
	SGTU	R3,R4, R1
	BEQ	R1, fout
	MOVWL	0(R5), R8
	MOVWR	3(R5), R8
	ADDU	$4, R5
	MOVW	R8, 0(R4)
	ADDU	$4, R4
	JMP	fun5

/*
 * last loop, copy byte at a time
 */
fout:
	BEQ	R7,R5, ret
	MOVB	0(R5), R8
	ADDU	$1, R5
	MOVB	R8, 0(R4)
	ADDU	$1, R4
	JMP	fout

/*
 * whole thing repeated for backwards
 */
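/*
 * the backward copy runs from the end pointers (R6, R7)
 * down toward the start, so a destination that overlaps
 * the tail of the source is not clobbered.
 */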
back:
	BNE	R2, bout
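/*
 * byte at a time to word align the destination end pointer
 */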
b1:
	AND	$3,R6, R1
	BEQ	R1, b2
	MOVB	-1(R7), R8
	ADDU	$-1, R7
	MOVB	R8, -1(R6)
	ADDU	$-1, R6
	JMP	b1
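/*
 * test if the source end pointer is now word aligned
 */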
b2:
	AND	$3, R7, R1
	BNE	R1, bun2
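/*
 * turn R3 into from-start pointer+15
 * copy 16 at a time backwards while there's room
 */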
	ADDU	$15,R5, R3
b3:
	SGTU	R7,R3, R1
	BEQ	R1, b4
	MOVW	-4(R7), R8
	MOVW	-8(R7), R9
	MOVW	R8, -4(R6)
	MOVW	-12(R7), R8
	MOVW	R9, -8(R6)
	MOVW	-16(R7), R9
	ADDU	$-16, R7
	MOVW	R8, -12(R6)
	MOVW	R9, -16(R6)
	ADDU	$-16, R6
	JMP	b3
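/*
 * turn R3 into from-start pointer+3
 * copy 4 at a time backwards while there's room
 */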
b4:
	ADDU	$3,R5, R3
b5:
	SGTU	R7,R3, R1
	BEQ	R1, bout
	MOVW	-4(R7), R8
	ADDU	$-4, R7
	MOVW	R8, -4(R6)
	ADDU	$-4, R6
	JMP	b5
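/*
 * backward copy, unaligned
 */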
bun2:
	ADDU	$15,R5, R3
bun3:
	SGTU	R7,R3, R1
	BEQ	R1, bun4
	MOVWL	-4(R7), R8
	MOVWR	-1(R7), R8
	MOVWL	-8(R7), R9
	MOVWR	-5(R7), R9
	MOVW	R8, -4(R6)
	MOVWL	-12(R7), R8
	MOVWR	-9(R7), R8
	MOVW	R9, -8(R6)
	MOVWL	-16(R7), R9
	MOVWR	-13(R7), R9
	ADDU	$-16, R7
	MOVW	R8, -12(R6)
	MOVW	R9, -16(R6)
	ADDU	$-16, R6
	JMP	bun3
bun4:
	ADDU	$3,R5, R3
bun5:
	SGTU	R7,R3, R1
	BEQ	R1, bout
	MOVWL	-4(R7), R8
	MOVWR	-1(R7), R8
	ADDU	$-4, R7
	MOVW	R8, -4(R6)
	ADDU	$-4, R6
	JMP	bun5
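/*
 * last loop, copy byte at a time
 */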
bout:
	BEQ	R7,R5, ret
	MOVB	-1(R7), R8
	ADDU	$-1, R7
	MOVB	R8, -1(R6)
	ADDU	$-1, R6
	JMP	bout
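/*
 * both entry points return the original destination pointer
 */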
ret:
	MOVW	s1+0(FP), R1
	RET
	END