spinlock.S

/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm_macros.S>

	.globl	spin_lock
	.globl	spin_unlock
	.globl	bit_lock
	.globl	bit_unlock

#if USE_SPINLOCK_CAS
#if !ARM_ARCH_AT_LEAST(8, 1)
#error USE_SPINLOCK_CAS option requires at least an ARMv8.1 platform
#endif

/*
 * When compiled for ARMv8.1 or later, choose spin locks based on Compare and
 * Swap instruction.
 */

/*
 * Acquire lock using Compare and Swap instruction.
 *
 * Compare for 0 with acquire semantics, and swap 1. If failed to acquire, use
 * load exclusive semantics to monitor the address and enter WFE.
 *
 * void spin_lock(spinlock_t *lock);
 */
func spin_lock
	mov	w2, #1		/* Value to store when the lock is taken */
1:	mov	w1, wzr		/* Expected value: 0 means the lock is free */
2:	casa	w1, w2, [x0]	/* Compare-and-swap with acquire semantics */
	cbz	w1, 3f		/* Old value was 0: lock acquired */
	ldxr	w1, [x0]	/* Set exclusive monitor on the lock address */
	cbz	w1, 2b		/* Lock became free: retry the CAS */
	wfe			/* Wait for an event from the unlocking store */
	b	1b
3:
	ret
endfunc spin_lock
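
/*
 * Illustrative only: assuming the lock is a 32-bit word holding 0 when free
 * and 1 when held, the acquire sequence above behaves roughly like the C
 * below, except that the assembly waits in WFE instead of busy-polling.
 * The GCC/Clang __atomic builtins and the spin_lock_c name are used purely
 * for exposition and are not part of this file's interface:
 *
 *	static inline void spin_lock_c(volatile uint32_t *l)
 *	{
 *		uint32_t expected = 0;
 *
 *		while (!__atomic_compare_exchange_n(l, &expected, 1, false,
 *						    __ATOMIC_ACQUIRE,
 *						    __ATOMIC_RELAXED))
 *			expected = 0;
 *	}
 */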

#else /* !USE_SPINLOCK_CAS */

/*
 * Acquire lock using load-/store-exclusive instruction pair.
 *
 * void spin_lock(spinlock_t *lock);
 */
func spin_lock
	mov	w2, #1		/* Value to store when the lock is taken */
	sevl			/* Set local event so the first WFE falls through */
l1:	wfe			/* Wait for an event from the unlocking store */
l2:	ldaxr	w1, [x0]	/* Load-acquire exclusive of the lock value */
	cbnz	w1, l1		/* Still held by another CPU: keep waiting */
	stxr	w1, w2, [x0]	/* Try to claim the lock; w1 == 0 on success */
	cbnz	w1, l2		/* Store-exclusive failed: reload and retry */
	ret
endfunc spin_lock

#endif /* USE_SPINLOCK_CAS */

/*
 * Release lock previously acquired by spin_lock.
 *
 * Use store-release to unconditionally clear the spinlock variable.
 * Store operation generates an event to all cores waiting in WFE
 * when address is monitored by the global monitor.
 *
 * void spin_unlock(spinlock_t *lock);
 */
func spin_unlock
	stlr	wzr, [x0]
	ret
endfunc spin_unlock
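
/*
 * Typical usage from C (illustrative; console_lock is a hypothetical example
 * variable, and spinlock_t comes from the accompanying spinlock header):
 *
 *	static spinlock_t console_lock;
 *
 *	spin_lock(&console_lock);
 *	... critical section ...
 *	spin_unlock(&console_lock);
 */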

/*
 * Atomic bit clear and set instructions require FEAT_LSE which is
 * mandatory from Armv8.1.
 */
#if ARM_ARCH_AT_LEAST(8, 1)

/*
 * Acquire bitlock using atomic bit set on byte. If the original read value
 * has the bit set, use load exclusive semantics to monitor the address and
 * enter WFE.
 *
 * void bit_lock(bitlock_t *lock, uint8_t mask);
 */
func bit_lock
1:	ldsetab	w1, w2, [x0]	/* Atomically set the mask bit; w2 = old value */
	tst	w2, w1		/* Was the bit already set by another owner? */
	b.eq	2f		/* No: the bit lock is now ours */
	ldxrb	w2, [x0]	/* Set exclusive monitor on the lock byte */
	tst	w2, w1
	b.eq	1b		/* Bit has been cleared: retry the atomic set */
	wfe			/* Wait for an event from the unlocking store */
	b	1b
2:
	ret
endfunc bit_lock
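
/*
 * Illustrative only: ignoring the WFE-based wait, the loop above is roughly
 * equivalent to the following C, where l is a hypothetical pointer to the
 * lock byte (volatile uint8_t *) and mask is the caller-supplied single-bit
 * mask. The __atomic builtin is shown purely for exposition:
 *
 *	while (__atomic_fetch_or(l, mask, __ATOMIC_ACQUIRE) & mask)
 *		;
 */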

/*
 * Use atomic bit clear store-release to unconditionally clear bitlock variable.
 * Store operation generates an event to all cores waiting in WFE when address
 * is monitored by the global monitor.
 *
 * void bit_unlock(bitlock_t *lock, uint8_t mask);
 */
func bit_unlock
	stclrlb	w1, [x0]
	ret
endfunc bit_unlock
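
/*
 * Typical usage from C (illustrative; shared_flags is a hypothetical example
 * variable and BIT() is the usual single-bit mask helper). One bitlock_t byte
 * can pack up to eight independent locks, each selected by a single-bit mask:
 *
 *	static bitlock_t shared_flags;
 *
 *	bit_lock(&shared_flags, BIT(1));
 *	... critical section ...
 *	bit_unlock(&shared_flags, BIT(1));
 */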

#endif /* ARM_ARCH_AT_LEAST(8, 1) */