/* atomic.h — MicroBlaze atomic primitives (musl-style internal header) */
#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>
  4. static inline int a_ctz_l(unsigned long x)
  5. {
  6. static const char debruijn32[32] = {
  7. 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  8. 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  9. };
  10. return debruijn32[(x&-x)*0x076be629 >> 27];
  11. }
  12. static inline int a_ctz_64(uint64_t x)
  13. {
  14. uint32_t y = x;
  15. if (!y) {
  16. y = x>>32;
  17. return 32 + a_ctz_l(y);
  18. }
  19. return a_ctz_l(y);
  20. }
/* Compare-and-swap: atomically replace *p with s iff *p == t.
 * Returns the value observed at *p (equal to t on success).
 * Uses MicroBlaze exclusive-access instructions: lwx loads *p and sets
 * the reservation; swx performs the store only if the reservation is
 * still held, and (per the MicroBlaze ISA) indicates failure via the
 * carry flag.  "addic %0, r0, 0" copies that carry into tmp, so the
 * loop retries the whole load/compare/store until the store sticks.
 * NOTE(review): a_cas below implements the same operation entirely in
 * asm; confirm whether this C/asm hybrid variant is still referenced. */
static inline int a_cas_1(volatile int *p, int t, int s)
{
	register int tmp;
	do {
		__asm__ __volatile__ ("lwx %0, %1, r0"
			: "=r"(tmp) : "r"(p) : "memory");
		if (tmp != t) return tmp;
		__asm__ __volatile__ ("swx %2, %1, r0 ; addic %0, %1, r0, 0"
			: "=r"(tmp) : "r"(p), "r"(s) : "cc", "memory");
	} while (tmp);
	return t;
}
/* Atomic compare-and-swap: if *p == t, store s into *p; returns the
 * value observed at *p (== t on success).  MicroBlaze lwx/swx loop:
 *   addi   %0, r0, 0   zero old so the asm has a defined output
 *   lwx               load *p, acquire the reservation
 *   rsubk  %1,%0,%3   tmp = t - old; nonzero means mismatch
 *   bnei   %1, 1f     on mismatch, skip the store and return old
 *   swx               conditional store; failure reported in carry
 *   addic  %1, r0, 0  tmp = carry (1 if the swx failed)
 *   bnei   %1, 1b     retry from the load on store failure
 * Both local labels are spelled "1:"; the "1f"/"1b" suffixes select the
 * forward and backward instance respectively (GNU as local labels). */
static inline int a_cas(volatile int *p, int t, int s)
{
	register int old, tmp;
	__asm__ __volatile__ (
		" addi %0, r0, 0\n"
		"1: lwx %0, %2, r0\n"
		" rsubk %1, %0, %3\n"
		" bnei %1, 1f\n"
		" swx %4, %2, r0\n"
		" addic %1, r0, 0\n"
		" bnei %1, 1b\n"
		"1: "
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(t), "r"(s)
		: "cc", "memory" );
	return old;
}
  50. static inline void *a_cas_p(volatile void *p, void *t, void *s)
  51. {
  52. return (void *)a_cas(p, (int)t, (int)s);
  53. }
  54. static inline long a_cas_l(volatile void *p, long t, long s)
  55. {
  56. return a_cas(p, t, s);
  57. }
/* Atomically store v into *x and return the previous value.
 * Same lwx/swx reservation loop as a_cas, minus the comparison: load
 * the old value, attempt the exclusive store of v, copy the carry flag
 * (swx failure indicator) into tmp, and retry from the load until the
 * store succeeds. */
static inline int a_swap(volatile int *x, int v)
{
	register int old, tmp;
	__asm__ __volatile__ (
		" addi %0, r0, 0\n"
		"1: lwx %0, %2, r0\n"
		" swx %3, %2, r0\n"
		" addic %1, r0, 0\n"
		" bnei %1, 1b\n"
		"1: "
		: "=&r"(old), "=&r"(tmp)
		: "r"(x), "r"(v)
		: "cc", "memory" );
	return old;
}
/* Atomically add v to *x; returns the PREVIOUS value (fetch-then-add).
 * The lwx/swx loop computes and stores the updated value in "new"
 * (addk adds v after the exclusive load), so the pre-add value is
 * recovered as new - v on return.  addic/bnei retry on swx failure,
 * signalled via the carry flag as in a_cas. */
static inline int a_fetch_add(volatile int *x, int v)
{
	register int new, tmp;
	__asm__ __volatile__ (
		" addi %0, r0, 0\n"
		"1: lwx %0, %2, r0\n"
		" addk %0, %0, %3\n"
		" swx %0, %2, r0\n"
		" addic %1, r0, 0\n"
		" bnei %1, 1b\n"
		"1: "
		: "=&r"(new), "=&r"(tmp)
		: "r"(x), "r"(v)
		: "cc", "memory" );
	return new-v;
}
  89. static inline void a_inc(volatile int *x)
  90. {
  91. a_fetch_add(x, 1);
  92. }
  93. static inline void a_dec(volatile int *x)
  94. {
  95. a_fetch_add(x, -1);
  96. }
  97. static inline void a_store(volatile int *p, int x)
  98. {
  99. *p=x;
  100. }
  101. static inline void a_spin()
  102. {
  103. }
  104. static inline void a_crash()
  105. {
  106. *(volatile char *)0=0;
  107. }
  108. static inline void a_and(volatile int *p, int v)
  109. {
  110. int old;
  111. do old = *p;
  112. while (a_cas(p, old, old&v) != old);
  113. }
  114. static inline void a_or(volatile int *p, int v)
  115. {
  116. int old;
  117. do old = *p;
  118. while (a_cas(p, old, old|v) != old);
  119. }
  120. static inline void a_or_l(volatile void *p, long v)
  121. {
  122. a_or(p, v);
  123. }
  124. static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  125. {
  126. union { uint64_t v; uint32_t r[2]; } u = { v };
  127. a_and((int *)p, u.r[0]);
  128. a_and((int *)p+1, u.r[1]);
  129. }
  130. static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  131. {
  132. union { uint64_t v; uint32_t r[2]; } u = { v };
  133. a_or((int *)p, u.r[0]);
  134. a_or((int *)p+1, u.r[1]);
  135. }
#endif