  1. /*
  2. * lock.c
  3. *
  4. * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
  5. *
  6. * This program is free software: you can redistribute it and/or modify
  7. * it under the terms of the GNU Affero General Public License as
  8. * published by the Free Software Foundation, either version 3 of the
  9. * License, or (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU Affero General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Affero General Public License
  17. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <thread.h>
  20. #undef lock_t
  21. #undef resource_t
  22. #include <lock.h>
  23. #define NO_HOLDER ((uintptr_t)0)
  24. #define MULTIPLE_HOLDERS ((uintptr_t)-1)
  25. #define UNKNOWN_HOLDER ((uintptr_t)-2)
  26. #define TEMPORARY_HOLDER ((uintptr_t)-3)
/*
 * Enter a critical section: remember the current interrupt and scheduler
 * state in *critical, then turn both off.  Pair with leave_critical().
 *
 * Bit 0 of *critical: interrupts were enabled on entry.
 * Bit 1 of *critical: the scheduler was enabled on entry.
 */
void enter_critical(critical_t *critical)
{
    *critical = 0;
    /* NOTE(review): this relies on disable_ints() returning the previous
     * interrupt-enable state while disabling — confirm against its
     * definition; it is not visible in this file. */
    if (disable_ints()) *critical |= (1 << 0);
    if (scheduler_enabled) *critical |= (1 << 1);
    /* NOTE(review): if the disable_ints() call above already disabled
     * interrupts, this second call looks redundant — confirm before
     * removing. */
    disable_ints();
    scheduler_enabled = FALSE;
}
  35. void leave_critical(critical_t *critical)
  36. {
  37. if (*critical & (1 << 1)) scheduler_enabled = TRUE;
  38. if (*critical & (1 << 0)) enable_ints();
  39. }
/*
 * Acquire *lock exclusively, blocking (or spinning) until it is free.
 *
 * The holder field is tagged with the current thread pointer when the
 * scheduler is running, otherwise with UNKNOWN_HOLDER (no thread
 * context, e.g. early boot).  On success the hold count must be exactly
 * 1 — this lock flavor is non-recursive.
 */
void lock_acquire(lock_t *lock)
{
    uintptr_t new_holder = scheduler_enabled ? (uintptr_t)get_current_thread() : UNKNOWN_HOLDER;
    for (;;)
    {
        /* Only an unheld lock (holder == NO_HOLDER) can be claimed; the
         * CAS rewrites old_holder with the observed value on failure. */
        uintptr_t old_holder = NO_HOLDER;
        if (__atomic_compare_exchange(&lock->holder, &old_holder, &new_holder, FALSE, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        {
            int32_t new_count = __atomic_add_fetch(&lock->count, 1, __ATOMIC_ACQUIRE);
            /* Exclusive acquisition: we must be the sole holder. */
            ASSERT(new_count == 1);
            return;
        }
        /* Sleep until the holder field returns to NO_HOLDER; with the
         * scheduler off we busy-loop on the CAS instead. */
        if (scheduler_enabled) scheduler_wait(WAIT_UNTIL_EQUAL, NO_TIMEOUT, &lock->holder, NO_HOLDER);
    }
}
/*
 * Acquire *lock on behalf of new_holder, allowing re-entry: succeeds
 * either by claiming an unheld lock (holder == NO_HOLDER) or when the
 * lock is already held by the same new_holder value, in which case the
 * count tracks the nesting depth.  Shared acquisition works by passing
 * the same sentinel (MULTIPLE_HOLDERS) for every reader.
 */
static inline void lock_acquire_smart_by(lock_t *lock, uintptr_t new_holder)
{
    for (;;)
    {
        uintptr_t old_holder = NO_HOLDER;
        /* On CAS failure old_holder is rewritten with the observed
         * value, so the second disjunct detects re-entry by the same
         * holder id. */
        if (__atomic_compare_exchange(&lock->holder, &old_holder, &new_holder, FALSE, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
            || old_holder == new_holder)
        {
            int32_t new_count = __atomic_add_fetch(&lock->count, 1, __ATOMIC_ACQUIRE);
            ASSERT(new_count > 0);
            return;
        }
        /* Held by someone else: sleep until the holder field changes
         * from the value we just observed, then retry. */
        if (scheduler_enabled) scheduler_wait(WAIT_UNTIL_NOT_EQUAL, NO_TIMEOUT, &lock->holder, old_holder);
    }
}
  70. void lock_acquire_smart(lock_t *lock)
  71. {
  72. lock_acquire_smart_by(lock, scheduler_enabled ? (uintptr_t)get_current_thread() : UNKNOWN_HOLDER);
  73. }
  74. void lock_acquire_shared(lock_t *lock)
  75. {
  76. lock_acquire_smart_by(lock, MULTIPLE_HOLDERS);
  77. }
/*
 * Release one level of ownership of *lock.
 *
 * The holder word is spin-swapped to TEMPORARY_HOLDER to gain exclusive
 * access to the (holder, count) pair, the count is decremented, and the
 * old holder is restored — or replaced with NO_HOLDER when the last
 * reference is dropped, making the lock available to waiters.
 */
void lock_release(lock_t *lock)
{
    uintptr_t holder;
    for (;;)
    {
        /* If another CPU is mid-release (holder already reads as
         * TEMPORARY_HOLDER), retry until it finishes its handshake. */
        holder = __atomic_exchange_n(&lock->holder, TEMPORARY_HOLDER, __ATOMIC_ACQUIRE);
        if (holder != TEMPORARY_HOLDER) break;
    }
    /* Releasing an unheld lock is a caller bug. */
    ASSERT(holder != NO_HOLDER);
    int32_t new_count = __atomic_sub_fetch(&lock->count, 1, __ATOMIC_RELEASE);
    ASSERT(new_count >= 0);
    /* Last reference dropped: publish the lock as free. */
    if (new_count == 0) holder = NO_HOLDER;
    __atomic_store_n(&lock->holder, holder, __ATOMIC_RELEASE);
    /* Yield so a waiter blocked in scheduler_wait() can run promptly. */
    if (scheduler_enabled && holder == NO_HOLDER) syscall_yield_quantum();
}