lock.c

/*
 * lock.c
 *
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <thread.h>
#include <lock.h>
#include <cpu.h>

/* Sentinel values stored in lock->holder. */
#define NO_HOLDER        ((uintptr_t)0)  /* lock is free */
#define MULTIPLE_HOLDERS ((uintptr_t)-1) /* lock held in shared mode */
#define UNKNOWN_HOLDER   ((uintptr_t)-2) /* holder unknown (scheduler not running yet) */
#define TEMPORARY_HOLDER ((uintptr_t)-3) /* transient marker used during release */
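
/*
 * Enter a critical section: interrupts and the scheduler are disabled,
 * and their previous state is recorded in *critical so that
 * leave_critical() can restore exactly what was enabled before.
 * (cpu_disable_interrupts() presumably returns whether interrupts were
 * enabled when it was called.)
 */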
void enter_critical(critical_t *critical)
{
    *critical = 0;
    if (cpu_disable_interrupts()) *critical |= (1 << 0);
    if (scheduler_enabled) *critical |= (1 << 1);
    scheduler_enabled = FALSE;
}
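
/*
 * Leave a critical section: re-enable the scheduler and/or interrupts
 * if, and only if, they were enabled when enter_critical() saved the
 * state into *critical.
 */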
void leave_critical(critical_t *critical)
{
    if (*critical & (1 << 1)) scheduler_enabled = TRUE;
    if (*critical & (1 << 0)) cpu_enable_interrupts();
}
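
/*
 * Acquire the lock exclusively. The holder field is claimed with a
 * compare-and-swap from NO_HOLDER to the current thread (or to
 * UNKNOWN_HOLDER while the scheduler is not running). On contention,
 * the caller sleeps until the holder becomes NO_HOLDER again, or busy
 * loops when the scheduler is unavailable.
 */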
void lock_acquire(lock_t *lock)
{
    uintptr_t new_holder = scheduler_enabled ? (uintptr_t)get_current_thread() : UNKNOWN_HOLDER;
    wait_condition_t condition = { .type = WAIT_UNTIL_EQUAL, .pointer = &lock->holder, .value = 0 };

    for (;;)
    {
        uintptr_t old_holder = NO_HOLDER;
        if (__atomic_compare_exchange(&lock->holder, &old_holder, &new_holder, FALSE, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        {
            int32_t new_count = __atomic_add_fetch(&lock->count, 1, __ATOMIC_ACQUIRE);
            ASSERT(new_count == 1);
            return;
        }

        if (scheduler_enabled) scheduler_wait(&condition, NO_TIMEOUT);
    }
}
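
/*
 * Common helper for the recursive ("smart") and shared acquire paths.
 * The lock is taken either when the holder can be set from NO_HOLDER to
 * new_holder, or when it is already equal to new_holder, in which case
 * only the reference count is incremented. Waiting uses a WAIT_GROUP_ANY
 * condition, so a sleeper wakes when the holder becomes either free or
 * equal to new_holder.
 */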
static inline void lock_acquire_smart_by(lock_t *lock, uintptr_t new_holder)
{
    wait_condition_t condition1 = { .type = WAIT_UNTIL_EQUAL, .pointer = &lock->holder, .value = 0 };
    wait_condition_t condition2 = { .type = WAIT_UNTIL_EQUAL, .pointer = &lock->holder, .value = new_holder };
    wait_condition_t *condition = __builtin_alloca(sizeof(wait_condition_t) + 3 * sizeof(wait_condition_t*));

    condition->type = WAIT_GROUP_ANY;
    condition->conditions[0] = &condition1;
    condition->conditions[1] = &condition2;
    condition->conditions[2] = NULL;

    for (;;)
    {
        uintptr_t old_holder = NO_HOLDER;
        if (__atomic_compare_exchange(&lock->holder, &old_holder, &new_holder, FALSE, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
            || old_holder == new_holder)
        {
            int32_t new_count = __atomic_add_fetch(&lock->count, 1, __ATOMIC_ACQUIRE);
            ASSERT(new_count > 0);
            return;
        }

        if (scheduler_enabled) scheduler_wait(condition, NO_TIMEOUT);
    }
}
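
/*
 * Recursively acquire the lock on behalf of the current thread
 * (UNKNOWN_HOLDER while the scheduler is not running yet); repeated
 * acquisitions by the same holder simply bump the count.
 */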
void lock_acquire_smart(lock_t *lock)
{
    lock_acquire_smart_by(lock, scheduler_enabled ? (uintptr_t)get_current_thread() : UNKNOWN_HOLDER);
}
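
/*
 * Acquire the lock in shared mode: all shared owners use the common
 * MULTIPLE_HOLDERS value, so any of them may re-enter, and the count
 * tracks how many acquisitions are outstanding.
 */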
void lock_acquire_shared(lock_t *lock)
{
    lock_acquire_smart_by(lock, MULTIPLE_HOLDERS);
}
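
/*
 * Release one reference to the lock. The holder field is parked at
 * TEMPORARY_HOLDER while the count is updated, then restored; when the
 * count drops to zero the holder becomes NO_HOLDER and, if the scheduler
 * is running, the quantum is yielded so that waiters can run.
 */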
void lock_release(lock_t *lock)
{
    uintptr_t holder;

    for (;;)
    {
        holder = __atomic_exchange_n(&lock->holder, TEMPORARY_HOLDER, __ATOMIC_ACQUIRE);
        if (holder != TEMPORARY_HOLDER) break;
    }

    ASSERT(holder != NO_HOLDER);

    int32_t new_count = __atomic_sub_fetch(&lock->count, 1, __ATOMIC_RELEASE);
    ASSERT(new_count >= 0);
    if (new_count == 0) holder = NO_HOLDER;

    __atomic_store_n(&lock->holder, holder, __ATOMIC_RELEASE);
    if (scheduler_enabled && holder == NO_HOLDER) syscall_yield_quantum();
}