|
@@ -0,0 +1,111 @@
|
|
|
+/*
|
|
|
+ * lock.c
|
|
|
+ *
|
|
|
+ * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
|
|
|
+ *
|
|
|
+ * This program is free software: you can redistribute it and/or modify
|
|
|
+ * it under the terms of the GNU Affero General Public License as
|
|
|
+ * published by the Free Software Foundation, either version 3 of the
|
|
|
+ * License, or (at your option) any later version.
|
|
|
+ *
|
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
+ * GNU Affero General Public License for more details.
|
|
|
+ *
|
|
|
+ * You should have received a copy of the GNU Affero General Public License
|
|
|
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
+ */
|
|
|
+
|
|
|
+#include <thread.h>
|
|
|
+
|
|
|
+#undef lock_t
|
|
|
+#undef resource_t
|
|
|
+#include <lock.h>
|
|
|
+
|
|
|
+#define NO_HOLDER ((uintptr_t)0)
|
|
|
+#define MULTIPLE_HOLDERS ((uintptr_t)-1)
|
|
|
+#define UNKNOWN_HOLDER ((uintptr_t)-2)
|
|
|
+#define TEMPORARY_HOLDER ((uintptr_t)-3)
|
|
|
+
|
|
|
+void enter_critical(critical_t *critical)
|
|
|
+{
|
|
|
+ *critical = 0;
|
|
|
+ if (disable_ints()) *critical |= (1 << 0);
|
|
|
+ if (scheduler_enabled) *critical |= (1 << 1);
|
|
|
+
|
|
|
+ disable_ints();
|
|
|
+ scheduler_enabled = FALSE;
|
|
|
+}
|
|
|
+
|
|
|
+void leave_critical(critical_t *critical)
|
|
|
+{
|
|
|
+ if (*critical & (1 << 1)) scheduler_enabled = TRUE;
|
|
|
+ if (*critical & (1 << 0)) enable_ints();
|
|
|
+}
|
|
|
+
|
|
|
+void lock_acquire(lock_t *lock)
|
|
|
+{
|
|
|
+ uintptr_t new_holder = scheduler_enabled ? (uintptr_t)get_current_thread() : UNKNOWN_HOLDER;
|
|
|
+
|
|
|
+ for (;;)
|
|
|
+ {
|
|
|
+ uintptr_t old_holder = NO_HOLDER;
|
|
|
+
|
|
|
+ if (__atomic_compare_exchange(&lock->holder, &old_holder, &new_holder, FALSE, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
|
|
|
+ {
|
|
|
+ int32_t new_count = __atomic_add_fetch(&lock->count, 1, __ATOMIC_ACQUIRE);
|
|
|
+ ASSERT(new_count == 1);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (scheduler_enabled) scheduler_wait(WAIT_UNTIL_EQUAL, NO_TIMEOUT, &lock->holder, NO_HOLDER);
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Core of the recursive ("smart") and shared acquire paths.
 * Claims the lock for new_holder, or — when the lock is already held
 * under that same holder value — just bumps the hold count (re-entry).
 */
static inline void lock_acquire_smart_by(lock_t *lock, uintptr_t new_holder)
{
    for (;;)
    {
        uintptr_t old_holder = NO_HOLDER;
        /* Either claim a free lock, or detect that the current holder is
         * already new_holder. On CAS failure, old_holder is loaded with
         * the current holder value. */
        if (__atomic_compare_exchange(&lock->holder, &old_holder, &new_holder, FALSE, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)
            || old_holder == new_holder)
        {
            /* NOTE(review): in the shared case (new_holder ==
             * MULTIPLE_HOLDERS) another shared owner could release
             * between the comparison above and this increment — confirm
             * lock_release()'s TEMPORARY_HOLDER handshake closes that
             * window. */
            int32_t new_count = __atomic_add_fetch(&lock->count, 1, __ATOMIC_ACQUIRE);
            ASSERT(new_count > 0);
            return;
        }

        /* Held by a different owner: sleep until the holder changes,
         * then retry from the top. */
        if (scheduler_enabled) scheduler_wait(WAIT_UNTIL_NOT_EQUAL, NO_TIMEOUT, &lock->holder, old_holder);
    }
}
|
|
|
+
|
|
|
+void lock_acquire_smart(lock_t *lock)
|
|
|
+{
|
|
|
+ lock_acquire_smart_by(lock, scheduler_enabled ? (uintptr_t)get_current_thread() : UNKNOWN_HOLDER);
|
|
|
+}
|
|
|
+
|
|
|
+void lock_acquire_shared(lock_t *lock)
|
|
|
+{
|
|
|
+ lock_acquire_smart_by(lock, MULTIPLE_HOLDERS);
|
|
|
+}
|
|
|
+
|
|
|
+void lock_release(lock_t *lock)
|
|
|
+{
|
|
|
+ uintptr_t holder;
|
|
|
+
|
|
|
+ for (;;)
|
|
|
+ {
|
|
|
+ holder = __atomic_exchange_n(&lock->holder, TEMPORARY_HOLDER, __ATOMIC_ACQUIRE);
|
|
|
+ if (holder != TEMPORARY_HOLDER) break;
|
|
|
+ }
|
|
|
+
|
|
|
+ ASSERT(holder != NO_HOLDER);
|
|
|
+
|
|
|
+ int32_t new_count = __atomic_sub_fetch(&lock->count, 1, __ATOMIC_RELEASE);
|
|
|
+ ASSERT(new_count >= 0);
|
|
|
+ if (new_count == 0) holder = NO_HOLDER;
|
|
|
+
|
|
|
+ __atomic_store_n(&lock->holder, holder, __ATOMIC_RELEASE);
|
|
|
+ if (scheduler_enabled && holder == NO_HOLDER) syscall_yield_quantum();
|
|
|
+}
|