/*
 * thread.c
 *
 * Copyright (C) 2018 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <thread.h>
#include <timer.h>
#include <process.h>
#include <exception.h>
#include <syscalls.h>
#include <segments.h>
#include <heap.h>
#include <cpu.h>
#include <log.h>

extern void reschedule(void);

bool_t scheduler_enabled = FALSE;
static list_entry_t thread_queue[THREAD_PRIORITY_MAX];
static thread_t *current_thread = NULL;
static thread_t *last_fpu_thread = NULL;
static dword_t tid_alloc_bitmap[MAX_THREADS / 32];
static DECLARE_LOCK(tid_bitmap_lock);
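
/*
 * Allocates the lowest free thread ID by scanning the TID bitmap under the
 * bitmap lock. Returns (dword_t)-1 when all MAX_THREADS IDs are in use.
 */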
static dword_t alloc_tid()
{
    int i;
    dword_t tid = (dword_t)-1;

    lock_acquire(&tid_bitmap_lock);

    for (i = 0; i < MAX_THREADS; i++)
    {
        if (!test_bit(tid_alloc_bitmap, i))
        {
            tid = i;
            set_bit(tid_alloc_bitmap, i);
            break;
        }
    }

    lock_release(&tid_bitmap_lock);
    return tid;
}
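
/*
 * Recursively evaluates a wait condition tree. Group nodes combine their
 * children with any/all semantics; leaf nodes compare *pointer against
 * value. WAIT_ALWAYS is never satisfied, so such waits can only end by
 * timeout or cancellation.
 */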
static inline bool_t test_condition(wait_condition_t *condition)
{
    wait_condition_t **ptr;

    switch (condition->type)
    {
    case WAIT_GROUP_ANY:
        for (ptr = condition->conditions; *ptr; ptr++) if (test_condition(*ptr)) return TRUE;
        return FALSE;

    case WAIT_GROUP_ALL:
        for (ptr = condition->conditions; *ptr; ptr++) if (!test_condition(*ptr)) return FALSE;
        return TRUE;

    case WAIT_ALWAYS:
        return FALSE;

    case WAIT_UNTIL_EQUAL:
        return *condition->pointer == condition->value;

    case WAIT_UNTIL_NOT_EQUAL:
        return *condition->pointer != condition->value;

    case WAIT_UNTIL_LESS:
        return *condition->pointer < condition->value;

    case WAIT_UNTIL_NOT_LESS:
        return *condition->pointer >= condition->value;

    case WAIT_UNTIL_GREATER:
        return *condition->pointer > condition->value;

    case WAIT_UNTIL_NOT_GREATER:
        return *condition->pointer <= condition->value;

    default:
        KERNEL_CRASH("Invalid wait condition value");
        return FALSE;
    }
}
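
/*
 * Decides whether a thread can be scheduled. A thread is not ready when it
 * has terminated or is frozen in user mode. A waiting thread becomes ready
 * when its condition is hit, its timeout expires, or it is being
 * terminated; the wait result is recorded and the wait cleared accordingly.
 */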
static inline bool_t is_thread_ready(thread_t *thread)
{
    qword_t current_time = timer_get_milliseconds();

    if (thread->terminated) return FALSE;
    if (thread->frozen > 0 && !thread->in_kernel) return FALSE;
    if (!thread->wait) return TRUE;

    if (test_condition(thread->wait->root))
    {
        thread->wait->result = WAIT_CONDITION_HIT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->wait->timeout != NO_TIMEOUT && (current_time - thread->wait->timestamp) >= (qword_t)thread->wait->timeout)
    {
        thread->wait->result = WAIT_TIMED_OUT;
        thread->wait = NULL;
        return TRUE;
    }

    if (thread->terminating)
    {
        thread->wait->result = WAIT_CANCELED;
        thread->wait = NULL;
        return TRUE;
    }

    return FALSE;
}
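
/*
 * Final teardown of a terminated thread: unlinks it from the scheduler
 * queue and its owner's thread list, frees its kernel stack, destroys the
 * owning process if this was its last thread, and drops the reference that
 * kept the thread object alive.
 */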
static void destroy_thread(thread_t *thread)
{
    list_remove(&thread->in_queue_list);

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_remove(&thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    free(thread->kernel_stack);
    thread->kernel_stack = NULL;

    if (thread->owner_process->threads.next == &thread->owner_process->threads)
    {
        destroy_process(thread->owner_process);
    }

    dereference(&thread->header);
}

void thread_cleanup(object_t *obj)
{
    if (CONTAINER_OF(obj, thread_t, header) == last_fpu_thread) last_fpu_thread = NULL;
}

dword_t thread_pre_wait(object_t *obj, void *parameter, wait_condition_t *condition)
{
    thread_t *thread = (thread_t*)obj;

    condition->type = WAIT_UNTIL_NOT_EQUAL;
    condition->pointer = &thread->terminated;
    condition->value = FALSE;

    return ERR_SUCCESS;
}
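
/*
 * Common thread construction path. Allocates the thread object and a TID,
 * initializes the saved CPU state with user or kernel selectors depending
 * on the owning process, links the thread into the process's thread list,
 * and appends it to the scheduler queue for its priority.
 */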
dword_t create_thread_internal(process_t *proc, thread_state_t *initial_state, dword_t flags, priority_t priority, void *kernel_stack, thread_t **new_thread)
{
    dword_t ret;
    if (proc->terminating) return ERR_CANCELED;

    thread_t *thread = (thread_t*)malloc(sizeof(thread_t));
    if (thread == NULL) return ERR_NOMEMORY;

    init_object(&thread->header, NULL, OBJECT_THREAD);

    ret = create_object(&thread->header);
    if (ret != ERR_SUCCESS)
    {
        free(thread);
        return ret;
    }

    thread->tid = alloc_tid();
    if (thread->tid == (dword_t)-1)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    thread->priority = priority;
    thread->quantum = QUANTUM;
    thread->frozen = (flags & THREAD_CREATE_FROZEN) ? TRUE : FALSE;
    thread->running_ticks = 0ULL;
    thread->owner_process = proc;
    thread->exit_code = 0;
    thread->terminating = FALSE;
    thread->terminated = FALSE;
    thread->last_context = NULL;
    thread->wait = NULL;

    memset(&thread->kernel_handler, 0, sizeof(thread->kernel_handler));
    memset(&thread->user_handler, 0, sizeof(thread->user_handler));

    thread->state = *initial_state;
    thread->state.regs.eflags = 0x202;

    if (proc != kernel_process)
    {
        thread->previous_mode = USER_MODE;
        thread->in_kernel = 0;
        thread->state.regs.cs = get_user_code_selector();
        thread->state.regs.data_selector = get_user_data_selector();
    }
    else
    {
        thread->previous_mode = KERNEL_MODE;
        /* Kernel threads always run in kernel mode; without this the field
         * would be left uninitialized, as malloc() does not zero memory. */
        thread->in_kernel = 1;
        thread->state.regs.cs = get_kernel_code_selector();
        thread->state.regs.data_selector = get_kernel_data_selector();
    }

    thread->kernel_stack = kernel_stack;
    thread->kernel_esp = ((uintptr_t)thread->kernel_stack + KERNEL_STACK_SIZE + 3) & ~3;

    lock_acquire(&thread->owner_process->thread_list_lock);
    list_append(&proc->threads, &thread->in_process_list);
    lock_release(&thread->owner_process->thread_list_lock);

    critical_t critical;
    enter_critical(&critical);
    list_append(&thread_queue[priority], &thread->in_queue_list);
    leave_critical(&critical);

    *new_thread = thread;
    ret = ERR_SUCCESS;

cleanup:
    if (ret != ERR_SUCCESS)
    {
        /* Release the TID before dropping the last reference to the thread
         * object; the old order read thread->tid after dereference(). The
         * kernel stack is owned and freed by the caller on failure, and
         * thread->kernel_stack has not been assigned yet on this path. */
        if (thread->tid != (dword_t)-1)
        {
            lock_acquire(&tid_bitmap_lock);
            clear_bit(tid_alloc_bitmap, thread->tid);
            lock_release(&tid_bitmap_lock);
        }

        dereference(&thread->header);
    }

    return ret;
}

thread_t *get_current_thread()
{
    return current_thread;
}
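
/*
 * Lazy FPU context switching: FPU state is saved and restored only when a
 * thread other than the last FPU user actually touches the FPU. The "clts"
 * instruction clears CR0.TS so further FPU use no longer faults.
 */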
void thread_lazy_fpu(void)
{
    if (last_fpu_thread) cpu_save_fpu_state(last_fpu_thread->state.fpu_state);
    cpu_restore_fpu_state(current_thread->state.fpu_state);
    last_fpu_thread = current_thread;
    asm volatile ("clts");
}
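
/*
 * Round-robin scheduler with static priorities, invoked on timer ticks.
 * When the running thread's quantum is exhausted, it is re-queued and the
 * first ready thread in the highest-priority non-empty queue is chosen. A
 * context switch saves the outgoing registers, rebuilds the incoming
 * thread's kernel stack to look like an interrupt frame (tagged with
 * CONTEXT_SWITCH_MAGIC), and switches page directories across processes.
 */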
void scheduler(registers_t *regs)
{
    int i;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->quantum == 0)
    {
        list_append(&thread_queue[current_thread->priority], &current_thread->in_queue_list);

        thread_t *next_thread = NULL;
        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;
            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *thread = CONTAINER_OF(ptr, thread_t, in_queue_list);
                if (is_thread_ready(thread))
                {
                    next_thread = thread;
                    goto found;
                }
            }
        }

found:
        ASSERT(next_thread != NULL);
        list_remove(&next_thread->in_queue_list);

        if (current_thread->tid != 0) ASSERT(current_thread->kernel_esp >= (uintptr_t)current_thread->kernel_stack);
        if (next_thread->tid != 0) ASSERT(next_thread->kernel_esp >= (uintptr_t)next_thread->kernel_stack);

        if (current_thread != next_thread)
        {
            memcpy(&current_thread->state.regs, regs, sizeof(registers_t));
            current_thread->kernel_esp = regs->esp;
            if (SEGMENT_RPL(regs->cs) != 0) current_thread->state.regs.esp = ((registers_ext_t*)regs)->esp3;

            set_kernel_esp(next_thread->kernel_esp);

            /*asm volatile ("pushl %eax\n"
                            "movl %cr4, %eax\n"
                            "orb $0x08, %al\n"
                            "movl %eax, %cr4\n"
                            "popl %eax\n");*/

            if (SEGMENT_RPL(next_thread->state.regs.cs) != 0)
            {
                push_to_stack(&next_thread->kernel_esp, get_user_data_selector());
                push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            }

            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eflags);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.cs);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eip);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.error_code);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.eax);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ecx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebx);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.ebp);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.esi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.edi);
            push_to_stack(&next_thread->kernel_esp, next_thread->state.regs.data_selector);

            regs->esp = next_thread->kernel_esp;
            regs->error_code = CONTEXT_SWITCH_MAGIC;

            if (current_thread->owner_process != next_thread->owner_process)
            {
                set_page_directory(next_thread->owner_process->memory_space.page_directory);
            }
        }

        if (current_thread->owner_process != kernel_process)
        {
            bump_address_space(&current_thread->owner_process->memory_space);
        }

        if (current_thread->terminating && !current_thread->in_kernel) current_thread->terminated = TRUE;
        if (current_thread->terminated) destroy_thread(current_thread);

        current_thread = next_thread;
        current_thread->quantum = QUANTUM;
    }
    else
    {
        current_thread->quantum--;
    }

    leave_critical(&critical);
}
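
/*
 * Blocks the current thread until the given condition is satisfied or the
 * timeout (in milliseconds) expires. The wait is installed with an atomic
 * compare-and-swap, spinning until the thread's wait slot is free, and the
 * quantum is then yielded.
 *
 * Illustrative sketch of a caller (the flag variable is hypothetical):
 *
 *     dword_t flag = 0;
 *     wait_condition_t cond = { .type = WAIT_UNTIL_NOT_EQUAL,
 *                               .pointer = &flag,
 *                               .value = 0 };
 *     if (scheduler_wait(&cond, 1000) == WAIT_TIMED_OUT) { ... }
 */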
wait_result_t scheduler_wait(wait_condition_t *condition, dword_t timeout)
{
    if (test_condition(condition)) return WAIT_CONDITION_HIT;
    if (timeout == 0) return WAIT_TIMED_OUT;

    wait_t wait = { .root = condition, .timeout = timeout, .timestamp = timer_get_milliseconds(), .result = WAIT_CANCELED };
    while (!__sync_bool_compare_and_swap(&current_thread->wait, NULL, &wait)) continue;

    syscall_yield_quantum();
    return wait.result;
}

sysret_t syscall_sleep(qword_t milliseconds)
{
    wait_condition_t condition = { .type = WAIT_ALWAYS };
    return scheduler_wait(&condition, milliseconds) == WAIT_CANCELED ? ERR_CANCELED : ERR_SUCCESS;
}

sysret_t syscall_yield_quantum()
{
    current_thread->quantum = 0;
    reschedule();
    return ERR_SUCCESS;
}
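
/*
 * Creates a kernel-mode thread running the given routine. The stack is
 * over-allocated by sizeof(uintptr_t) - 1 bytes so the initial esp can be
 * aligned, and the routine's parameter is pushed onto that stack before
 * the thread is handed to create_thread_internal().
 */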
dword_t create_system_thread(thread_procedure_t routine, dword_t flags, priority_t priority, dword_t stack_size, void *param, thread_t **new_thread)
{
    thread_state_t initial_state;
    memset(&initial_state, 0, sizeof(initial_state));

    if (!stack_size) stack_size = KERNEL_STACK_SIZE;

    void *kernel_stack = malloc(stack_size + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL) return ERR_NOMEMORY;

    dword_t ret = commit_pages(kernel_stack, stack_size);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        return ret;
    }

    initial_state.regs.eip = (dword_t)routine;
    initial_state.regs.esp = ((dword_t)kernel_stack + stack_size + 3) & ~3;
    push_to_stack((uintptr_t*)&initial_state.regs.esp, (uintptr_t)param);

    return create_thread_internal(kernel_process, &initial_state, flags, priority, kernel_stack, new_thread);
}
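
/*
 * Syscall wrapper around create_thread_internal(). User-mode callers get
 * their pointers validated and the initial state copied into kernel
 * memory, and they are not allowed to create threads inside the kernel
 * process.
 */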
sysret_t syscall_create_thread(handle_t process, thread_state_t *initial_state, dword_t flags, priority_t priority, handle_t *new_thread)
{
    dword_t ret;
    thread_state_t safe_state;
    process_t *proc;
    thread_t *thread;

    if (get_previous_mode() == USER_MODE)
    {
        /* Validate the whole structure, not just sizeof(pointer). */
        if (!check_usermode(initial_state, sizeof(*initial_state))) return ERR_BADPTR;
        if (!check_usermode(new_thread, sizeof(*new_thread))) return ERR_BADPTR;

        EH_TRY safe_state = *initial_state;
        EH_CATCH EH_ESCAPE(return ERR_BADPTR);
        EH_DONE;
    }
    else
    {
        safe_state = *initial_state;
    }

    if (process != INVALID_HANDLE)
    {
        if (!reference_by_handle(process, OBJECT_PROCESS, (object_t**)&proc)) return ERR_INVALID;
    }
    else
    {
        proc = get_current_process();
        reference(&proc->header);
    }

    if (get_previous_mode() == USER_MODE && proc == kernel_process)
    {
        ret = ERR_FORBIDDEN;
        goto cleanup;
    }

    void *kernel_stack = malloc(KERNEL_STACK_SIZE + sizeof(uintptr_t) - 1);
    if (kernel_stack == NULL)
    {
        ret = ERR_NOMEMORY;
        goto cleanup;
    }

    ret = commit_pages(kernel_stack, KERNEL_STACK_SIZE);
    if (ret != ERR_SUCCESS)
    {
        /* The old code leaked the stack on commit_pages() failure. */
        free(kernel_stack);
        goto cleanup;
    }

    ret = create_thread_internal(proc, &safe_state, flags, priority, kernel_stack, &thread);
    if (ret != ERR_SUCCESS)
    {
        free(kernel_stack);
        goto cleanup;
    }

    handle_t thread_handle;
    ret = open_object(&thread->header, 0, &thread_handle);
    if (ret == ERR_SUCCESS)
    {
        /* Only publish the handle if open_object() actually created one. */
        EH_TRY *new_thread = thread_handle;
        EH_CATCH syscall_close_object(thread_handle);
        EH_DONE;
    }

cleanup:
    dereference(&proc->header);
    return ret;
}

sysret_t syscall_open_thread(dword_t tid, handle_t *handle)
{
    int i;
    thread_t *thread = NULL;
    dword_t ret = ERR_NOTFOUND;
    critical_t critical;
    enter_critical(&critical);

    if (current_thread->tid == tid)
    {
        thread = current_thread;
    }
    else
    {
        for (i = 0; i < THREAD_PRIORITY_MAX; i++)
        {
            list_entry_t *ptr;
            for (ptr = thread_queue[i].next; ptr != &thread_queue[i]; ptr = ptr->next)
            {
                thread_t *entry = CONTAINER_OF(ptr, thread_t, in_queue_list);
                if (entry->tid == tid)
                {
                    thread = entry;
                    goto found;
                }
            }
        }
    }

found:
    if (thread != NULL) ret = open_object(&thread->header, 0, handle);
    else ret = ERR_NOTFOUND;

    leave_critical(&critical);
    return ret;
}

dword_t terminate_thread_internal(thread_t *thread, dword_t exit_code)
{
    thread->exit_code = exit_code;
    thread->terminating = TRUE;
    return ERR_SUCCESS;
}

sysret_t syscall_terminate_thread(handle_t handle, dword_t exit_code)
{
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    /* Drop the reference taken above; the old code leaked it. */
    dword_t ret = terminate_thread_internal(thread, exit_code);
    dereference(&thread->header);
    return ret;
}
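
/*
 * Reads one piece of information about a thread into the caller's buffer.
 * For user-mode callers the data is staged in a kernel-side buffer and
 * copied out under exception protection; querying the CPU state of the
 * current thread reads the live context instead of the saved one.
 */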
sysret_t syscall_query_thread(handle_t handle, thread_info_t info_type, void *buffer, size_t size)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;
        memset(safe_buffer, 0, size);
    }
    else
    {
        safe_buffer = buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            /* Don't leak the staging buffer on an invalid handle. */
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_TID_INFO:
        if (size >= sizeof(dword_t)) *((dword_t*)safe_buffer) = thread->tid;
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_FROZEN_INFO:
        if (size >= sizeof(int32_t)) *((int32_t*)safe_buffer) = thread->frozen;
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (current_thread->tid != thread->tid)
            {
                *((thread_state_t*)safe_buffer) = thread->state;
            }
            else
            {
                ((thread_state_t*)safe_buffer)->regs = *thread->last_context;
                cpu_save_fpu_state(((thread_state_t*)safe_buffer)->fpu_state);
            }
        }
        else
        {
            ret = ERR_SMALLBUF;
        }
        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) *((priority_t*)safe_buffer) = thread->priority;
        else ret = ERR_SMALLBUF;
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) *((affinity_t*)safe_buffer) = thread->affinity;
        else ret = ERR_SMALLBUF;
        /* The missing break here used to fall through into the default
         * case and overwrite ret with ERR_INVALID. */
        break;

    default:
        ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE)
    {
        EH_TRY memcpy(buffer, safe_buffer, size);
        EH_CATCH ret = ERR_BADPTR;
        EH_DONE;

        free(safe_buffer);
    }

    dereference(&thread->header);
    return ret;
}
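
/*
 * Writes one piece of information about a thread. Setting the CPU state of
 * a thread currently in user mode patches its saved registers; for a
 * thread inside the kernel it patches the interrupt context it will return
 * to. EFLAGS updates are masked with SAFE_EFLAGS_MASK so callers cannot
 * set privileged flags.
 */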
sysret_t syscall_set_thread(handle_t handle, thread_info_t info_type, const void *buffer, size_t size)
{
    /* ret was previously left uninitialized, so a successful
     * THREAD_CPU_STATE_INFO update returned garbage. */
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;
    void *safe_buffer;

    if (get_previous_mode() == USER_MODE)
    {
        if (!check_usermode(buffer, size)) return ERR_BADPTR;

        safe_buffer = malloc(size);
        if (safe_buffer == NULL) return ERR_NOMEMORY;

        /* Bail out on a faulting copy instead of continuing with a
         * partially filled buffer, and don't leak the staging buffer. */
        EH_TRY memcpy(safe_buffer, buffer, size);
        EH_CATCH
        {
            free(safe_buffer);
            EH_ESCAPE(return ERR_BADPTR);
        }
        EH_DONE;
    }
    else
    {
        safe_buffer = (void*)buffer;
    }

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread))
        {
            if (get_previous_mode() == USER_MODE) free(safe_buffer);
            return ERR_INVALID;
        }
    }

    switch (info_type)
    {
    case THREAD_CPU_STATE_INFO:
        if (size >= sizeof(thread_state_t))
        {
            if (thread->owner_process->pid == kernel_process->pid)
            {
                /* Break instead of returning directly so the staging buffer
                 * and the thread reference are still released below. */
                ret = ERR_FORBIDDEN;
                break;
            }

            thread_state_t *new_state = (thread_state_t*)safe_buffer;
            critical_t critical;
            if (current_thread->tid != thread->tid) enter_critical(&critical);

            if (thread->in_kernel == 0)
            {
                thread->state.regs.eax = new_state->regs.eax;
                thread->state.regs.ecx = new_state->regs.ecx;
                thread->state.regs.edx = new_state->regs.edx;
                thread->state.regs.ebx = new_state->regs.ebx;
                thread->state.regs.esp = new_state->regs.esp;
                thread->state.regs.ebp = new_state->regs.ebp;
                thread->state.regs.esi = new_state->regs.esi;
                thread->state.regs.edi = new_state->regs.edi;
                thread->state.regs.eip = new_state->regs.eip;
                thread->state.regs.eflags = (thread->state.regs.eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }
            else if (thread->last_context)
            {
                thread->last_context->eax = new_state->regs.eax;
                thread->last_context->ecx = new_state->regs.ecx;
                thread->last_context->edx = new_state->regs.edx;
                thread->last_context->ebx = new_state->regs.ebx;
                thread->last_context->esp = new_state->regs.esp;
                thread->last_context->ebp = new_state->regs.ebp;
                thread->last_context->esi = new_state->regs.esi;
                thread->last_context->edi = new_state->regs.edi;
                thread->last_context->eip = new_state->regs.eip;
                thread->last_context->eflags = (thread->last_context->eflags & ~SAFE_EFLAGS_MASK) | (new_state->regs.eflags & SAFE_EFLAGS_MASK);
            }

            if (current_thread->tid != thread->tid)
            {
                memcpy(thread->state.fpu_state, new_state->fpu_state, sizeof(thread->state.fpu_state));
            }
            else
            {
                cpu_restore_fpu_state(new_state->fpu_state);
            }

            if (current_thread->tid != thread->tid) leave_critical(&critical);
        }
        else
        {
            ret = ERR_SMALLBUF;
        }
        break;

    case THREAD_PRIORITY_INFO:
        if (size >= sizeof(priority_t)) thread->priority = *((priority_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        /* A missing break here used to fall through and also write the
         * affinity from the same buffer. */
        break;

    case THREAD_AFFINITY_INFO:
        if (size >= sizeof(affinity_t)) thread->affinity = *((affinity_t*)safe_buffer);
        else ret = ERR_SMALLBUF;
        break;

    default:
        ret = ERR_INVALID;
    }

    if (get_previous_mode() == USER_MODE) free(safe_buffer);
    dereference(&thread->header);
    return ret;
}

sysret_t syscall_freeze_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen++;

    dereference(&thread->header);
    return ret;
}

sysret_t syscall_thaw_thread(handle_t handle)
{
    dword_t ret = ERR_SUCCESS;
    thread_t *thread;

    if (handle == INVALID_HANDLE)
    {
        thread = get_current_thread();
        reference(&thread->header);
    }
    else
    {
        if (!reference_by_handle(handle, OBJECT_THREAD, (object_t**)&thread)) return ERR_INVALID;
    }

    thread->frozen--;

    dereference(&thread->header);
    return ret;
}
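
/*
 * Bootstraps threading: reserves TID 0 for the kernel main thread, wraps
 * the currently executing code in a thread object, initializes the
 * per-priority scheduler queues, and enables the scheduler.
 */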
void thread_init(void)
{
    int i;
    critical_t critical;

    memset(tid_alloc_bitmap, 0, sizeof(tid_alloc_bitmap));
    set_bit(tid_alloc_bitmap, 0);

    thread_t *main_thread = (thread_t*)malloc(sizeof(thread_t));
    if (main_thread == NULL) KERNEL_CRASH("Cannot allocate thread object");

    init_object(&main_thread->header, NULL, OBJECT_THREAD);
    if (create_object(&main_thread->header) != ERR_SUCCESS)
    {
        KERNEL_CRASH("Cannot initialize thread object");
    }

    main_thread->tid = 0;
    main_thread->priority = THREAD_PRIORITY_MID;

    main_thread->kernel_stack = malloc(KERNEL_STACK_SIZE);
    ASSERT(main_thread->kernel_stack != NULL);
    commit_pages(main_thread->kernel_stack, KERNEL_STACK_SIZE);
    main_thread->kernel_esp = ((uintptr_t)main_thread->kernel_stack + KERNEL_STACK_SIZE) & ~3;
    set_kernel_esp(main_thread->kernel_esp);

    main_thread->exit_code = 0;
    main_thread->quantum = 0;
    main_thread->running_ticks = 0ULL;
    main_thread->owner_process = kernel_process;
    list_append(&kernel_process->threads, &main_thread->in_process_list);
    main_thread->in_kernel = 1;
    main_thread->last_context = NULL;
    /* frozen and terminating were previously left uninitialized; malloc()
     * does not zero the allocation, so set them explicitly. */
    main_thread->frozen = 0;
    main_thread->terminating = FALSE;
    main_thread->terminated = FALSE;
    main_thread->previous_mode = KERNEL_MODE;
    main_thread->wait = NULL;

    memset(&main_thread->kernel_handler, 0, sizeof(main_thread->kernel_handler));
    memset(&main_thread->user_handler, 0, sizeof(main_thread->user_handler));

    enter_critical(&critical);

    current_thread = main_thread;
    for (i = 0; i < THREAD_PRIORITY_MAX; i++) list_init(&thread_queue[i]);
    scheduler_enabled = TRUE;

    leave_critical(&critical);
}