l64vsyscall.S 2.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117
  1. #include "mem.h"
  2. #include "amd64.h"
  3. #ifndef __ASSEMBLER__
  4. #define __ASSEMBLER__
  5. #endif
  6. .globl sce
  7. .globl scx
  8. /*
  9. * starting the user program up. First time.
  10. */
  11. .globl touser
  12. touser:
  13. CLI
  14. SWAPGS
  15. // we should be able to skip this step. We'll see.
  16. MOVQ $SSEL(SiUDS, SsRPL3), %rax
  17. MOVW %ax, %ds
  18. MOVW %ax, %es
  19. /*
  20. * FS is used for TLS. Don't touch!
  21. MOVW AX, FS
  22. */
  23. MOVW %ax, %gs
  24. // Hmm. Assuumes many things, eh? Assumes plan 9 a.out
  25. // format. This will fail.
  26. MOVQ $(UTZERO+0x20), %rcx /* ip */
  27. MOVQ $If, %R11 /* flags */
  28. MOVQ %RDI, %RSP /* sp */
  29. sysretq
  30. .globl syscallentry
  31. syscallentry:
  32. incq sce
  33. SWAPGS
  34. movq %r15, %gs:32 /* stash r15 to m->rathole */
  35. movq %gs:16, %r15 /* m->proc */
  36. movq (16*8)(%r15), %r15 /* m->proc->kstack */
  37. xchgq %r15, %rsp
  38. addq $KSTACK, %rsp
  39. // start building ureg
  40. PUSHQ $SSEL(SiUDS, SsRPL3) /* ureg.ss old stack segment */
  41. PUSHQ %r15 /* ureg.sp old sp */
  42. PUSHQ %r11 /* ureg.flags old flags */
  43. PUSHQ $SSEL(SiUCS, SsRPL3) /* ureg.cs old code segment */
  44. PUSHQ %rCX /* ureg.ip old ip */
  45. movq %gs:32, %r15 /* restore r15 from m->rathole */
  46. SUBQ $(17*8), %rsp
  47. MOVQ %rAX, (0*8)(%rsp) // ureg.ax
  48. MOVQ %rBX, (1*8)(%rsp) // ureg.bx
  49. MOVQ %rCX, (2*8)(%rsp) // ureg.cx
  50. MOVQ %rDX, (3*8)(%rsp) // ureg.dx
  51. MOVQ %rSI, (4*8)(%rsp) // ...
  52. MOVQ %rDI, (5*8)(%rsp)
  53. MOVQ %rBP, (6*8)(%rsp)
  54. MOVQ %r8, (7*8)(%rsp)
  55. MOVQ %r9, (8*8)(%rsp)
  56. MOVQ %r10, (9*8)(%rsp)
  57. MOVQ %r11, (10*8)(%rsp)
  58. MOVQ %r12, (11*8)(%rsp)
  59. MOVQ %r13, (12*8)(%rsp)
  60. MOVQ %r14, (13*8)(%rsp)
  61. MOVQ %r15, (14*8)(%rsp)
  62. // (15*8)(%rsp) // ureg.type
  63. // (16*8)(%rsp) // ureg.error
  64. MOVQ %rsp, %rsi /* Ureg* */
  65. // system call number is in %rax, as per linux.
  66. movq %rax, %rdi
  67. xorq %rax, %rax
  68. pushq %rax
  69. popfq /* clear all flags. is there something else we should clear too? */
  70. movq $0, %rbp /* stack traces end here */
  71. CALL syscall
  72. .globl syscallreturn
  73. syscallreturn:
  74. // restore from ureg
  75. MOVQ (0*8)(%rsp),%rAX
  76. MOVQ (1*8)(%rsp),%rBX
  77. MOVQ (2*8)(%rsp),%rCX
  78. MOVQ (3*8)(%rsp),%rDX
  79. MOVQ (4*8)(%rsp),%rSI
  80. MOVQ (5*8)(%rsp),%rDI
  81. MOVQ (6*8)(%rsp),%rBP
  82. MOVQ (7*8)(%rsp),%r8
  83. MOVQ (8*8)(%rsp),%r9
  84. MOVQ (9*8)(%rsp),%r10
  85. MOVQ (10*8)(%rsp),%r11
  86. MOVQ (11*8)(%rsp),%r12
  87. MOVQ (12*8)(%rsp),%r13
  88. MOVQ (13*8)(%rsp),%r14
  89. MOVQ (14*8)(%rsp),%r15
  90. ADDQ $(17*8), %rsp /* registers + arguments */
  91. CLI
  92. SWAPGS
  93. MOVQ 0(%rsp), %rCX /* ip */
  94. MOVQ 16(%rsp), %r11 /* flags */
  95. MOVQ 24(%rsp), %rSP /* sp */
  96. incq scx
  97. sysretq
  98. .globl sysrforkret
  99. sysrforkret:
  100. MOVQ $0, 0(%rsp)
  101. JMP syscallreturn