
syscall_arch.h

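/* Split a 64-bit syscall argument into the two 32-bit registers it
 * occupies on a 32-bit arch. The _O ("odd") variant prepends a zero pad
 * word so the pair lands in an even/odd register pair, as the ARM EABI
 * requires for 64-bit arguments. */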
#define __SYSCALL_LL_E(x) \
((union { long long ll; long l[2]; }){ .ll = x }).l[0], \
((union { long long ll; long l[2]; }){ .ll = x }).l[1]
#define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))

#ifdef __thumb__

/* Avoid use of r7 in asm constraints when producing thumb code,
 * since it's reserved as frame pointer and might not be supported. */
#define __ASM____R7__
#define __asm_syscall(...) do { \
	__asm__ __volatile__ ( "mov %1,r7 ; mov r7,%2 ; svc 0 ; mov r7,%1" \
	: "=r"(r0), "=&r"((int){0}) : __VA_ARGS__ : "memory"); \
	return r0; \
	} while (0)

#else

#define __ASM____R7__ __asm__("r7")
#define __asm_syscall(...) do { \
	__asm__ __volatile__ ( "svc 0" \
	: "=r"(r0) : __VA_ARGS__ : "memory"); \
	return r0; \
	} while (0)
#endif

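/* Illustration (not part of the interface): with the non-thumb
 * definitions above, __syscall1(n, a) below expands to roughly
 *
 *	register long r7 __asm__("r7") = n;
 *	register long r0 __asm__("r0") = a;
 *	__asm__ __volatile__ ("svc 0" : "=r"(r0) : "r"(r7), "0"(r0) : "memory");
 *	return r0;
 *
 * i.e. the syscall number travels in r7, arguments in r0-r5, and the
 * kernel's return value comes back in r0. */
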
/* For thumb2, we can allow 8-bit immediate syscall numbers, saving a
 * register in the above dance around r7. Does not work for thumb1 where
 * only movs, not mov, supports immediates, and we can't use movs because
 * it doesn't support high regs. */
#ifdef __thumb2__
#define R7_OPERAND "rI"(r7)
#else
#define R7_OPERAND "r"(r7)
#endif

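/* "I" is GCC's ARM constraint for an integer valid as an immediate in a
 * data-processing instruction, so under thumb2 the compiler may emit
 * mov r7,#n directly instead of staging the syscall number through a
 * scratch register. */
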
static inline long __syscall0(long n)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0");
	__asm_syscall(R7_OPERAND);
}

static inline long __syscall1(long n, long a)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	__asm_syscall(R7_OPERAND, "0"(r0));
}

static inline long __syscall2(long n, long a, long b)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1));
}

static inline long __syscall3(long n, long a, long b, long c)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2));
}

static inline long __syscall4(long n, long a, long b, long c, long d)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	register long r3 __asm__("r3") = d;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3));
}

static inline long __syscall5(long n, long a, long b, long c, long d, long e)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	register long r3 __asm__("r3") = d;
	register long r4 __asm__("r4") = e;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
}

static inline long __syscall6(long n, long a, long b, long c, long d, long e, long f)
{
	register long r7 __ASM____R7__ = n;
	register long r0 __asm__("r0") = a;
	register long r1 __asm__("r1") = b;
	register long r2 __asm__("r2") = c;
	register long r3 __asm__("r3") = d;
	register long r4 __asm__("r4") = e;
	register long r5 __asm__("r5") = f;
	__asm_syscall(R7_OPERAND, "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
}
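
/* Usage sketch (illustrative; the real callers live in musl's generic
 * syscall machinery, which dispatches on argument count):
 *
 *	long ret = __syscall3(SYS_write, fd, (long)buf, (long)len);
 *
 * A return value in -4095..-1 encodes a negated errno. */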
/* fadvise takes the 6-argument arm_fadvise64_64 form, with advice moved
 * ahead of the 64-bit offset/len arguments to keep those pairs
 * register-aligned. */
#define SYSCALL_FADVISE_6_ARG
/* The legacy sysvipc interface reports the ipc_perm mode field in a
 * broken layout that the sysvipc wrappers must fix up. */
#define SYSCALL_IPC_BROKEN_MODE
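
/* The vdso exports both the legacy 32-bit clock_gettime and the time64
 * variant; VDSO_CGT_WORKAROUND tells the generic clock_gettime code to
 * guard against kernels whose clock_gettime64 vdso entry is unreliable. */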
#define VDSO_USEFUL
#define VDSO_CGT32_SYM "__vdso_clock_gettime"
#define VDSO_CGT32_VER "LINUX_2.6"
#define VDSO_CGT_SYM "__vdso_clock_gettime64"
#define VDSO_CGT_VER "LINUX_2.6"
#define VDSO_CGT_WORKAROUND 1