2
0

062-04-MIPS-Switch-to-the-irq_stack-in-interrupts.patch 2.8 KB

(line-number gutter from the original page, lines 1–116, omitted)
  1. From: Matt Redfearn <matt.redfearn@imgtec.com>
  2. Date: Mon, 19 Dec 2016 14:20:59 +0000
  3. Subject: [PATCH] MIPS: Switch to the irq_stack in interrupts
  4. When entering interrupt context via handle_int or except_vec_vi, switch
  5. to the irq_stack of the current CPU if it is not already in use.
  6. The current stack pointer is masked with the thread size and compared to
  7. the base of the irq stack. If it does not match then the stack pointer
  8. is set to the top of that stack, otherwise this is a nested irq being
  9. handled on the irq stack so the stack pointer should be left as it was.
  10. The in-use stack pointer is placed in the callee saved register s1. It
  11. will be saved to the stack when plat_irq_dispatch is invoked and can be
  12. restored once control returns here.
  13. Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
  14. ---
  15. --- a/arch/mips/kernel/genex.S
  16. +++ b/arch/mips/kernel/genex.S
  17. @@ -188,9 +188,44 @@ NESTED(handle_int, PT_SIZE, sp)
  18. LONG_L s0, TI_REGS($28)
  19. LONG_S sp, TI_REGS($28)
  20. - PTR_LA ra, ret_from_irq
  21. - PTR_LA v0, plat_irq_dispatch
  22. - jr v0
  23. +
  24. + /*
  25. + * SAVE_ALL ensures we are using a valid kernel stack for the thread.
  26. + * Check if we are already using the IRQ stack.
  27. + */
  28. + move s1, sp # Preserve the sp
  29. +
  30. + /* Get IRQ stack for this CPU */
  31. + ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
  32. +#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
  33. + lui k1, %hi(irq_stack)
  34. +#else
  35. + lui k1, %highest(irq_stack)
  36. + daddiu k1, %higher(irq_stack)
  37. + dsll k1, 16
  38. + daddiu k1, %hi(irq_stack)
  39. + dsll k1, 16
  40. +#endif
  41. + LONG_SRL k0, SMP_CPUID_PTRSHIFT
  42. + LONG_ADDU k1, k0
  43. + LONG_L t0, %lo(irq_stack)(k1)
  44. +
  45. + # Check if already on IRQ stack
  46. + PTR_LI t1, ~(_THREAD_SIZE-1)
  47. + and t1, t1, sp
  48. + beq t0, t1, 2f
  49. +
  50. + /* Switch to IRQ stack */
  51. + li t1, _IRQ_STACK_SIZE
  52. + PTR_ADD sp, t0, t1
  53. +
  54. +2:
  55. + jal plat_irq_dispatch
  56. +
  57. + /* Restore sp */
  58. + move sp, s1
  59. +
  60. + j ret_from_irq
  61. #ifdef CONFIG_CPU_MICROMIPS
  62. nop
  63. #endif
  64. @@ -263,8 +298,44 @@ NESTED(except_vec_vi_handler, 0, sp)
  65. LONG_L s0, TI_REGS($28)
  66. LONG_S sp, TI_REGS($28)
  67. - PTR_LA ra, ret_from_irq
  68. - jr v0
  69. +
  70. + /*
  71. + * SAVE_ALL ensures we are using a valid kernel stack for the thread.
  72. + * Check if we are already using the IRQ stack.
  73. + */
  74. + move s1, sp # Preserve the sp
  75. +
  76. + /* Get IRQ stack for this CPU */
  77. + ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
  78. +#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
  79. + lui k1, %hi(irq_stack)
  80. +#else
  81. + lui k1, %highest(irq_stack)
  82. + daddiu k1, %higher(irq_stack)
  83. + dsll k1, 16
  84. + daddiu k1, %hi(irq_stack)
  85. + dsll k1, 16
  86. +#endif
  87. + LONG_SRL k0, SMP_CPUID_PTRSHIFT
  88. + LONG_ADDU k1, k0
  89. + LONG_L t0, %lo(irq_stack)(k1)
  90. +
  91. + # Check if already on IRQ stack
  92. + PTR_LI t1, ~(_THREAD_SIZE-1)
  93. + and t1, t1, sp
  94. + beq t0, t1, 2f
  95. +
  96. + /* Switch to IRQ stack */
  97. + li t1, _IRQ_STACK_SIZE
  98. + PTR_ADD sp, t0, t1
  99. +
  100. +2:
  101. + jal plat_irq_dispatch
  102. +
  103. + /* Restore sp */
  104. + move sp, s1
  105. +
  106. + j ret_from_irq
  107. END(except_vec_vi_handler)
  108. /*