700-RISCV-Inline-subword-atomic-ops.patch 51 KB

  1. From f797260adaf52bee0ec0e16190bbefbe1bfc3692 Mon Sep 17 00:00:00 2001
  2. From: Patrick O'Neill <patrick@rivosinc.com>
  3. Date: Tue, 18 Apr 2023 14:33:13 -0700
  4. Subject: [PATCH] RISCV: Inline subword atomic ops
  5. RISC-V has no support for subword atomic operations; code currently
  6. generates libatomic library calls.
  7. This patch changes the default behavior to inline subword atomic calls
  8. (using the same logic as the existing library call).
  9. Behavior can be specified using the -minline-atomics and
  10. -mno-inline-atomics command line flags.
  11. gcc/libgcc/config/riscv/atomic.c has the same logic implemented in asm.
  12. This will need to stay for backwards compatibility and the
  13. -mno-inline-atomics flag.
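To illustrate the approach, here is a minimal C sketch of the word-masking idea (not part of the patch; RISC-V is little-endian, and the __atomic builtins stand in for the LR/SC loop the patch actually emits):

    #include <stdint.h>

    /* Emulate a 1-byte fetch-and-add with 32-bit atomics on the
       containing word, as both this patch and libgcc's atomic.c do.  */
    uint8_t
    sketch_fetch_add_u8 (uint8_t *p, uint8_t val)
    {
      uint32_t *word = (uint32_t *) ((uintptr_t) p & ~(uintptr_t) 3);
      unsigned shift = ((uintptr_t) p & 3) * 8;  /* byte index -> bit shift */
      uint32_t mask = (uint32_t) 0xff << shift;

      uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
      uint32_t new_word;
      do
        {
          uint8_t new_byte = ((old & mask) >> shift) + val;
          new_word = (old & ~mask) | ((uint32_t) new_byte << shift);
        }
      while (!__atomic_compare_exchange_n (word, &old, new_word, 1,
                                           __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
      return (old & mask) >> shift;              /* original byte value */
    }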
  14. 2023-04-18 Patrick O'Neill <patrick@rivosinc.com>
  15. gcc/ChangeLog:
  16. PR target/104338
  17. * config/riscv/riscv-protos.h: Add helper function stubs.
  18. * config/riscv/riscv.cc: Add helper functions for subword masking.
  19. * config/riscv/riscv.opt: Add command-line flag.
  20. * config/riscv/sync.md: Add masking logic and inline asm for fetch_and_op,
  21. fetch_and_nand, CAS, and exchange ops.
  22. * doc/invoke.texi: Add blurb regarding command-line flag.
  23. libgcc/ChangeLog:
  24. PR target/104338
  25. * config/riscv/atomic.c: Add reference to duplicate logic.
  26. gcc/testsuite/ChangeLog:
  27. PR target/104338
  28. * gcc.target/riscv/inline-atomics-1.c: New test.
  29. * gcc.target/riscv/inline-atomics-2.c: New test.
  30. * gcc.target/riscv/inline-atomics-3.c: New test.
  31. * gcc.target/riscv/inline-atomics-4.c: New test.
  32. * gcc.target/riscv/inline-atomics-5.c: New test.
  33. * gcc.target/riscv/inline-atomics-6.c: New test.
  34. * gcc.target/riscv/inline-atomics-7.c: New test.
  35. * gcc.target/riscv/inline-atomics-8.c: New test.
  36. Signed-off-by: Patrick O'Neill <patrick@rivosinc.com>
  37. Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
  38. ---
  39. gcc/config/riscv/riscv-protos.h | 2 +
  40. gcc/config/riscv/riscv.cc | 49 ++
  41. gcc/config/riscv/riscv.opt | 4 +
  42. gcc/config/riscv/sync.md | 301 +++++++++
  43. gcc/doc/invoke.texi | 10 +-
  44. .../gcc.target/riscv/inline-atomics-1.c | 18 +
  45. .../gcc.target/riscv/inline-atomics-2.c | 9 +
  46. .../gcc.target/riscv/inline-atomics-3.c | 569 ++++++++++++++++++
  47. .../gcc.target/riscv/inline-atomics-4.c | 566 +++++++++++++++++
  48. .../gcc.target/riscv/inline-atomics-5.c | 87 +++
  49. .../gcc.target/riscv/inline-atomics-6.c | 87 +++
  50. .../gcc.target/riscv/inline-atomics-7.c | 69 +++
  51. .../gcc.target/riscv/inline-atomics-8.c | 69 +++
  52. libgcc/config/riscv/atomic.c | 2 +
  53. 14 files changed, 1841 insertions(+), 1 deletion(-)
  54. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-1.c
  55. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-2.c
  56. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-3.c
  57. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-4.c
  58. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-5.c
  59. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-6.c
  60. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-7.c
  61. create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-8.c
  62. --- a/gcc/config/riscv/riscv-protos.h
  63. +++ b/gcc/config/riscv/riscv-protos.h
  64. @@ -74,6 +74,8 @@ extern bool riscv_expand_block_move (rtx
  65. extern bool riscv_store_data_bypass_p (rtx_insn *, rtx_insn *);
  66. extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info *);
  67. extern bool riscv_gpr_save_operation_p (rtx);
  68. +extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *);
  69. +extern void riscv_lshift_subword (machine_mode, rtx, rtx, rtx *);
  70. /* Routines implemented in riscv-c.c. */
  71. void riscv_cpu_cpp_builtins (cpp_reader *);
  72. --- a/gcc/config/riscv/riscv.cc
  73. +++ b/gcc/config/riscv/riscv.cc
  74. @@ -5351,6 +5351,55 @@ riscv_asan_shadow_offset (void)
  75. return TARGET_64BIT ? (HOST_WIDE_INT_1 << 29) : 0;
  76. }
  77. +/* Given memory reference MEM, expand code to compute the aligned
  78. +   memory address, shift and mask values and store them into
  79. +   *ALIGNED_MEM, *SHIFT, *MASK and *NOT_MASK.  */
  80. +
  81. +void
  82. +riscv_subword_address (rtx mem, rtx *aligned_mem, rtx *shift, rtx *mask,
  83. +                       rtx *not_mask)
  84. +{
  85. +  /* Align the memory address to a word.  */
  86. +  rtx addr = force_reg (Pmode, XEXP (mem, 0));
  87. +
  88. +  rtx addr_mask = gen_int_mode (-4, Pmode);
  89. +
  90. +  rtx aligned_addr = gen_reg_rtx (Pmode);
  91. +  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, addr_mask));
  92. +
  93. +  *aligned_mem = change_address (mem, SImode, aligned_addr);
  94. +
  95. +  /* Calculate the shift amount.  */
  96. +  emit_move_insn (*shift, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
  97. +                                       gen_int_mode (3, SImode)));
  98. +  emit_move_insn (*shift, gen_rtx_ASHIFT (SImode, *shift,
  99. +                                          gen_int_mode (3, SImode)));
  100. +
  101. +  /* Calculate the mask.  */
  102. +  int unshifted_mask = GET_MODE_MASK (GET_MODE (mem));
  103. +
  104. +  emit_move_insn (*mask, gen_int_mode (unshifted_mask, SImode));
  105. +
  106. +  emit_move_insn (*mask, gen_rtx_ASHIFT (SImode, *mask,
  107. +                                         gen_lowpart (QImode, *shift)));
  108. +
  109. +  emit_move_insn (*not_mask, gen_rtx_NOT (SImode, *mask));
  110. +}
  111. +
  112. +/* Leftshift a subword within an SImode register.  */
  113. +
  114. +void
  115. +riscv_lshift_subword (machine_mode mode, rtx value, rtx shift,
  116. +                      rtx *shifted_value)
  117. +{
  118. +  rtx value_reg = gen_reg_rtx (SImode);
  119. +  emit_move_insn (value_reg, simplify_gen_subreg (SImode, value,
  120. +                                                  mode, 0));
  121. +
  122. +  emit_move_insn (*shifted_value, gen_rtx_ASHIFT (SImode, value_reg,
  123. +                                                  gen_lowpart (QImode, shift)));
  124. +}
  125. +
  126. /* Initialize the GCC target structure. */
  127. #undef TARGET_ASM_ALIGNED_HI_OP
  128. #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
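To make the helper concrete, a worked example (illustrative values) for a halfword (HImode) reference at address 0x1006:

    aligned_addr = 0x1006 & -4      = 0x1004
    shift        = (0x1006 & 3) * 8 = 16
    mask         = 0xffff << 16     = 0xffff0000
    not_mask     = ~mask            = 0x0000ffff

So the halfword occupies bits [31:16] of the aligned SImode word (RISC-V is little-endian), and riscv_lshift_subword places the operand value at the same bit position.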
  129. --- a/gcc/config/riscv/riscv.opt
  130. +++ b/gcc/config/riscv/riscv.opt
  131. @@ -195,6 +195,10 @@ long riscv_stack_protector_guard_offset
  132. TargetVariable
  133. int riscv_zi_subext
  134. +minline-atomics
  135. +Target Var(TARGET_INLINE_SUBWORD_ATOMIC) Init(1)
  136. +Always inline subword atomic operations.
  137. +
  138. Enum
  139. Name(isa_spec_class) Type(enum riscv_isa_spec_class)
  140. Supported ISA specs (for use with the -misa-spec= option):
  141. --- a/gcc/config/riscv/sync.md
  142. +++ b/gcc/config/riscv/sync.md
  143. @@ -21,8 +21,11 @@
  144. (define_c_enum "unspec" [
  145. UNSPEC_COMPARE_AND_SWAP
  146. + UNSPEC_COMPARE_AND_SWAP_SUBWORD
  147. UNSPEC_SYNC_OLD_OP
  148. + UNSPEC_SYNC_OLD_OP_SUBWORD
  149. UNSPEC_SYNC_EXCHANGE
  150. + UNSPEC_SYNC_EXCHANGE_SUBWORD
  151. UNSPEC_ATOMIC_STORE
  152. UNSPEC_MEMORY_BARRIER
  153. ])
  154. @@ -92,6 +95,135 @@
  155. "%F3amo<insn>.<amo>%A3 %0,%z2,%1"
  156. [(set (attr "length") (const_int 8))])
  157. +(define_insn "subword_atomic_fetch_strong_<atomic_optab>"
  158. + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
  159. + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
  160. + (set (match_dup 1)
  161. + (unspec_volatile:SI
  162. + [(any_atomic:SI (match_dup 1)
  163. + (match_operand:SI 2 "register_operand" "rI")) ;; value for op
  164. + (match_operand:SI 3 "register_operand" "rI")] ;; mask
  165. + UNSPEC_SYNC_OLD_OP_SUBWORD))
  166. + (match_operand:SI 4 "register_operand" "rI") ;; not_mask
  167. + (clobber (match_scratch:SI 5 "=&r")) ;; tmp_1
  168. + (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_2
  169. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  170. + {
  171. + return "1:\;"
  172. + "lr.w.aq\t%0, %1\;"
  173. + "<insn>\t%5, %0, %2\;"
  174. + "and\t%5, %5, %3\;"
  175. + "and\t%6, %0, %4\;"
  176. + "or\t%6, %6, %5\;"
  177. + "sc.w.rl\t%5, %6, %1\;"
  178. + "bnez\t%5, 1b";
  179. + }
  180. + [(set (attr "length") (const_int 28))])
  181. +
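For reviewers, the same loop written as free-standing GNU C inline asm (illustrative only, shown for <insn> = add; the function and operand names are assumptions, but the instruction sequence matches the template above):

    /* One retry loop: load-reserve the word, apply the op to the masked
       subword, splice it back into the word, store-conditional, retry on
       failure.  Assumes rv32/rv64 with the A extension.  */
    static inline unsigned
    subword_fetch_add_loop (unsigned *aligned_mem, unsigned shifted_value,
                            unsigned mask, unsigned not_mask)
    {
      unsigned old, tmp1, tmp2;
      __asm__ __volatile__ (
        "1:\n\t"
        "lr.w.aq\t%[old], %[mem]\n\t"
        "add\t%[t1], %[old], %[val]\n\t"
        "and\t%[t1], %[t1], %[msk]\n\t"
        "and\t%[t2], %[old], %[nmsk]\n\t"
        "or\t%[t2], %[t2], %[t1]\n\t"
        "sc.w.rl\t%[t1], %[t2], %[mem]\n\t"
        "bnez\t%[t1], 1b"
        : [old] "=&r" (old), [t1] "=&r" (tmp1), [t2] "=&r" (tmp2),
          [mem] "+A" (*aligned_mem)
        : [val] "r" (shifted_value), [msk] "r" (mask), [nmsk] "r" (not_mask)
        : "memory");
      return old;
    }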
  182. +(define_expand "atomic_fetch_nand<mode>"
  183. + [(match_operand:SHORT 0 "register_operand") ;; old value at mem
  184. + (not:SHORT (and:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
  185. + (match_operand:SHORT 2 "reg_or_0_operand"))) ;; value for op
  186. + (match_operand:SI 3 "const_int_operand")] ;; model
  187. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  188. +{
  189. + /* We have no QImode/HImode atomics, so form a mask, then use
  190. + subword_atomic_fetch_strong_nand to implement a LR/SC version of the
  191. + operation. */
  192. +
  193. + /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
  194. + is disabled */
  195. +
  196. + rtx old = gen_reg_rtx (SImode);
  197. + rtx mem = operands[1];
  198. + rtx value = operands[2];
  199. + rtx aligned_mem = gen_reg_rtx (SImode);
  200. + rtx shift = gen_reg_rtx (SImode);
  201. + rtx mask = gen_reg_rtx (SImode);
  202. + rtx not_mask = gen_reg_rtx (SImode);
  203. +
  204. + riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
  205. +
  206. + rtx shifted_value = gen_reg_rtx (SImode);
  207. + riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
  208. +
  209. + emit_insn (gen_subword_atomic_fetch_strong_nand (old, aligned_mem,
  210. + shifted_value,
  211. + mask, not_mask));
  212. +
  213. + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
  214. + gen_lowpart (QImode, shift)));
  215. +
  216. + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
  217. +
  218. + DONE;
  219. +})
  220. +
  221. +(define_insn "subword_atomic_fetch_strong_nand"
  222. + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
  223. + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
  224. + (set (match_dup 1)
  225. + (unspec_volatile:SI
  226. + [(not:SI (and:SI (match_dup 1)
  227. + (match_operand:SI 2 "register_operand" "rI"))) ;; value for op
  228. + (match_operand:SI 3 "register_operand" "rI")] ;; mask
  229. + UNSPEC_SYNC_OLD_OP_SUBWORD))
  230. + (match_operand:SI 4 "register_operand" "rI") ;; not_mask
  231. + (clobber (match_scratch:SI 5 "=&r")) ;; tmp_1
  232. + (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_2
  233. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  234. + {
  235. + return "1:\;"
  236. + "lr.w.aq\t%0, %1\;"
  237. + "and\t%5, %0, %2\;"
  238. + "not\t%5, %5\;"
  239. + "and\t%5, %5, %3\;"
  240. + "and\t%6, %0, %4\;"
  241. + "or\t%6, %6, %5\;"
  242. + "sc.w.rl\t%5, %6, %1\;"
  243. + "bnez\t%5, 1b";
  244. + }
  245. + [(set (attr "length") (const_int 32))])
  246. +
  247. +(define_expand "atomic_fetch_<atomic_optab><mode>"
  248. + [(match_operand:SHORT 0 "register_operand") ;; old value at mem
  249. + (any_atomic:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
  250. + (match_operand:SHORT 2 "reg_or_0_operand")) ;; value for op
  251. + (match_operand:SI 3 "const_int_operand")] ;; model
  252. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  253. +{
  254. + /* We have no QImode/HImode atomics, so form a mask, then use
  255. + subword_atomic_fetch_strong_<mode> to implement a LR/SC version of the
  256. + operation. */
  257. +
  258. + /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
  259. + is disabled */
  260. +
  261. + rtx old = gen_reg_rtx (SImode);
  262. + rtx mem = operands[1];
  263. + rtx value = operands[2];
  264. + rtx aligned_mem = gen_reg_rtx (SImode);
  265. + rtx shift = gen_reg_rtx (SImode);
  266. + rtx mask = gen_reg_rtx (SImode);
  267. + rtx not_mask = gen_reg_rtx (SImode);
  268. +
  269. + riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
  270. +
  271. + rtx shifted_value = gen_reg_rtx (SImode);
  272. + riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
  273. +
  274. + emit_insn (gen_subword_atomic_fetch_strong_<atomic_optab> (old, aligned_mem,
  275. + shifted_value,
  276. + mask, not_mask));
  277. +
  278. + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
  279. + gen_lowpart (QImode, shift)));
  280. +
  281. + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
  282. +
  283. + DONE;
  284. +})
  285. +
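Putting the expander together, the overall lowering for, say, __atomic_fetch_or on a char is (sketch with assumed pseudo-names; the real code is the RTL sequence above):

    riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
    shifted_value = (SImode) value << shift;     /* riscv_lshift_subword */
    old = subword_atomic_fetch_strong_or (aligned_mem, shifted_value,
                                          mask, not_mask);
    result = (char) (old >> shift);              /* ashiftrt, then lowpart */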
  286. (define_insn "atomic_exchange<mode>"
  287. [(set (match_operand:GPR 0 "register_operand" "=&r")
  288. (unspec_volatile:GPR
  289. @@ -104,6 +236,56 @@
  290. "%F3amoswap.<amo>%A3 %0,%z2,%1"
  291. [(set (attr "length") (const_int 8))])
  292. +(define_expand "atomic_exchange<mode>"
  293. + [(match_operand:SHORT 0 "register_operand") ;; old value at mem
  294. + (match_operand:SHORT 1 "memory_operand") ;; mem location
  295. + (match_operand:SHORT 2 "register_operand") ;; value
  296. + (match_operand:SI 3 "const_int_operand")] ;; model
  297. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  298. +{
  299. + rtx old = gen_reg_rtx (SImode);
  300. + rtx mem = operands[1];
  301. + rtx value = operands[2];
  302. + rtx aligned_mem = gen_reg_rtx (SImode);
  303. + rtx shift = gen_reg_rtx (SImode);
  304. + rtx mask = gen_reg_rtx (SImode);
  305. + rtx not_mask = gen_reg_rtx (SImode);
  306. +
  307. + riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
  308. +
  309. + rtx shifted_value = gen_reg_rtx (SImode);
  310. + riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
  311. +
  312. + emit_insn (gen_subword_atomic_exchange_strong (old, aligned_mem,
  313. + shifted_value, not_mask));
  314. +
  315. + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
  316. + gen_lowpart (QImode, shift)));
  317. +
  318. + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
  319. + DONE;
  320. +})
  321. +
  322. +(define_insn "subword_atomic_exchange_strong"
  323. + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
  324. + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
  325. + (set (match_dup 1)
  326. + (unspec_volatile:SI
  327. + [(match_operand:SI 2 "reg_or_0_operand" "rI") ;; value
  328. + (match_operand:SI 3 "reg_or_0_operand" "rI")] ;; not_mask
  329. + UNSPEC_SYNC_EXCHANGE_SUBWORD))
  330. + (clobber (match_scratch:SI 4 "=&r"))] ;; tmp_1
  331. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  332. + {
  333. + return "1:\;"
  334. + "lr.w.aq\t%0, %1\;"
  335. + "and\t%4, %0, %3\;"
  336. + "or\t%4, %4, %2\;"
  337. + "sc.w.rl\t%4, %4, %1\;"
  338. + "bnez\t%4, 1b";
  339. + }
  340. + [(set (attr "length") (const_int 20))])
  341. +
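In C terms the exchange loop is (illustrative sketch; a 32-bit CAS stands in for LR/SC, and the names are assumptions):

    static unsigned
    sketch_subword_exchange (unsigned *aligned_mem, unsigned shifted_value,
                             unsigned not_mask)
    {
      unsigned old = __atomic_load_n (aligned_mem, __ATOMIC_RELAXED);
      unsigned new_word;
      do
        /* Keep the neighbouring bytes, OR in the shifted new value.  */
        new_word = (old & not_mask) | shifted_value;
      while (!__atomic_compare_exchange_n (aligned_mem, &old, new_word, 0,
                                           __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
      return old;  /* the expander shifts/truncates this to the subword */
    }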
  342. (define_insn "atomic_cas_value_strong<mode>"
  343. [(set (match_operand:GPR 0 "register_operand" "=&r")
  344. (match_operand:GPR 1 "memory_operand" "+A"))
  345. @@ -152,6 +334,125 @@
  346. DONE;
  347. })
  348. +(define_expand "atomic_compare_and_swap<mode>"
  349. + [(match_operand:SI 0 "register_operand") ;; bool output
  350. + (match_operand:SHORT 1 "register_operand") ;; val output
  351. + (match_operand:SHORT 2 "memory_operand") ;; memory
  352. + (match_operand:SHORT 3 "reg_or_0_operand") ;; expected value
  353. + (match_operand:SHORT 4 "reg_or_0_operand") ;; desired value
  354. + (match_operand:SI 5 "const_int_operand") ;; is_weak
  355. + (match_operand:SI 6 "const_int_operand") ;; mod_s
  356. + (match_operand:SI 7 "const_int_operand")] ;; mod_f
  357. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  358. +{
  359. + emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
  360. + operands[3], operands[4],
  361. + operands[6], operands[7]));
  362. +
  363. + rtx val = gen_reg_rtx (SImode);
  364. + if (operands[1] != const0_rtx)
  365. + emit_move_insn (val, gen_rtx_SIGN_EXTEND (SImode, operands[1]));
  366. + else
  367. + emit_move_insn (val, const0_rtx);
  368. +
  369. + rtx exp = gen_reg_rtx (SImode);
  370. + if (operands[3] != const0_rtx)
  371. + emit_move_insn (exp, gen_rtx_SIGN_EXTEND (SImode, operands[3]));
  372. + else
  373. + emit_move_insn (exp, const0_rtx);
  374. +
  375. + rtx compare = val;
  376. + if (exp != const0_rtx)
  377. + {
  378. + rtx difference = gen_rtx_MINUS (SImode, val, exp);
  379. + compare = gen_reg_rtx (SImode);
  380. + emit_move_insn (compare, difference);
  381. + }
  382. +
  383. + if (word_mode != SImode)
  384. + {
  385. + rtx reg = gen_reg_rtx (word_mode);
  386. + emit_move_insn (reg, gen_rtx_SIGN_EXTEND (word_mode, compare));
  387. + compare = reg;
  388. + }
  389. +
  390. + emit_move_insn (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx));
  391. + DONE;
  392. +})
  393. +
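The sign extensions above matter because the subword result can sit in its register with arbitrary upper bits; extending both sides makes the subtraction-based equality test well defined. A concrete case (illustrative):

    /* char CAS, expected value 0x80:
       val  = (int) (signed char) result;    e.g. 0x80 -> 0xffffff80
       exp  = (int) (signed char) expected;       0x80 -> 0xffffff80
       bool output = ((val - exp) == 0);  */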
  394. +(define_expand "atomic_cas_value_strong<mode>"
  395. + [(match_operand:SHORT 0 "register_operand") ;; val output
  396. + (match_operand:SHORT 1 "memory_operand") ;; memory
  397. + (match_operand:SHORT 2 "reg_or_0_operand") ;; expected value
  398. + (match_operand:SHORT 3 "reg_or_0_operand") ;; desired value
  399. + (match_operand:SI 4 "const_int_operand") ;; mod_s
  400. + (match_operand:SI 5 "const_int_operand") ;; mod_f
  401. + (match_scratch:SHORT 6)]
  402. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  403. +{
  404. + /* We have no QImode/HImode atomics, so form a mask, then use
  405. + subword_atomic_cas_strong<mode> to implement a LR/SC version of the
  406. + operation. */
  407. +
  408. + /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
  409. + is disabled */
  410. +
  411. + rtx old = gen_reg_rtx (SImode);
  412. + rtx mem = operands[1];
  413. + rtx aligned_mem = gen_reg_rtx (SImode);
  414. + rtx shift = gen_reg_rtx (SImode);
  415. + rtx mask = gen_reg_rtx (SImode);
  416. + rtx not_mask = gen_reg_rtx (SImode);
  417. +
  418. + riscv_subword_address (mem, &aligned_mem, &shift, &mask, &not_mask);
  419. +
  420. + rtx o = operands[2];
  421. + rtx n = operands[3];
  422. + rtx shifted_o = gen_reg_rtx (SImode);
  423. + rtx shifted_n = gen_reg_rtx (SImode);
  424. +
  425. + riscv_lshift_subword (<MODE>mode, o, shift, &shifted_o);
  426. + riscv_lshift_subword (<MODE>mode, n, shift, &shifted_n);
  427. +
  428. + emit_move_insn (shifted_o, gen_rtx_AND (SImode, shifted_o, mask));
  429. + emit_move_insn (shifted_n, gen_rtx_AND (SImode, shifted_n, mask));
  430. +
  431. + emit_insn (gen_subword_atomic_cas_strong (old, aligned_mem,
  432. + shifted_o, shifted_n,
  433. + mask, not_mask));
  434. +
  435. + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
  436. + gen_lowpart (QImode, shift)));
  437. +
  438. + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
  439. +
  440. + DONE;
  441. +})
  442. +
  443. +(define_insn "subword_atomic_cas_strong"
  444. + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
  445. + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
  446. + (set (match_dup 1)
  447. + (unspec_volatile:SI [(match_operand:SI 2 "reg_or_0_operand" "rJ") ;; expected value
  448. + (match_operand:SI 3 "reg_or_0_operand" "rJ")] ;; desired value
  449. + UNSPEC_COMPARE_AND_SWAP_SUBWORD))
  450. + (match_operand:SI 4 "register_operand" "rI") ;; mask
  451. + (match_operand:SI 5 "register_operand" "rI") ;; not_mask
  452. + (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_1
  453. + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
  454. + {
  455. + return "1:\;"
  456. + "lr.w.aq\t%0, %1\;"
  457. + "and\t%6, %0, %4\;"
  458. + "bne\t%6, %z2, 1f\;"
  459. + "and\t%6, %0, %5\;"
  460. + "or\t%6, %6, %3\;"
  461. + "sc.w.rl\t%6, %6, %1\;"
  462. + "bnez\t%6, 1b\;"
  463. + "1:";
  464. + }
  465. + [(set (attr "length") (const_int 28))])
  466. +
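The strong-CAS loop in C terms (illustrative sketch; note that shifted_o and shifted_n were already masked by the expander above):

    static unsigned
    sketch_subword_cas (unsigned *aligned_mem, unsigned shifted_o,
                        unsigned shifted_n, unsigned mask, unsigned not_mask)
    {
      unsigned old = __atomic_load_n (aligned_mem, __ATOMIC_RELAXED);
      for (;;)
        {
          if ((old & mask) != shifted_o)      /* bne %6, %z2, 1f */
            return old;                       /* comparison failed */
          unsigned new_word = (old & not_mask) | shifted_n;
          if (__atomic_compare_exchange_n (aligned_mem, &old, new_word, 0,
                                           __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
            return old;                       /* sc.w.rl succeeded */
        }
    }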
  467. (define_expand "atomic_test_and_set"
  468. [(match_operand:QI 0 "register_operand" "") ;; bool output
  469. (match_operand:QI 1 "memory_operand" "+A") ;; memory
  470. --- a/gcc/doc/invoke.texi
  471. +++ b/gcc/doc/invoke.texi
  472. @@ -734,7 +734,8 @@ Objective-C and Objective-C++ Dialects}.
  473. -moverride=@var{string} -mverbose-cost-dump @gol
  474. -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{sysreg} @gol
  475. -mstack-protector-guard-offset=@var{offset} -mtrack-speculation @gol
  476. --moutline-atomics }
  477. +-moutline-atomics
  478. +-minline-atomics -mno-inline-atomics}
  479. @emph{Adapteva Epiphany Options}
  480. @gccoptlist{-mhalf-reg-file -mprefer-short-insn-regs @gol
  481. @@ -26742,6 +26743,13 @@ Do or don't use smaller but slower prolo
  482. library function calls. The default is to use fast inline prologues and
  483. epilogues.
  484. +@opindex minline-atomics
  485. +@item -minline-atomics
  486. +@itemx -mno-inline-atomics
  487. +Do or don't use smaller but slower subword atomic emulation code that uses
  488. +libatomic function calls. The default is to use fast inline subword atomics
  489. +that do not require libatomic.
  490. +
  491. @item -mshorten-memrefs
  492. @itemx -mno-shorten-memrefs
  493. @opindex mshorten-memrefs
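As a usage note (the invocation below is illustrative; the cross-compiler name, -march string, and source file are assumptions):

    riscv64-unknown-linux-gnu-gcc -O2 -march=rv64gc -mno-inline-atomics \
        byte_flag.c -latomic

falls back to the out-of-line subword routines and therefore needs libatomic at link time, while the default (-minline-atomics) emits the LR/SC sequences directly and has no such dependency.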
  494. --- /dev/null
  495. +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-1.c
  496. @@ -0,0 +1,18 @@
  497. +/* { dg-do compile } */
  498. +/* { dg-options "-mno-inline-atomics" } */
  499. +/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "fetch_and_nand" { target *-*-* } 0 } */
  500. +/* { dg-final { scan-assembler "\tcall\t__sync_fetch_and_add_1" } } */
  501. +/* { dg-final { scan-assembler "\tcall\t__sync_fetch_and_nand_1" } } */
  502. +/* { dg-final { scan-assembler "\tcall\t__sync_bool_compare_and_swap_1" } } */
  503. +
  504. +char foo;
  505. +char bar;
  506. +char baz;
  507. +
  508. +int
  509. +main ()
  510. +{
  511. + __sync_fetch_and_add(&foo, 1);
  512. + __sync_fetch_and_nand(&bar, 1);
  513. + __sync_bool_compare_and_swap (&baz, 1, 2);
  514. +}
  515. --- /dev/null
  516. +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-2.c
  517. @@ -0,0 +1,9 @@
  518. +/* { dg-do compile } */
  519. +/* Verify that subword atomics do not generate calls. */
  520. +/* { dg-options "-minline-atomics" } */
  521. +/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "fetch_and_nand" { target *-*-* } 0 } */
  522. +/* { dg-final { scan-assembler-not "\tcall\t__sync_fetch_and_add_1" } } */
  523. +/* { dg-final { scan-assembler-not "\tcall\t__sync_fetch_and_nand_1" } } */
  524. +/* { dg-final { scan-assembler-not "\tcall\t__sync_bool_compare_and_swap_1" } } */
  525. +
  526. +#include "inline-atomics-1.c"
  527. \ No newline at end of file
  528. --- /dev/null
  529. +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-3.c
  530. @@ -0,0 +1,569 @@
  531. +/* Check all char alignments. */
  532. +/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-op-1.c */
  533. +/* Test __atomic routines for existence and proper execution on 1 byte
  534. + values with each valid memory model. */
  535. +/* { dg-do run } */
  536. +/* { dg-options "-minline-atomics -Wno-address-of-packed-member" } */
  537. +
  538. +/* Test the execution of the __atomic_*OP builtin routines for a char. */
  539. +
  540. +extern void abort(void);
  541. +
  542. +char count, res;
  543. +const char init = ~0;
  544. +
  545. +struct A
  546. +{
  547. + char a;
  548. + char b;
  549. + char c;
  550. + char d;
  551. +} __attribute__ ((packed)) A;
  552. +
  553. +/* The fetch_op routines return the original value before the operation. */
  554. +
  555. +void
  556. +test_fetch_add (char* v)
  557. +{
  558. + *v = 0;
  559. + count = 1;
  560. +
  561. + if (__atomic_fetch_add (v, count, __ATOMIC_RELAXED) != 0)
  562. + abort ();
  563. +
  564. + if (__atomic_fetch_add (v, 1, __ATOMIC_CONSUME) != 1)
  565. + abort ();
  566. +
  567. + if (__atomic_fetch_add (v, count, __ATOMIC_ACQUIRE) != 2)
  568. + abort ();
  569. +
  570. + if (__atomic_fetch_add (v, 1, __ATOMIC_RELEASE) != 3)
  571. + abort ();
  572. +
  573. + if (__atomic_fetch_add (v, count, __ATOMIC_ACQ_REL) != 4)
  574. + abort ();
  575. +
  576. + if (__atomic_fetch_add (v, 1, __ATOMIC_SEQ_CST) != 5)
  577. + abort ();
  578. +}
  579. +
  580. +
  581. +void
  582. +test_fetch_sub (char* v)
  583. +{
  584. + *v = res = 20;
  585. + count = 0;
  586. +
  587. + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_RELAXED) != res--)
  588. + abort ();
  589. +
  590. + if (__atomic_fetch_sub (v, 1, __ATOMIC_CONSUME) != res--)
  591. + abort ();
  592. +
  593. + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQUIRE) != res--)
  594. + abort ();
  595. +
  596. + if (__atomic_fetch_sub (v, 1, __ATOMIC_RELEASE) != res--)
  597. + abort ();
  598. +
  599. + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQ_REL) != res--)
  600. + abort ();
  601. +
  602. + if (__atomic_fetch_sub (v, 1, __ATOMIC_SEQ_CST) != res--)
  603. + abort ();
  604. +}
  605. +
  606. +void
  607. +test_fetch_and (char* v)
  608. +{
  609. + *v = init;
  610. +
  611. + if (__atomic_fetch_and (v, 0, __ATOMIC_RELAXED) != init)
  612. + abort ();
  613. +
  614. + if (__atomic_fetch_and (v, init, __ATOMIC_CONSUME) != 0)
  615. + abort ();
  616. +
  617. + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQUIRE) != 0)
  618. + abort ();
  619. +
  620. + *v = ~*v;
  621. + if (__atomic_fetch_and (v, init, __ATOMIC_RELEASE) != init)
  622. + abort ();
  623. +
  624. + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQ_REL) != init)
  625. + abort ();
  626. +
  627. + if (__atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST) != 0)
  628. + abort ();
  629. +}
  630. +
  631. +void
  632. +test_fetch_nand (char* v)
  633. +{
  634. + *v = init;
  635. +
  636. + if (__atomic_fetch_nand (v, 0, __ATOMIC_RELAXED) != init)
  637. + abort ();
  638. +
  639. + if (__atomic_fetch_nand (v, init, __ATOMIC_CONSUME) != init)
  640. + abort ();
  641. +
  642. + if (__atomic_fetch_nand (v, 0, __ATOMIC_ACQUIRE) != 0 )
  643. + abort ();
  644. +
  645. + if (__atomic_fetch_nand (v, init, __ATOMIC_RELEASE) != init)
  646. + abort ();
  647. +
  648. + if (__atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL) != 0)
  649. + abort ();
  650. +
  651. + if (__atomic_fetch_nand (v, 0, __ATOMIC_SEQ_CST) != init)
  652. + abort ();
  653. +}
  654. +
  655. +void
  656. +test_fetch_xor (char* v)
  657. +{
  658. + *v = init;
  659. + count = 0;
  660. +
  661. + if (__atomic_fetch_xor (v, count, __ATOMIC_RELAXED) != init)
  662. + abort ();
  663. +
  664. + if (__atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME) != init)
  665. + abort ();
  666. +
  667. + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQUIRE) != 0)
  668. + abort ();
  669. +
  670. + if (__atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE) != 0)
  671. + abort ();
  672. +
  673. + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL) != init)
  674. + abort ();
  675. +
  676. + if (__atomic_fetch_xor (v, ~count, __ATOMIC_SEQ_CST) != init)
  677. + abort ();
  678. +}
  679. +
  680. +void
  681. +test_fetch_or (char* v)
  682. +{
  683. + *v = 0;
  684. + count = 1;
  685. +
  686. + if (__atomic_fetch_or (v, count, __ATOMIC_RELAXED) != 0)
  687. + abort ();
  688. +
  689. + count *= 2;
  690. + if (__atomic_fetch_or (v, 2, __ATOMIC_CONSUME) != 1)
  691. + abort ();
  692. +
  693. + count *= 2;
  694. + if (__atomic_fetch_or (v, count, __ATOMIC_ACQUIRE) != 3)
  695. + abort ();
  696. +
  697. + count *= 2;
  698. + if (__atomic_fetch_or (v, 8, __ATOMIC_RELEASE) != 7)
  699. + abort ();
  700. +
  701. + count *= 2;
  702. + if (__atomic_fetch_or (v, count, __ATOMIC_ACQ_REL) != 15)
  703. + abort ();
  704. +
  705. + count *= 2;
  706. + if (__atomic_fetch_or (v, count, __ATOMIC_SEQ_CST) != 31)
  707. + abort ();
  708. +}
  709. +
  710. +/* The OP_fetch routines return the new value after the operation. */
  711. +
  712. +void
  713. +test_add_fetch (char* v)
  714. +{
  715. + *v = 0;
  716. + count = 1;
  717. +
  718. + if (__atomic_add_fetch (v, count, __ATOMIC_RELAXED) != 1)
  719. + abort ();
  720. +
  721. + if (__atomic_add_fetch (v, 1, __ATOMIC_CONSUME) != 2)
  722. + abort ();
  723. +
  724. + if (__atomic_add_fetch (v, count, __ATOMIC_ACQUIRE) != 3)
  725. + abort ();
  726. +
  727. + if (__atomic_add_fetch (v, 1, __ATOMIC_RELEASE) != 4)
  728. + abort ();
  729. +
  730. + if (__atomic_add_fetch (v, count, __ATOMIC_ACQ_REL) != 5)
  731. + abort ();
  732. +
  733. + if (__atomic_add_fetch (v, count, __ATOMIC_SEQ_CST) != 6)
  734. + abort ();
  735. +}
  736. +
  737. +
  738. +void
  739. +test_sub_fetch (char* v)
  740. +{
  741. + *v = res = 20;
  742. + count = 0;
  743. +
  744. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED) != --res)
  745. + abort ();
  746. +
  747. + if (__atomic_sub_fetch (v, 1, __ATOMIC_CONSUME) != --res)
  748. + abort ();
  749. +
  750. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQUIRE) != --res)
  751. + abort ();
  752. +
  753. + if (__atomic_sub_fetch (v, 1, __ATOMIC_RELEASE) != --res)
  754. + abort ();
  755. +
  756. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL) != --res)
  757. + abort ();
  758. +
  759. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_SEQ_CST) != --res)
  760. + abort ();
  761. +}
  762. +
  763. +void
  764. +test_and_fetch (char* v)
  765. +{
  766. + *v = init;
  767. +
  768. + if (__atomic_and_fetch (v, 0, __ATOMIC_RELAXED) != 0)
  769. + abort ();
  770. +
  771. + *v = init;
  772. + if (__atomic_and_fetch (v, init, __ATOMIC_CONSUME) != init)
  773. + abort ();
  774. +
  775. + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
  776. + abort ();
  777. +
  778. + *v = ~*v;
  779. + if (__atomic_and_fetch (v, init, __ATOMIC_RELEASE) != init)
  780. + abort ();
  781. +
  782. + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL) != 0)
  783. + abort ();
  784. +
  785. + *v = ~*v;
  786. + if (__atomic_and_fetch (v, 0, __ATOMIC_SEQ_CST) != 0)
  787. + abort ();
  788. +}
  789. +
  790. +void
  791. +test_nand_fetch (char* v)
  792. +{
  793. + *v = init;
  794. +
  795. + if (__atomic_nand_fetch (v, 0, __ATOMIC_RELAXED) != init)
  796. + abort ();
  797. +
  798. + if (__atomic_nand_fetch (v, init, __ATOMIC_CONSUME) != 0)
  799. + abort ();
  800. +
  801. + if (__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE) != init)
  802. + abort ();
  803. +
  804. + if (__atomic_nand_fetch (v, init, __ATOMIC_RELEASE) != 0)
  805. + abort ();
  806. +
  807. + if (__atomic_nand_fetch (v, init, __ATOMIC_ACQ_REL) != init)
  808. + abort ();
  809. +
  810. + if (__atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST) != init)
  811. + abort ();
  812. +}
  813. +
  814. +
  815. +
  816. +void
  817. +test_xor_fetch (char* v)
  818. +{
  819. + *v = init;
  820. + count = 0;
  821. +
  822. + if (__atomic_xor_fetch (v, count, __ATOMIC_RELAXED) != init)
  823. + abort ();
  824. +
  825. + if (__atomic_xor_fetch (v, ~count, __ATOMIC_CONSUME) != 0)
  826. + abort ();
  827. +
  828. + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
  829. + abort ();
  830. +
  831. + if (__atomic_xor_fetch (v, ~count, __ATOMIC_RELEASE) != init)
  832. + abort ();
  833. +
  834. + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQ_REL) != init)
  835. + abort ();
  836. +
  837. + if (__atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST) != 0)
  838. + abort ();
  839. +}
  840. +
  841. +void
  842. +test_or_fetch (char* v)
  843. +{
  844. + *v = 0;
  845. + count = 1;
  846. +
  847. + if (__atomic_or_fetch (v, count, __ATOMIC_RELAXED) != 1)
  848. + abort ();
  849. +
  850. + count *= 2;
  851. + if (__atomic_or_fetch (v, 2, __ATOMIC_CONSUME) != 3)
  852. + abort ();
  853. +
  854. + count *= 2;
  855. + if (__atomic_or_fetch (v, count, __ATOMIC_ACQUIRE) != 7)
  856. + abort ();
  857. +
  858. + count *= 2;
  859. + if (__atomic_or_fetch (v, 8, __ATOMIC_RELEASE) != 15)
  860. + abort ();
  861. +
  862. + count *= 2;
  863. + if (__atomic_or_fetch (v, count, __ATOMIC_ACQ_REL) != 31)
  864. + abort ();
  865. +
  866. + count *= 2;
  867. + if (__atomic_or_fetch (v, count, __ATOMIC_SEQ_CST) != 63)
  868. + abort ();
  869. +}
  870. +
  871. +
  872. +/* Test the OP routines with a result which isn't used. Use both variations
  873. + within each function. */
  874. +
  875. +void
  876. +test_add (char* v)
  877. +{
  878. + *v = 0;
  879. + count = 1;
  880. +
  881. + __atomic_add_fetch (v, count, __ATOMIC_RELAXED);
  882. + if (*v != 1)
  883. + abort ();
  884. +
  885. + __atomic_fetch_add (v, count, __ATOMIC_CONSUME);
  886. + if (*v != 2)
  887. + abort ();
  888. +
  889. + __atomic_add_fetch (v, 1 , __ATOMIC_ACQUIRE);
  890. + if (*v != 3)
  891. + abort ();
  892. +
  893. + __atomic_fetch_add (v, 1, __ATOMIC_RELEASE);
  894. + if (*v != 4)
  895. + abort ();
  896. +
  897. + __atomic_add_fetch (v, count, __ATOMIC_ACQ_REL);
  898. + if (*v != 5)
  899. + abort ();
  900. +
  901. + __atomic_fetch_add (v, count, __ATOMIC_SEQ_CST);
  902. + if (*v != 6)
  903. + abort ();
  904. +}
  905. +
  906. +
  907. +void
  908. +test_sub (char* v)
  909. +{
  910. + *v = res = 20;
  911. + count = 0;
  912. +
  913. + __atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED);
  914. + if (*v != --res)
  915. + abort ();
  916. +
  917. + __atomic_fetch_sub (v, count + 1, __ATOMIC_CONSUME);
  918. + if (*v != --res)
  919. + abort ();
  920. +
  921. + __atomic_sub_fetch (v, 1, __ATOMIC_ACQUIRE);
  922. + if (*v != --res)
  923. + abort ();
  924. +
  925. + __atomic_fetch_sub (v, 1, __ATOMIC_RELEASE);
  926. + if (*v != --res)
  927. + abort ();
  928. +
  929. + __atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL);
  930. + if (*v != --res)
  931. + abort ();
  932. +
  933. + __atomic_fetch_sub (v, count + 1, __ATOMIC_SEQ_CST);
  934. + if (*v != --res)
  935. + abort ();
  936. +}
  937. +
  938. +void
  939. +test_and (char* v)
  940. +{
  941. + *v = init;
  942. +
  943. + __atomic_and_fetch (v, 0, __ATOMIC_RELAXED);
  944. + if (*v != 0)
  945. + abort ();
  946. +
  947. + *v = init;
  948. + __atomic_fetch_and (v, init, __ATOMIC_CONSUME);
  949. + if (*v != init)
  950. + abort ();
  951. +
  952. + __atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE);
  953. + if (*v != 0)
  954. + abort ();
  955. +
  956. + *v = ~*v;
  957. + __atomic_fetch_and (v, init, __ATOMIC_RELEASE);
  958. + if (*v != init)
  959. + abort ();
  960. +
  961. + __atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL);
  962. + if (*v != 0)
  963. + abort ();
  964. +
  965. + *v = ~*v;
  966. + __atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST);
  967. + if (*v != 0)
  968. + abort ();
  969. +}
  970. +
  971. +void
  972. +test_nand (char* v)
  973. +{
  974. + *v = init;
  975. +
  976. + __atomic_fetch_nand (v, 0, __ATOMIC_RELAXED);
  977. + if (*v != init)
  978. + abort ();
  979. +
  980. + __atomic_fetch_nand (v, init, __ATOMIC_CONSUME);
  981. + if (*v != 0)
  982. + abort ();
  983. +
  984. + __atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE);
  985. + if (*v != init)
  986. + abort ();
  987. +
  988. + __atomic_nand_fetch (v, init, __ATOMIC_RELEASE);
  989. + if (*v != 0)
  990. + abort ();
  991. +
  992. + __atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL);
  993. + if (*v != init)
  994. + abort ();
  995. +
  996. + __atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST);
  997. + if (*v != init)
  998. + abort ();
  999. +}
  1000. +
  1001. +
  1002. +
  1003. +void
  1004. +test_xor (char* v)
  1005. +{
  1006. + *v = init;
  1007. + count = 0;
  1008. +
  1009. + __atomic_xor_fetch (v, count, __ATOMIC_RELAXED);
  1010. + if (*v != init)
  1011. + abort ();
  1012. +
  1013. + __atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME);
  1014. + if (*v != 0)
  1015. + abort ();
  1016. +
  1017. + __atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE);
  1018. + if (*v != 0)
  1019. + abort ();
  1020. +
  1021. + __atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE);
  1022. + if (*v != init)
  1023. + abort ();
  1024. +
  1025. + __atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL);
  1026. + if (*v != init)
  1027. + abort ();
  1028. +
  1029. + __atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST);
  1030. + if (*v != 0)
  1031. + abort ();
  1032. +}
  1033. +
  1034. +void
  1035. +test_or (char* v)
  1036. +{
  1037. + *v = 0;
  1038. + count = 1;
  1039. +
  1040. + __atomic_or_fetch (v, count, __ATOMIC_RELAXED);
  1041. + if (*v != 1)
  1042. + abort ();
  1043. +
  1044. + count *= 2;
  1045. + __atomic_fetch_or (v, count, __ATOMIC_CONSUME);
  1046. + if (*v != 3)
  1047. + abort ();
  1048. +
  1049. + count *= 2;
  1050. + __atomic_or_fetch (v, 4, __ATOMIC_ACQUIRE);
  1051. + if (*v != 7)
  1052. + abort ();
  1053. +
  1054. + count *= 2;
  1055. + __atomic_fetch_or (v, 8, __ATOMIC_RELEASE);
  1056. + if (*v != 15)
  1057. + abort ();
  1058. +
  1059. + count *= 2;
  1060. + __atomic_or_fetch (v, count, __ATOMIC_ACQ_REL);
  1061. + if (*v != 31)
  1062. + abort ();
  1063. +
  1064. + count *= 2;
  1065. + __atomic_fetch_or (v, count, __ATOMIC_SEQ_CST);
  1066. + if (*v != 63)
  1067. + abort ();
  1068. +}
  1069. +
  1070. +int
  1071. +main ()
  1072. +{
  1073. + char* V[] = {&A.a, &A.b, &A.c, &A.d};
  1074. +
  1075. + for (int i = 0; i < 4; i++) {
  1076. + test_fetch_add (V[i]);
  1077. + test_fetch_sub (V[i]);
  1078. + test_fetch_and (V[i]);
  1079. + test_fetch_nand (V[i]);
  1080. + test_fetch_xor (V[i]);
  1081. + test_fetch_or (V[i]);
  1082. +
  1083. + test_add_fetch (V[i]);
  1084. + test_sub_fetch (V[i]);
  1085. + test_and_fetch (V[i]);
  1086. + test_nand_fetch (V[i]);
  1087. + test_xor_fetch (V[i]);
  1088. + test_or_fetch (V[i]);
  1089. +
  1090. + test_add (V[i]);
  1091. + test_sub (V[i]);
  1092. + test_and (V[i]);
  1093. + test_nand (V[i]);
  1094. + test_xor (V[i]);
  1095. + test_or (V[i]);
  1096. + }
  1097. +
  1098. + return 0;
  1099. +}
  1100. --- /dev/null
  1101. +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-4.c
  1102. @@ -0,0 +1,566 @@
  1103. +/* Check all short alignments. */
  1104. +/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-op-2.c */
  1105. +/* Test __atomic routines for existence and proper execution on 2 byte
  1106. + values with each valid memory model. */
  1107. +/* { dg-do run } */
  1108. +/* { dg-options "-minline-atomics -Wno-address-of-packed-member" } */
  1109. +
  1110. +/* Test the execution of the __atomic_*OP builtin routines for a short. */
  1111. +
  1112. +extern void abort(void);
  1113. +
  1114. +short count, res;
  1115. +const short init = ~0;
  1116. +
  1117. +struct A
  1118. +{
  1119. + short a;
  1120. + short b;
  1121. +} __attribute__ ((packed)) A;
  1122. +
  1123. +/* The fetch_op routines return the original value before the operation. */
  1124. +
  1125. +void
  1126. +test_fetch_add (short* v)
  1127. +{
  1128. + *v = 0;
  1129. + count = 1;
  1130. +
  1131. + if (__atomic_fetch_add (v, count, __ATOMIC_RELAXED) != 0)
  1132. + abort ();
  1133. +
  1134. + if (__atomic_fetch_add (v, 1, __ATOMIC_CONSUME) != 1)
  1135. + abort ();
  1136. +
  1137. + if (__atomic_fetch_add (v, count, __ATOMIC_ACQUIRE) != 2)
  1138. + abort ();
  1139. +
  1140. + if (__atomic_fetch_add (v, 1, __ATOMIC_RELEASE) != 3)
  1141. + abort ();
  1142. +
  1143. + if (__atomic_fetch_add (v, count, __ATOMIC_ACQ_REL) != 4)
  1144. + abort ();
  1145. +
  1146. + if (__atomic_fetch_add (v, 1, __ATOMIC_SEQ_CST) != 5)
  1147. + abort ();
  1148. +}
  1149. +
  1150. +
  1151. +void
  1152. +test_fetch_sub (short* v)
  1153. +{
  1154. + *v = res = 20;
  1155. + count = 0;
  1156. +
  1157. + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_RELAXED) != res--)
  1158. + abort ();
  1159. +
  1160. + if (__atomic_fetch_sub (v, 1, __ATOMIC_CONSUME) != res--)
  1161. + abort ();
  1162. +
  1163. + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQUIRE) != res--)
  1164. + abort ();
  1165. +
  1166. + if (__atomic_fetch_sub (v, 1, __ATOMIC_RELEASE) != res--)
  1167. + abort ();
  1168. +
  1169. + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQ_REL) != res--)
  1170. + abort ();
  1171. +
  1172. + if (__atomic_fetch_sub (v, 1, __ATOMIC_SEQ_CST) != res--)
  1173. + abort ();
  1174. +}
  1175. +
  1176. +void
  1177. +test_fetch_and (short* v)
  1178. +{
  1179. + *v = init;
  1180. +
  1181. + if (__atomic_fetch_and (v, 0, __ATOMIC_RELAXED) != init)
  1182. + abort ();
  1183. +
  1184. + if (__atomic_fetch_and (v, init, __ATOMIC_CONSUME) != 0)
  1185. + abort ();
  1186. +
  1187. + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQUIRE) != 0)
  1188. + abort ();
  1189. +
  1190. + *v = ~*v;
  1191. + if (__atomic_fetch_and (v, init, __ATOMIC_RELEASE) != init)
  1192. + abort ();
  1193. +
  1194. + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQ_REL) != init)
  1195. + abort ();
  1196. +
  1197. + if (__atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST) != 0)
  1198. + abort ();
  1199. +}
  1200. +
  1201. +void
  1202. +test_fetch_nand (short* v)
  1203. +{
  1204. + *v = init;
  1205. +
  1206. + if (__atomic_fetch_nand (v, 0, __ATOMIC_RELAXED) != init)
  1207. + abort ();
  1208. +
  1209. + if (__atomic_fetch_nand (v, init, __ATOMIC_CONSUME) != init)
  1210. + abort ();
  1211. +
  1212. + if (__atomic_fetch_nand (v, 0, __ATOMIC_ACQUIRE) != 0 )
  1213. + abort ();
  1214. +
  1215. + if (__atomic_fetch_nand (v, init, __ATOMIC_RELEASE) != init)
  1216. + abort ();
  1217. +
  1218. + if (__atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL) != 0)
  1219. + abort ();
  1220. +
  1221. + if (__atomic_fetch_nand (v, 0, __ATOMIC_SEQ_CST) != init)
  1222. + abort ();
  1223. +}
  1224. +
  1225. +void
  1226. +test_fetch_xor (short* v)
  1227. +{
  1228. + *v = init;
  1229. + count = 0;
  1230. +
  1231. + if (__atomic_fetch_xor (v, count, __ATOMIC_RELAXED) != init)
  1232. + abort ();
  1233. +
  1234. + if (__atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME) != init)
  1235. + abort ();
  1236. +
  1237. + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQUIRE) != 0)
  1238. + abort ();
  1239. +
  1240. + if (__atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE) != 0)
  1241. + abort ();
  1242. +
  1243. + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL) != init)
  1244. + abort ();
  1245. +
  1246. + if (__atomic_fetch_xor (v, ~count, __ATOMIC_SEQ_CST) != init)
  1247. + abort ();
  1248. +}
  1249. +
  1250. +void
  1251. +test_fetch_or (short* v)
  1252. +{
  1253. + *v = 0;
  1254. + count = 1;
  1255. +
  1256. + if (__atomic_fetch_or (v, count, __ATOMIC_RELAXED) != 0)
  1257. + abort ();
  1258. +
  1259. + count *= 2;
  1260. + if (__atomic_fetch_or (v, 2, __ATOMIC_CONSUME) != 1)
  1261. + abort ();
  1262. +
  1263. + count *= 2;
  1264. + if (__atomic_fetch_or (v, count, __ATOMIC_ACQUIRE) != 3)
  1265. + abort ();
  1266. +
  1267. + count *= 2;
  1268. + if (__atomic_fetch_or (v, 8, __ATOMIC_RELEASE) != 7)
  1269. + abort ();
  1270. +
  1271. + count *= 2;
  1272. + if (__atomic_fetch_or (v, count, __ATOMIC_ACQ_REL) != 15)
  1273. + abort ();
  1274. +
  1275. + count *= 2;
  1276. + if (__atomic_fetch_or (v, count, __ATOMIC_SEQ_CST) != 31)
  1277. + abort ();
  1278. +}
  1279. +
  1280. +/* The OP_fetch routines return the new value after the operation. */
  1281. +
  1282. +void
  1283. +test_add_fetch (short* v)
  1284. +{
  1285. + *v = 0;
  1286. + count = 1;
  1287. +
  1288. + if (__atomic_add_fetch (v, count, __ATOMIC_RELAXED) != 1)
  1289. + abort ();
  1290. +
  1291. + if (__atomic_add_fetch (v, 1, __ATOMIC_CONSUME) != 2)
  1292. + abort ();
  1293. +
  1294. + if (__atomic_add_fetch (v, count, __ATOMIC_ACQUIRE) != 3)
  1295. + abort ();
  1296. +
  1297. + if (__atomic_add_fetch (v, 1, __ATOMIC_RELEASE) != 4)
  1298. + abort ();
  1299. +
  1300. + if (__atomic_add_fetch (v, count, __ATOMIC_ACQ_REL) != 5)
  1301. + abort ();
  1302. +
  1303. + if (__atomic_add_fetch (v, count, __ATOMIC_SEQ_CST) != 6)
  1304. + abort ();
  1305. +}
  1306. +
  1307. +
  1308. +void
  1309. +test_sub_fetch (short* v)
  1310. +{
  1311. + *v = res = 20;
  1312. + count = 0;
  1313. +
  1314. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED) != --res)
  1315. + abort ();
  1316. +
  1317. + if (__atomic_sub_fetch (v, 1, __ATOMIC_CONSUME) != --res)
  1318. + abort ();
  1319. +
  1320. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQUIRE) != --res)
  1321. + abort ();
  1322. +
  1323. + if (__atomic_sub_fetch (v, 1, __ATOMIC_RELEASE) != --res)
  1324. + abort ();
  1325. +
  1326. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL) != --res)
  1327. + abort ();
  1328. +
  1329. + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_SEQ_CST) != --res)
  1330. + abort ();
  1331. +}
  1332. +
  1333. +void
  1334. +test_and_fetch (short* v)
  1335. +{
  1336. + *v = init;
  1337. +
  1338. + if (__atomic_and_fetch (v, 0, __ATOMIC_RELAXED) != 0)
  1339. + abort ();
  1340. +
  1341. + *v = init;
  1342. + if (__atomic_and_fetch (v, init, __ATOMIC_CONSUME) != init)
  1343. + abort ();
  1344. +
  1345. + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
  1346. + abort ();
  1347. +
  1348. + *v = ~*v;
  1349. + if (__atomic_and_fetch (v, init, __ATOMIC_RELEASE) != init)
  1350. + abort ();
  1351. +
  1352. + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL) != 0)
  1353. + abort ();
  1354. +
  1355. + *v = ~*v;
  1356. + if (__atomic_and_fetch (v, 0, __ATOMIC_SEQ_CST) != 0)
  1357. + abort ();
  1358. +}
  1359. +
  1360. +void
  1361. +test_nand_fetch (short* v)
  1362. +{
  1363. + *v = init;
  1364. +
  1365. + if (__atomic_nand_fetch (v, 0, __ATOMIC_RELAXED) != init)
  1366. + abort ();
  1367. +
  1368. + if (__atomic_nand_fetch (v, init, __ATOMIC_CONSUME) != 0)
  1369. + abort ();
  1370. +
  1371. + if (__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE) != init)
  1372. + abort ();
  1373. +
  1374. + if (__atomic_nand_fetch (v, init, __ATOMIC_RELEASE) != 0)
  1375. + abort ();
  1376. +
  1377. + if (__atomic_nand_fetch (v, init, __ATOMIC_ACQ_REL) != init)
  1378. + abort ();
  1379. +
  1380. + if (__atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST) != init)
  1381. + abort ();
  1382. +}
  1383. +
  1384. +
  1385. +
  1386. +void
  1387. +test_xor_fetch (short* v)
  1388. +{
  1389. + *v = init;
  1390. + count = 0;
  1391. +
  1392. + if (__atomic_xor_fetch (v, count, __ATOMIC_RELAXED) != init)
  1393. + abort ();
  1394. +
  1395. + if (__atomic_xor_fetch (v, ~count, __ATOMIC_CONSUME) != 0)
  1396. + abort ();
  1397. +
  1398. + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
  1399. + abort ();
  1400. +
  1401. + if (__atomic_xor_fetch (v, ~count, __ATOMIC_RELEASE) != init)
  1402. + abort ();
  1403. +
  1404. + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQ_REL) != init)
  1405. + abort ();
  1406. +
  1407. + if (__atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST) != 0)
  1408. + abort ();
  1409. +}
  1410. +
  1411. +void
  1412. +test_or_fetch (short* v)
  1413. +{
  1414. + *v = 0;
  1415. + count = 1;
  1416. +
  1417. + if (__atomic_or_fetch (v, count, __ATOMIC_RELAXED) != 1)
  1418. + abort ();
  1419. +
  1420. + count *= 2;
  1421. + if (__atomic_or_fetch (v, 2, __ATOMIC_CONSUME) != 3)
  1422. + abort ();
  1423. +
  1424. + count *= 2;
  1425. + if (__atomic_or_fetch (v, count, __ATOMIC_ACQUIRE) != 7)
  1426. + abort ();
  1427. +
  1428. + count *= 2;
  1429. + if (__atomic_or_fetch (v, 8, __ATOMIC_RELEASE) != 15)
  1430. + abort ();
  1431. +
  1432. + count *= 2;
  1433. + if (__atomic_or_fetch (v, count, __ATOMIC_ACQ_REL) != 31)
  1434. + abort ();
  1435. +
  1436. + count *= 2;
  1437. + if (__atomic_or_fetch (v, count, __ATOMIC_SEQ_CST) != 63)
  1438. + abort ();
  1439. +}
  1440. +
  1441. +
  1442. +/* Test the OP routines with a result which isn't used. Use both variations
  1443. + within each function. */
  1444. +
  1445. +void
  1446. +test_add (short* v)
  1447. +{
  1448. + *v = 0;
  1449. + count = 1;
  1450. +
  1451. + __atomic_add_fetch (v, count, __ATOMIC_RELAXED);
  1452. + if (*v != 1)
  1453. + abort ();
  1454. +
  1455. + __atomic_fetch_add (v, count, __ATOMIC_CONSUME);
  1456. + if (*v != 2)
  1457. + abort ();
  1458. +
  1459. + __atomic_add_fetch (v, 1 , __ATOMIC_ACQUIRE);
  1460. + if (*v != 3)
  1461. + abort ();
  1462. +
  1463. + __atomic_fetch_add (v, 1, __ATOMIC_RELEASE);
  1464. + if (*v != 4)
  1465. + abort ();
  1466. +
  1467. + __atomic_add_fetch (v, count, __ATOMIC_ACQ_REL);
  1468. + if (*v != 5)
  1469. + abort ();
  1470. +
  1471. + __atomic_fetch_add (v, count, __ATOMIC_SEQ_CST);
  1472. + if (*v != 6)
  1473. + abort ();
  1474. +}
+
+
+void
+test_sub (short* v)
+{
+  *v = res = 20;
+  count = 0;
+
+  __atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED);
+  if (*v != --res)
+    abort ();
+
+  __atomic_fetch_sub (v, count + 1, __ATOMIC_CONSUME);
+  if (*v != --res)
+    abort ();
+
+  __atomic_sub_fetch (v, 1, __ATOMIC_ACQUIRE);
+  if (*v != --res)
+    abort ();
+
+  __atomic_fetch_sub (v, 1, __ATOMIC_RELEASE);
+  if (*v != --res)
+    abort ();
+
+  __atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL);
+  if (*v != --res)
+    abort ();
+
+  __atomic_fetch_sub (v, count + 1, __ATOMIC_SEQ_CST);
+  if (*v != --res)
+    abort ();
+}
+
+void
+test_and (short* v)
+{
+  *v = init;
+
+  __atomic_and_fetch (v, 0, __ATOMIC_RELAXED);
+  if (*v != 0)
+    abort ();
+
+  *v = init;
+  __atomic_fetch_and (v, init, __ATOMIC_CONSUME);
+  if (*v != init)
+    abort ();
+
+  __atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE);
+  if (*v != 0)
+    abort ();
+
+  *v = ~*v;
+  __atomic_fetch_and (v, init, __ATOMIC_RELEASE);
+  if (*v != init)
+    abort ();
+
+  __atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL);
+  if (*v != 0)
+    abort ();
+
+  *v = ~*v;
+  __atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST);
+  if (*v != 0)
+    abort ();
+}
+
+void
+test_nand (short* v)
+{
+  *v = init;
+
+  __atomic_fetch_nand (v, 0, __ATOMIC_RELAXED);
+  if (*v != init)
+    abort ();
+
+  __atomic_fetch_nand (v, init, __ATOMIC_CONSUME);
+  if (*v != 0)
+    abort ();
+
+  __atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE);
+  if (*v != init)
+    abort ();
+
+  __atomic_nand_fetch (v, init, __ATOMIC_RELEASE);
+  if (*v != 0)
+    abort ();
+
+  __atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL);
+  if (*v != init)
+    abort ();
+
+  __atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST);
+  if (*v != init)
+    abort ();
+}
+
+
+
+void
+test_xor (short* v)
+{
+  *v = init;
+  count = 0;
+
+  __atomic_xor_fetch (v, count, __ATOMIC_RELAXED);
+  if (*v != init)
+    abort ();
+
+  __atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME);
+  if (*v != 0)
+    abort ();
+
+  __atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE);
+  if (*v != 0)
+    abort ();
+
+  __atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE);
+  if (*v != init)
+    abort ();
+
+  __atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL);
+  if (*v != init)
+    abort ();
+
+  __atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST);
+  if (*v != 0)
+    abort ();
+}
+
+void
+test_or (short* v)
+{
+  *v = 0;
+  count = 1;
+
+  __atomic_or_fetch (v, count, __ATOMIC_RELAXED);
+  if (*v != 1)
+    abort ();
+
+  count *= 2;
+  __atomic_fetch_or (v, count, __ATOMIC_CONSUME);
+  if (*v != 3)
+    abort ();
+
+  count *= 2;
+  __atomic_or_fetch (v, 4, __ATOMIC_ACQUIRE);
+  if (*v != 7)
+    abort ();
+
+  count *= 2;
+  __atomic_fetch_or (v, 8, __ATOMIC_RELEASE);
+  if (*v != 15)
+    abort ();
+
+  count *= 2;
+  __atomic_or_fetch (v, count, __ATOMIC_ACQ_REL);
+  if (*v != 31)
+    abort ();
+
+  count *= 2;
+  __atomic_fetch_or (v, count, __ATOMIC_SEQ_CST);
+  if (*v != 63)
+    abort ();
+}
+
+int
+main () {
+  short* V[] = {&A.a, &A.b};
+
+  for (int i = 0; i < 2; i++) {
+    test_fetch_add (V[i]);
+    test_fetch_sub (V[i]);
+    test_fetch_and (V[i]);
+    test_fetch_nand (V[i]);
+    test_fetch_xor (V[i]);
+    test_fetch_or (V[i]);
+
+    test_add_fetch (V[i]);
+    test_sub_fetch (V[i]);
+    test_and_fetch (V[i]);
+    test_nand_fetch (V[i]);
+    test_xor_fetch (V[i]);
+    test_or_fetch (V[i]);
+
+    test_add (V[i]);
+    test_sub (V[i]);
+    test_and (V[i]);
+    test_nand (V[i]);
+    test_xor (V[i]);
+    test_or (V[i]);
+  }
+
+  return 0;
+}
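
The tests above assume GCC expands each subword fetch-and-op builtin inline
instead of calling libatomic.  As a rough sketch of the underlying technique
(not the patch's actual sync.md expansion, which emits an LR.W/SC.W loop in
RTL), the C below emulates a 16-bit fetch-and-add with a word-sized
compare-and-swap on the aligned 32-bit word that contains the short, assuming
a little-endian target; the helper name subword_fetch_add is hypothetical.

#include <stdint.h>

static short
subword_fetch_add (short *p, short val)
{
  uintptr_t addr = (uintptr_t) p;
  uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3); /* containing word */
  int shift = (addr & 3) * 8;                 /* bit offset of the short */
  uint32_t mask = 0xffffu << shift;

  uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
  uint32_t new_word;
  do
    {
      uint32_t field = (old & mask) >> shift;           /* current value */
      uint32_t sum = (field + (uint16_t) val) & 0xffffu;
      new_word = (old & ~mask) | (sum << shift);        /* splice back in */
    }
  while (!__atomic_compare_exchange_n (word, &old, new_word, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
  return (short) ((old & mask) >> shift);     /* previous subword value */
}

The other fetch-and-op builtins differ only in the line that computes sum;
nand additionally inverts the intermediate result before masking it back in.
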
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-5.c
@@ -0,0 +1,87 @@
+/* Test __atomic routines for existence and proper execution on 1 byte
+   values with each valid memory model.  */
+/* Duplicates the logic of libatomic/testsuite/libatomic.c/atomic-compare-exchange-1.c.  */
+/* { dg-do run } */
+/* { dg-options "-minline-atomics" } */
+
+/* Test the execution of the __atomic_compare_exchange_n builtin for a char.  */
+
+extern void abort(void);
+
+char v = 0;
+char expected = 0;
+char max = ~0;
+char desired = ~0;
+char zero = 0;
+
+#define STRONG 0
+#define WEAK 1
+
+int
+main ()
+{
+
+  if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != max)
+    abort ();
+
+  if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != max)
+    abort ();
+  if (v != 0)
+    abort ();
+
+  if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+    abort ();
+  if (expected != 0)
+    abort ();
+  if (v != max)
+    abort ();
+
+  /* Now test the generic version.  */
+
+  v = 0;
+
+  if (!__atomic_compare_exchange (&v, &expected, &max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (__atomic_compare_exchange (&v, &expected, &zero, STRONG, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != max)
+    abort ();
+
+  if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != max)
+    abort ();
+  if (v != 0)
+    abort ();
+
+  if (__atomic_compare_exchange (&v, &expected, &desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+    abort ();
+  if (expected != 0)
+    abort ();
+  if (v != max)
+    abort ();
+
+  return 0;
+}
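
The compare-exchange tests are the most demanding case for a subword
expansion: a mismatch in the target byte must fail and report the observed
value, while a concurrent change to the other bytes of the containing word
must only trigger a retry, never a spurious failure.  A minimal C sketch of
that structure follows, with a hypothetical helper subword_cas; the patch
itself achieves the same effect with an LR.W/SC.W loop.

#include <stdint.h>

static int
subword_cas (char *p, char *expected, char desired)
{
  uintptr_t addr = (uintptr_t) p;
  uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3);
  int shift = (addr & 3) * 8;
  uint32_t mask = 0xffu << shift;

  uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
  for (;;)
    {
      char current = (char) ((old & mask) >> shift);
      if (current != *expected)
        {
          *expected = current;        /* report the value actually seen */
          return 0;                   /* genuine mismatch: fail */
        }
      uint32_t new_word = (old & ~mask)
                          | (((uint32_t) (unsigned char) desired) << shift);
      if (__atomic_compare_exchange_n (word, &old, new_word, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
        return 1;                     /* success */
      /* CAS refreshed old: another byte changed, so re-check and retry.  */
    }
}
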
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-6.c
@@ -0,0 +1,87 @@
+/* Test __atomic routines for existence and proper execution on 2 byte
+   values with each valid memory model.  */
+/* Duplicates the logic of libatomic/testsuite/libatomic.c/atomic-compare-exchange-2.c.  */
+/* { dg-do run } */
+/* { dg-options "-minline-atomics" } */
+
+/* Test the execution of the __atomic_compare_exchange_n builtin for a short.  */
+
+extern void abort(void);
+
+short v = 0;
+short expected = 0;
+short max = ~0;
+short desired = ~0;
+short zero = 0;
+
+#define STRONG 0
+#define WEAK 1
+
+int
+main ()
+{
+
+  if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != max)
+    abort ();
+
+  if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != max)
+    abort ();
+  if (v != 0)
+    abort ();
+
+  if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+    abort ();
+  if (expected != 0)
+    abort ();
+  if (v != max)
+    abort ();
+
+  /* Now test the generic version.  */
+
+  v = 0;
+
+  if (!__atomic_compare_exchange (&v, &expected, &max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (__atomic_compare_exchange (&v, &expected, &zero, STRONG, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+    abort ();
+  if (expected != max)
+    abort ();
+
+  if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != max)
+    abort ();
+  if (v != 0)
+    abort ();
+
+  if (__atomic_compare_exchange (&v, &expected, &desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
+    abort ();
+  if (expected != 0)
+    abort ();
+
+  if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+    abort ();
+  if (expected != 0)
+    abort ();
+  if (v != max)
+    abort ();
+
+  return 0;
+}
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-7.c
@@ -0,0 +1,69 @@
+/* Test __atomic routines for existence and proper execution on 1 byte
+   values with each valid memory model.  */
+/* Duplicates the logic of libatomic/testsuite/libatomic.c/atomic-exchange-1.c.  */
+/* { dg-do run } */
+/* { dg-options "-minline-atomics" } */
+
+/* Test the execution of the __atomic_exchange_n builtin for a char.  */
+
+extern void abort(void);
+
+char v, count, ret;
+
+int
+main ()
+{
+  v = 0;
+  count = 0;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELAXED) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQUIRE) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELEASE) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQ_REL) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_SEQ_CST) != count)
+    abort ();
+  count++;
+
+  /* Now test the generic version.  */
+
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_RELAXED);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQUIRE);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_RELEASE);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQ_REL);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_SEQ_CST);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  return 0;
+}
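
Exchange is the simplest of the subword patterns: the new value
unconditionally replaces the field, so the loop only retries when another
thread modified the containing word in the meantime.  A minimal sketch under
the same assumptions as before (hypothetical helper, little-endian layout):

#include <stdint.h>

static char
subword_exchange (char *p, char val)
{
  uintptr_t addr = (uintptr_t) p;
  uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3);
  int shift = (addr & 3) * 8;
  uint32_t mask = 0xffu << shift;

  uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
  uint32_t new_word;
  do
    new_word = (old & ~mask) | (((uint32_t) (unsigned char) val) << shift);
  while (!__atomic_compare_exchange_n (word, &old, new_word, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
  return (char) ((old & mask) >> shift);      /* previous subword value */
}
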
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-8.c
@@ -0,0 +1,69 @@
+/* Test __atomic routines for existence and proper execution on 2 byte
+   values with each valid memory model.  */
+/* Duplicates the logic of libatomic/testsuite/libatomic.c/atomic-exchange-2.c.  */
+/* { dg-do run } */
+/* { dg-options "-minline-atomics" } */
+
+/* Test the execution of the __atomic_exchange_n builtin for a short.  */
+
+extern void abort(void);
+
+short v, count, ret;
+
+int
+main ()
+{
+  v = 0;
+  count = 0;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELAXED) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQUIRE) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELEASE) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQ_REL) != count)
+    abort ();
+  count++;
+
+  if (__atomic_exchange_n (&v, count + 1, __ATOMIC_SEQ_CST) != count)
+    abort ();
+  count++;
+
+  /* Now test the generic version.  */
+
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_RELAXED);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQUIRE);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_RELEASE);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQ_REL);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  __atomic_exchange (&v, &count, &ret, __ATOMIC_SEQ_CST);
+  if (ret != count - 1 || v != count)
+    abort ();
+  count++;
+
+  return 0;
+}
--- a/libgcc/config/riscv/atomic.c
+++ b/libgcc/config/riscv/atomic.c
@@ -30,6 +30,8 @@ see the files COPYING3 and COPYING.RUNTI
 #define INVERT "not %[tmp1], %[tmp1]\n\t"
 #define DONT_INVERT ""
 
+/* Logic duplicated in gcc/gcc/config/riscv/sync.md for use when inlining is enabled.  */
+
 #define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop) \
 type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v) \
 { \