[extraction artifact removed: this span was the concatenated line-number gutter (1–2021) of the original patch file, not patch content. The actual patch begins below.]
- From f797260adaf52bee0ec0e16190bbefbe1bfc3692 Mon Sep 17 00:00:00 2001
- From: Patrick O'Neill <patrick@rivosinc.com>
- Date: Tue, 18 Apr 2023 14:33:13 -0700
- Subject: [PATCH] RISCV: Inline subword atomic ops
- RISC-V has no support for subword atomic operations; code currently
- generates libatomic library calls.
- This patch changes the default behavior to inline subword atomic calls
- (using the same logic as the existing library call).
- Behavior can be specified using the -minline-atomics and
- -mno-inline-atomics command line flags.
- gcc/libgcc/config/riscv/atomic.c has the same logic implemented in asm.
- This will need to stay for backwards compatibility and the
- -mno-inline-atomics flag.
- 2023-04-18 Patrick O'Neill <patrick@rivosinc.com>
- gcc/ChangeLog:
- PR target/104338
- * config/riscv/riscv-protos.h: Add helper function stubs.
- * config/riscv/riscv.cc: Add helper functions for subword masking.
- * config/riscv/riscv.opt: Add command-line flag.
- * config/riscv/sync.md: Add masking logic and inline asm for fetch_and_op,
- fetch_and_nand, CAS, and exchange ops.
- * doc/invoke.texi: Add blurb regarding command-line flag.
- libgcc/ChangeLog:
- PR target/104338
- * config/riscv/atomic.c: Add reference to duplicate logic.
- gcc/testsuite/ChangeLog:
- PR target/104338
- * gcc.target/riscv/inline-atomics-1.c: New test.
- * gcc.target/riscv/inline-atomics-2.c: New test.
- * gcc.target/riscv/inline-atomics-3.c: New test.
- * gcc.target/riscv/inline-atomics-4.c: New test.
- * gcc.target/riscv/inline-atomics-5.c: New test.
- * gcc.target/riscv/inline-atomics-6.c: New test.
- * gcc.target/riscv/inline-atomics-7.c: New test.
- * gcc.target/riscv/inline-atomics-8.c: New test.
- Signed-off-by: Patrick O'Neill <patrick@rivosinc.com>
- Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
- ---
- gcc/config/riscv/riscv-protos.h | 2 +
- gcc/config/riscv/riscv.cc | 49 ++
- gcc/config/riscv/riscv.opt | 4 +
- gcc/config/riscv/sync.md | 301 +++++++++
- gcc/doc/invoke.texi | 10 +-
- .../gcc.target/riscv/inline-atomics-1.c | 18 +
- .../gcc.target/riscv/inline-atomics-2.c | 9 +
- .../gcc.target/riscv/inline-atomics-3.c | 569 ++++++++++++++++++
- .../gcc.target/riscv/inline-atomics-4.c | 566 +++++++++++++++++
- .../gcc.target/riscv/inline-atomics-5.c | 87 +++
- .../gcc.target/riscv/inline-atomics-6.c | 87 +++
- .../gcc.target/riscv/inline-atomics-7.c | 69 +++
- .../gcc.target/riscv/inline-atomics-8.c | 69 +++
- libgcc/config/riscv/atomic.c | 2 +
- 14 files changed, 1841 insertions(+), 1 deletion(-)
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-1.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-2.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-3.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-4.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-5.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-6.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-7.c
- create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-8.c
- --- a/gcc/config/riscv/riscv-protos.h
- +++ b/gcc/config/riscv/riscv-protos.h
- @@ -79,6 +79,8 @@ extern void riscv_reinit (void);
- extern poly_uint64 riscv_regmode_natural_size (machine_mode);
- extern bool riscv_v_ext_vector_mode_p (machine_mode);
- extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT);
- +extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *);
- +extern void riscv_lshift_subword (machine_mode, rtx, rtx, rtx *);
-
- /* Routines implemented in riscv-c.cc. */
- void riscv_cpu_cpp_builtins (cpp_reader *);
- --- a/gcc/config/riscv/riscv.cc
- +++ b/gcc/config/riscv/riscv.cc
- @@ -7143,6 +7143,55 @@ riscv_zero_call_used_regs (HARD_REG_SET
- & ~zeroed_hardregs);
- }
-
- +/* Given memory reference MEM, expand code to compute the aligned
- + memory address, shift and mask values and store them into
- + *ALIGNED_MEM, *SHIFT, *MASK and *NOT_MASK. */
- +
- +void
- +riscv_subword_address (rtx mem, rtx *aligned_mem, rtx *shift, rtx *mask,
- + rtx *not_mask)
- +{
- + /* Align the memory address to a word. */
- + rtx addr = force_reg (Pmode, XEXP (mem, 0));
- +
- + rtx addr_mask = gen_int_mode (-4, Pmode);
- +
- + rtx aligned_addr = gen_reg_rtx (Pmode);
- + emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, addr_mask));
- +
- + *aligned_mem = change_address (mem, SImode, aligned_addr);
- +
- + /* Calculate the shift amount. */
- + emit_move_insn (*shift, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
- + gen_int_mode (3, SImode)));
- + emit_move_insn (*shift, gen_rtx_ASHIFT (SImode, *shift,
- + gen_int_mode (3, SImode)));
- +
- + /* Calculate the mask. */
- + int unshifted_mask = GET_MODE_MASK (GET_MODE (mem));
- +
- + emit_move_insn (*mask, gen_int_mode (unshifted_mask, SImode));
- +
- + emit_move_insn (*mask, gen_rtx_ASHIFT (SImode, *mask,
- + gen_lowpart (QImode, *shift)));
- +
- + emit_move_insn (*not_mask, gen_rtx_NOT(SImode, *mask));
- +}
- +
- +/* Leftshift a subword within an SImode register. */
- +
- +void
- +riscv_lshift_subword (machine_mode mode, rtx value, rtx shift,
- + rtx *shifted_value)
- +{
- + rtx value_reg = gen_reg_rtx (SImode);
- + emit_move_insn (value_reg, simplify_gen_subreg (SImode, value,
- + mode, 0));
- +
- + emit_move_insn(*shifted_value, gen_rtx_ASHIFT (SImode, value_reg,
- + gen_lowpart (QImode, shift)));
- +}
- +
- /* Initialize the GCC target structure. */
- #undef TARGET_ASM_ALIGNED_HI_OP
- #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
- --- a/gcc/config/riscv/riscv.opt
- +++ b/gcc/config/riscv/riscv.opt
- @@ -238,6 +238,10 @@ int riscv_sv_subext
- TargetVariable
- int riscv_xthead_subext
-
- +minline-atomics
- +Target Var(TARGET_INLINE_SUBWORD_ATOMIC) Init(1)
- +Always inline subword atomic operations.
- +
- Enum
- Name(isa_spec_class) Type(enum riscv_isa_spec_class)
- Supported ISA specs (for use with the -misa-spec= option):
- --- a/gcc/config/riscv/sync.md
- +++ b/gcc/config/riscv/sync.md
- @@ -21,8 +21,11 @@
-
- (define_c_enum "unspec" [
- UNSPEC_COMPARE_AND_SWAP
- + UNSPEC_COMPARE_AND_SWAP_SUBWORD
- UNSPEC_SYNC_OLD_OP
- + UNSPEC_SYNC_OLD_OP_SUBWORD
- UNSPEC_SYNC_EXCHANGE
- + UNSPEC_SYNC_EXCHANGE_SUBWORD
- UNSPEC_ATOMIC_STORE
- UNSPEC_MEMORY_BARRIER
- ])
- @@ -91,6 +94,135 @@
- [(set_attr "type" "atomic")
- (set (attr "length") (const_int 8))])
-
- +(define_insn "subword_atomic_fetch_strong_<atomic_optab>"
- + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
- + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(any_atomic:SI (match_dup 1)
- + (match_operand:SI 2 "register_operand" "rI")) ;; value for op
- + (match_operand:SI 3 "register_operand" "rI")] ;; mask
- + UNSPEC_SYNC_OLD_OP_SUBWORD))
- + (match_operand:SI 4 "register_operand" "rI") ;; not_mask
- + (clobber (match_scratch:SI 5 "=&r")) ;; tmp_1
- + (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_2
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- + {
- + return "1:\;"
- + "lr.w.aq\t%0, %1\;"
- + "<insn>\t%5, %0, %2\;"
- + "and\t%5, %5, %3\;"
- + "and\t%6, %0, %4\;"
- + "or\t%6, %6, %5\;"
- + "sc.w.rl\t%5, %6, %1\;"
- + "bnez\t%5, 1b";
- + }
- + [(set (attr "length") (const_int 28))])
- +
- +(define_expand "atomic_fetch_nand<mode>"
- + [(match_operand:SHORT 0 "register_operand") ;; old value at mem
- + (not:SHORT (and:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
- + (match_operand:SHORT 2 "reg_or_0_operand"))) ;; value for op
- + (match_operand:SI 3 "const_int_operand")] ;; model
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- +{
- + /* We have no QImode/HImode atomics, so form a mask, then use
- + subword_atomic_fetch_strong_nand to implement a LR/SC version of the
- + operation. */
- +
- + /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
- + is disabled */
- +
- + rtx old = gen_reg_rtx (SImode);
- + rtx mem = operands[1];
- + rtx value = operands[2];
- + rtx aligned_mem = gen_reg_rtx (SImode);
- + rtx shift = gen_reg_rtx (SImode);
- + rtx mask = gen_reg_rtx (SImode);
- + rtx not_mask = gen_reg_rtx (SImode);
- +
- + riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask);
- +
- + rtx shifted_value = gen_reg_rtx (SImode);
- + riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
- +
- + emit_insn (gen_subword_atomic_fetch_strong_nand (old, aligned_mem,
- + shifted_value,
- + mask, not_mask));
- +
- + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
- + gen_lowpart (QImode, shift)));
- +
- + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
- +
- + DONE;
- +})
- +
- +(define_insn "subword_atomic_fetch_strong_nand"
- + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
- + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(not:SI (and:SI (match_dup 1)
- + (match_operand:SI 2 "register_operand" "rI"))) ;; value for op
- + (match_operand:SI 3 "register_operand" "rI")] ;; mask
- + UNSPEC_SYNC_OLD_OP_SUBWORD))
- + (match_operand:SI 4 "register_operand" "rI") ;; not_mask
- + (clobber (match_scratch:SI 5 "=&r")) ;; tmp_1
- + (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_2
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- + {
- + return "1:\;"
- + "lr.w.aq\t%0, %1\;"
- + "and\t%5, %0, %2\;"
- + "not\t%5, %5\;"
- + "and\t%5, %5, %3\;"
- + "and\t%6, %0, %4\;"
- + "or\t%6, %6, %5\;"
- + "sc.w.rl\t%5, %6, %1\;"
- + "bnez\t%5, 1b";
- + }
- + [(set (attr "length") (const_int 32))])
- +
- +(define_expand "atomic_fetch_<atomic_optab><mode>"
- + [(match_operand:SHORT 0 "register_operand") ;; old value at mem
- + (any_atomic:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location
- + (match_operand:SHORT 2 "reg_or_0_operand")) ;; value for op
- + (match_operand:SI 3 "const_int_operand")] ;; model
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- +{
- + /* We have no QImode/HImode atomics, so form a mask, then use
- + subword_atomic_fetch_strong_<mode> to implement a LR/SC version of the
- + operation. */
- +
- + /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
- + is disabled */
- +
- + rtx old = gen_reg_rtx (SImode);
- + rtx mem = operands[1];
- + rtx value = operands[2];
- + rtx aligned_mem = gen_reg_rtx (SImode);
- + rtx shift = gen_reg_rtx (SImode);
- + rtx mask = gen_reg_rtx (SImode);
- + rtx not_mask = gen_reg_rtx (SImode);
- +
- + riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask);
- +
- + rtx shifted_value = gen_reg_rtx (SImode);
- + riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
- +
- + emit_insn (gen_subword_atomic_fetch_strong_<atomic_optab> (old, aligned_mem,
- + shifted_value,
- + mask, not_mask));
- +
- + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
- + gen_lowpart (QImode, shift)));
- +
- + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
- +
- + DONE;
- +})
- +
- (define_insn "atomic_exchange<mode>"
- [(set (match_operand:GPR 0 "register_operand" "=&r")
- (unspec_volatile:GPR
- @@ -104,6 +236,56 @@
- [(set_attr "type" "atomic")
- (set (attr "length") (const_int 8))])
-
- +(define_expand "atomic_exchange<mode>"
- + [(match_operand:SHORT 0 "register_operand") ;; old value at mem
- + (match_operand:SHORT 1 "memory_operand") ;; mem location
- + (match_operand:SHORT 2 "register_operand") ;; value
- + (match_operand:SI 3 "const_int_operand")] ;; model
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- +{
- + rtx old = gen_reg_rtx (SImode);
- + rtx mem = operands[1];
- + rtx value = operands[2];
- + rtx aligned_mem = gen_reg_rtx (SImode);
- + rtx shift = gen_reg_rtx (SImode);
- + rtx mask = gen_reg_rtx (SImode);
- + rtx not_mask = gen_reg_rtx (SImode);
- +
- + riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask);
- +
- + rtx shifted_value = gen_reg_rtx (SImode);
- + riscv_lshift_subword (<MODE>mode, value, shift, &shifted_value);
- +
- + emit_insn (gen_subword_atomic_exchange_strong (old, aligned_mem,
- + shifted_value, not_mask));
- +
- + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
- + gen_lowpart (QImode, shift)));
- +
- + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
- + DONE;
- +})
- +
- +(define_insn "subword_atomic_exchange_strong"
- + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
- + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
- + (set (match_dup 1)
- + (unspec_volatile:SI
- + [(match_operand:SI 2 "reg_or_0_operand" "rI") ;; value
- + (match_operand:SI 3 "reg_or_0_operand" "rI")] ;; not_mask
- + UNSPEC_SYNC_EXCHANGE_SUBWORD))
- + (clobber (match_scratch:SI 4 "=&r"))] ;; tmp_1
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- + {
- + return "1:\;"
- + "lr.w.aq\t%0, %1\;"
- + "and\t%4, %0, %3\;"
- + "or\t%4, %4, %2\;"
- + "sc.w.rl\t%4, %4, %1\;"
- + "bnez\t%4, 1b";
- + }
- + [(set (attr "length") (const_int 20))])
- +
- (define_insn "atomic_cas_value_strong<mode>"
- [(set (match_operand:GPR 0 "register_operand" "=&r")
- (match_operand:GPR 1 "memory_operand" "+A"))
- @@ -153,6 +335,125 @@
- DONE;
- })
-
- +(define_expand "atomic_compare_and_swap<mode>"
- + [(match_operand:SI 0 "register_operand") ;; bool output
- + (match_operand:SHORT 1 "register_operand") ;; val output
- + (match_operand:SHORT 2 "memory_operand") ;; memory
- + (match_operand:SHORT 3 "reg_or_0_operand") ;; expected value
- + (match_operand:SHORT 4 "reg_or_0_operand") ;; desired value
- + (match_operand:SI 5 "const_int_operand") ;; is_weak
- + (match_operand:SI 6 "const_int_operand") ;; mod_s
- + (match_operand:SI 7 "const_int_operand")] ;; mod_f
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- +{
- + emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
- + operands[3], operands[4],
- + operands[6], operands[7]));
- +
- + rtx val = gen_reg_rtx (SImode);
- + if (operands[1] != const0_rtx)
- + emit_move_insn (val, gen_rtx_SIGN_EXTEND (SImode, operands[1]));
- + else
- + emit_move_insn (val, const0_rtx);
- +
- + rtx exp = gen_reg_rtx (SImode);
- + if (operands[3] != const0_rtx)
- + emit_move_insn (exp, gen_rtx_SIGN_EXTEND (SImode, operands[3]));
- + else
- + emit_move_insn (exp, const0_rtx);
- +
- + rtx compare = val;
- + if (exp != const0_rtx)
- + {
- + rtx difference = gen_rtx_MINUS (SImode, val, exp);
- + compare = gen_reg_rtx (SImode);
- + emit_move_insn (compare, difference);
- + }
- +
- + if (word_mode != SImode)
- + {
- + rtx reg = gen_reg_rtx (word_mode);
- + emit_move_insn (reg, gen_rtx_SIGN_EXTEND (word_mode, compare));
- + compare = reg;
- + }
- +
- + emit_move_insn (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx));
- + DONE;
- +})
- +
- +(define_expand "atomic_cas_value_strong<mode>"
- + [(match_operand:SHORT 0 "register_operand") ;; val output
- + (match_operand:SHORT 1 "memory_operand") ;; memory
- + (match_operand:SHORT 2 "reg_or_0_operand") ;; expected value
- + (match_operand:SHORT 3 "reg_or_0_operand") ;; desired value
- + (match_operand:SI 4 "const_int_operand") ;; mod_s
- + (match_operand:SI 5 "const_int_operand") ;; mod_f
- + (match_scratch:SHORT 6)]
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- +{
- + /* We have no QImode/HImode atomics, so form a mask, then use
- + subword_atomic_cas_strong<mode> to implement a LR/SC version of the
- + operation. */
- +
- + /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining
- + is disabled */
- +
- + rtx old = gen_reg_rtx (SImode);
- + rtx mem = operands[1];
- + rtx aligned_mem = gen_reg_rtx (SImode);
- + rtx shift = gen_reg_rtx (SImode);
- + rtx mask = gen_reg_rtx (SImode);
- + rtx not_mask = gen_reg_rtx (SImode);
- +
- + riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask);
- +
- + rtx o = operands[2];
- + rtx n = operands[3];
- + rtx shifted_o = gen_reg_rtx (SImode);
- + rtx shifted_n = gen_reg_rtx (SImode);
- +
- + riscv_lshift_subword (<MODE>mode, o, shift, &shifted_o);
- + riscv_lshift_subword (<MODE>mode, n, shift, &shifted_n);
- +
- + emit_move_insn (shifted_o, gen_rtx_AND (SImode, shifted_o, mask));
- + emit_move_insn (shifted_n, gen_rtx_AND (SImode, shifted_n, mask));
- +
- + emit_insn (gen_subword_atomic_cas_strong (old, aligned_mem,
- + shifted_o, shifted_n,
- + mask, not_mask));
- +
- + emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old,
- + gen_lowpart (QImode, shift)));
- +
- + emit_move_insn (operands[0], gen_lowpart (<MODE>mode, old));
- +
- + DONE;
- +})
- +
- +(define_insn "subword_atomic_cas_strong"
- + [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem
- + (match_operand:SI 1 "memory_operand" "+A")) ;; mem location
- + (set (match_dup 1)
- + (unspec_volatile:SI [(match_operand:SI 2 "reg_or_0_operand" "rJ") ;; expected value
- + (match_operand:SI 3 "reg_or_0_operand" "rJ")] ;; desired value
- + UNSPEC_COMPARE_AND_SWAP_SUBWORD))
- + (match_operand:SI 4 "register_operand" "rI") ;; mask
- + (match_operand:SI 5 "register_operand" "rI") ;; not_mask
- + (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_1
- + "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC"
- + {
- + return "1:\;"
- + "lr.w.aq\t%0, %1\;"
- + "and\t%6, %0, %4\;"
- + "bne\t%6, %z2, 1f\;"
- + "and\t%6, %0, %5\;"
- + "or\t%6, %6, %3\;"
- + "sc.w.rl\t%6, %6, %1\;"
- + "bnez\t%6, 1b\;"
- + "1:";
- + }
- + [(set (attr "length") (const_int 28))])
- +
- (define_expand "atomic_test_and_set"
- [(match_operand:QI 0 "register_operand" "") ;; bool output
- (match_operand:QI 1 "memory_operand" "+A") ;; memory
- --- a/gcc/doc/invoke.texi
- +++ b/gcc/doc/invoke.texi
- @@ -1226,7 +1226,8 @@ See RS/6000 and PowerPC Options.
- -mbig-endian -mlittle-endian
- -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{reg}
- -mstack-protector-guard-offset=@var{offset}
- --mcsr-check -mno-csr-check}
- +-mcsr-check -mno-csr-check
- +-minline-atomics -mno-inline-atomics}
-
- @emph{RL78 Options}
- @gccoptlist{-msim -mmul=none -mmul=g13 -mmul=g14 -mallregs
- @@ -29006,6 +29007,13 @@ Do or don't use smaller but slower prolo
- library function calls. The default is to use fast inline prologues and
- epilogues.
-
- +@opindex minline-atomics
- +@item -minline-atomics
- +@itemx -mno-inline-atomics
- +Do or don't use smaller but slower subword atomic emulation code that uses
- +libatomic function calls. The default is to use fast inline subword atomics
- +that do not require libatomic.
- +
- @opindex mshorten-memrefs
- @item -mshorten-memrefs
- @itemx -mno-shorten-memrefs
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-1.c
- @@ -0,0 +1,18 @@
- +/* { dg-do compile } */
- +/* { dg-options "-mno-inline-atomics" } */
- +/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "fetch_and_nand" { target *-*-* } 0 } */
- +/* { dg-final { scan-assembler "\tcall\t__sync_fetch_and_add_1" } } */
- +/* { dg-final { scan-assembler "\tcall\t__sync_fetch_and_nand_1" } } */
- +/* { dg-final { scan-assembler "\tcall\t__sync_bool_compare_and_swap_1" } } */
- +
- +char foo;
- +char bar;
- +char baz;
- +
- +int
- +main ()
- +{
- + __sync_fetch_and_add(&foo, 1);
- + __sync_fetch_and_nand(&bar, 1);
- + __sync_bool_compare_and_swap (&baz, 1, 2);
- +}
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-2.c
- @@ -0,0 +1,9 @@
- +/* { dg-do compile } */
- +/* Verify that subword atomics do not generate calls. */
- +/* { dg-options "-minline-atomics" } */
- +/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "fetch_and_nand" { target *-*-* } 0 } */
- +/* { dg-final { scan-assembler-not "\tcall\t__sync_fetch_and_add_1" } } */
- +/* { dg-final { scan-assembler-not "\tcall\t__sync_fetch_and_nand_1" } } */
- +/* { dg-final { scan-assembler-not "\tcall\t__sync_bool_compare_and_swap_1" } } */
- +
- +#include "inline-atomics-1.c"
- \ No newline at end of file
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-3.c
- @@ -0,0 +1,569 @@
- +/* Check all char alignments. */
- +/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-op-1.c */
- +/* Test __atomic routines for existence and proper execution on 1 byte
- + values with each valid memory model. */
- +/* { dg-do run } */
- +/* { dg-options "-minline-atomics -Wno-address-of-packed-member" } */
- +
- +/* Test the execution of the __atomic_*OP builtin routines for a char. */
- +
- +extern void abort(void);
- +
- +char count, res;
- +const char init = ~0;
- +
- +struct A
- +{
- + char a;
- + char b;
- + char c;
- + char d;
- +} __attribute__ ((packed)) A;
- +
- +/* The fetch_op routines return the original value before the operation. */
- +
- +void
- +test_fetch_add (char* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_fetch_add (v, count, __ATOMIC_RELAXED) != 0)
- + abort ();
- +
- + if (__atomic_fetch_add (v, 1, __ATOMIC_CONSUME) != 1)
- + abort ();
- +
- + if (__atomic_fetch_add (v, count, __ATOMIC_ACQUIRE) != 2)
- + abort ();
- +
- + if (__atomic_fetch_add (v, 1, __ATOMIC_RELEASE) != 3)
- + abort ();
- +
- + if (__atomic_fetch_add (v, count, __ATOMIC_ACQ_REL) != 4)
- + abort ();
- +
- + if (__atomic_fetch_add (v, 1, __ATOMIC_SEQ_CST) != 5)
- + abort ();
- +}
- +
- +
- +void
- +test_fetch_sub (char* v)
- +{
- + *v = res = 20;
- + count = 0;
- +
- + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_RELAXED) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, 1, __ATOMIC_CONSUME) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQUIRE) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, 1, __ATOMIC_RELEASE) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQ_REL) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, 1, __ATOMIC_SEQ_CST) != res--)
- + abort ();
- +}
- +
- +void
- +test_fetch_and (char* v)
- +{
- + *v = init;
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_fetch_and (v, init, __ATOMIC_CONSUME) != 0)
- + abort ();
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + *v = ~*v;
- + if (__atomic_fetch_and (v, init, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST) != 0)
- + abort ();
- +}
- +
- +void
- +test_fetch_nand (char* v)
- +{
- + *v = init;
- +
- + if (__atomic_fetch_nand (v, 0, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, init, __ATOMIC_CONSUME) != init)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, 0, __ATOMIC_ACQUIRE) != 0 )
- + abort ();
- +
- + if (__atomic_fetch_nand (v, init, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL) != 0)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, 0, __ATOMIC_SEQ_CST) != init)
- + abort ();
- +}
- +
- +void
- +test_fetch_xor (char* v)
- +{
- + *v = init;
- + count = 0;
- +
- + if (__atomic_fetch_xor (v, count, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME) != init)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE) != 0)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, ~count, __ATOMIC_SEQ_CST) != init)
- + abort ();
- +}
- +
- +void
- +test_fetch_or (char* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_fetch_or (v, count, __ATOMIC_RELAXED) != 0)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, 2, __ATOMIC_CONSUME) != 1)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, count, __ATOMIC_ACQUIRE) != 3)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, 8, __ATOMIC_RELEASE) != 7)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, count, __ATOMIC_ACQ_REL) != 15)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, count, __ATOMIC_SEQ_CST) != 31)
- + abort ();
- +}
- +
- +/* The OP_fetch routines return the new value after the operation. */
- +
- +void
- +test_add_fetch (char* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_RELAXED) != 1)
- + abort ();
- +
- + if (__atomic_add_fetch (v, 1, __ATOMIC_CONSUME) != 2)
- + abort ();
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_ACQUIRE) != 3)
- + abort ();
- +
- + if (__atomic_add_fetch (v, 1, __ATOMIC_RELEASE) != 4)
- + abort ();
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_ACQ_REL) != 5)
- + abort ();
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_SEQ_CST) != 6)
- + abort ();
- +}
- +
- +
- +void
- +test_sub_fetch (char* v)
- +{
- + *v = res = 20;
- + count = 0;
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, 1, __ATOMIC_CONSUME) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQUIRE) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, 1, __ATOMIC_RELEASE) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_SEQ_CST) != --res)
- + abort ();
- +}
- +
- +void
- +test_and_fetch (char* v)
- +{
- + *v = init;
- +
- + if (__atomic_and_fetch (v, 0, __ATOMIC_RELAXED) != 0)
- + abort ();
- +
- + *v = init;
- + if (__atomic_and_fetch (v, init, __ATOMIC_CONSUME) != init)
- + abort ();
- +
- + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + *v = ~*v;
- + if (__atomic_and_fetch (v, init, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL) != 0)
- + abort ();
- +
- + *v = ~*v;
- + if (__atomic_and_fetch (v, 0, __ATOMIC_SEQ_CST) != 0)
- + abort ();
- +}
- +
- +void
- +test_nand_fetch (char* v)
- +{
- + *v = init;
- +
- + if (__atomic_nand_fetch (v, 0, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, init, __ATOMIC_CONSUME) != 0)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE) != init)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, init, __ATOMIC_RELEASE) != 0)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, init, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST) != init)
- + abort ();
- +}
- +
- +
- +
- +void
- +test_xor_fetch (char* v)
- +{
- + *v = init;
- + count = 0;
- +
- + if (__atomic_xor_fetch (v, count, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, ~count, __ATOMIC_CONSUME) != 0)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, ~count, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST) != 0)
- + abort ();
- +}
- +
- +void
- +test_or_fetch (char* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_or_fetch (v, count, __ATOMIC_RELAXED) != 1)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, 2, __ATOMIC_CONSUME) != 3)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, count, __ATOMIC_ACQUIRE) != 7)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, 8, __ATOMIC_RELEASE) != 15)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, count, __ATOMIC_ACQ_REL) != 31)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, count, __ATOMIC_SEQ_CST) != 63)
- + abort ();
- +}
- +
- +
- +/* Test the OP routines with a result which isn't used. Use both variations
- + within each function. */
- +
- +void
- +test_add (char* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + __atomic_add_fetch (v, count, __ATOMIC_RELAXED);
- + if (*v != 1)
- + abort ();
- +
- + __atomic_fetch_add (v, count, __ATOMIC_CONSUME);
- + if (*v != 2)
- + abort ();
- +
- + __atomic_add_fetch (v, 1 , __ATOMIC_ACQUIRE);
- + if (*v != 3)
- + abort ();
- +
- + __atomic_fetch_add (v, 1, __ATOMIC_RELEASE);
- + if (*v != 4)
- + abort ();
- +
- + __atomic_add_fetch (v, count, __ATOMIC_ACQ_REL);
- + if (*v != 5)
- + abort ();
- +
- + __atomic_fetch_add (v, count, __ATOMIC_SEQ_CST);
- + if (*v != 6)
- + abort ();
- +}
- +
- +
- +void
- +test_sub (char* v)
- +{
- + *v = res = 20;
- + count = 0;
- +
- + __atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_fetch_sub (v, count + 1, __ATOMIC_CONSUME);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_sub_fetch (v, 1, __ATOMIC_ACQUIRE);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_fetch_sub (v, 1, __ATOMIC_RELEASE);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_fetch_sub (v, count + 1, __ATOMIC_SEQ_CST);
- + if (*v != --res)
- + abort ();
- +}
- +
- +void
- +test_and (char* v)
- +{
- + *v = init;
- +
- + __atomic_and_fetch (v, 0, __ATOMIC_RELAXED);
- + if (*v != 0)
- + abort ();
- +
- + *v = init;
- + __atomic_fetch_and (v, init, __ATOMIC_CONSUME);
- + if (*v != init)
- + abort ();
- +
- + __atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE);
- + if (*v != 0)
- + abort ();
- +
- + *v = ~*v;
- + __atomic_fetch_and (v, init, __ATOMIC_RELEASE);
- + if (*v != init)
- + abort ();
- +
- + __atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL);
- + if (*v != 0)
- + abort ();
- +
- + *v = ~*v;
- + __atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST);
- + if (*v != 0)
- + abort ();
- +}
- +
- +void
- +test_nand (char* v)
- +{
- + *v = init;
- +
- + __atomic_fetch_nand (v, 0, __ATOMIC_RELAXED);
- + if (*v != init)
- + abort ();
- +
- + __atomic_fetch_nand (v, init, __ATOMIC_CONSUME);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE);
- + if (*v != init)
- + abort ();
- +
- + __atomic_nand_fetch (v, init, __ATOMIC_RELEASE);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL);
- + if (*v != init)
- + abort ();
- +
- + __atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST);
- + if (*v != init)
- + abort ();
- +}
- +
- +
- +
- +void
- +test_xor (char* v)
- +{
- + *v = init;
- + count = 0;
- +
- + __atomic_xor_fetch (v, count, __ATOMIC_RELAXED);
- + if (*v != init)
- + abort ();
- +
- + __atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE);
- + if (*v != init)
- + abort ();
- +
- + __atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL);
- + if (*v != init)
- + abort ();
- +
- + __atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST);
- + if (*v != 0)
- + abort ();
- +}
- +
- +void
- +test_or (char* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + __atomic_or_fetch (v, count, __ATOMIC_RELAXED);
- + if (*v != 1)
- + abort ();
- +
- + count *= 2;
- + __atomic_fetch_or (v, count, __ATOMIC_CONSUME);
- + if (*v != 3)
- + abort ();
- +
- + count *= 2;
- + __atomic_or_fetch (v, 4, __ATOMIC_ACQUIRE);
- + if (*v != 7)
- + abort ();
- +
- + count *= 2;
- + __atomic_fetch_or (v, 8, __ATOMIC_RELEASE);
- + if (*v != 15)
- + abort ();
- +
- + count *= 2;
- + __atomic_or_fetch (v, count, __ATOMIC_ACQ_REL);
- + if (*v != 31)
- + abort ();
- +
- + count *= 2;
- + __atomic_fetch_or (v, count, __ATOMIC_SEQ_CST);
- + if (*v != 63)
- + abort ();
- +}
- +
- +int
- +main ()
- +{
- + char* V[] = {&A.a, &A.b, &A.c, &A.d};
- +
- + for (int i = 0; i < 4; i++) {
- + test_fetch_add (V[i]);
- + test_fetch_sub (V[i]);
- + test_fetch_and (V[i]);
- + test_fetch_nand (V[i]);
- + test_fetch_xor (V[i]);
- + test_fetch_or (V[i]);
- +
- + test_add_fetch (V[i]);
- + test_sub_fetch (V[i]);
- + test_and_fetch (V[i]);
- + test_nand_fetch (V[i]);
- + test_xor_fetch (V[i]);
- + test_or_fetch (V[i]);
- +
- + test_add (V[i]);
- + test_sub (V[i]);
- + test_and (V[i]);
- + test_nand (V[i]);
- + test_xor (V[i]);
- + test_or (V[i]);
- + }
- +
- + return 0;
- +}
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-4.c
- @@ -0,0 +1,566 @@
- +/* Check all short alignments. */
- +/* Logic duplicated from libatomic/testsuite/libatomic.c/atomic-op-2.c.  */
- +/* Test __atomic routines for existence and proper execution on 2 byte
- + values with each valid memory model. */
- +/* { dg-do run } */
- +/* { dg-options "-minline-atomics -Wno-address-of-packed-member" } */
- +
- +/* Test the execution of the __atomic_*OP builtin routines for a short. */
- +
- +extern void abort(void);
- +
- +short count, res;
- +const short init = ~0;
- +
- +struct A
- +{
- + short a;
- + short b;
- +} __attribute__ ((packed)) A;
- +
- +/* The fetch_op routines return the original value before the operation. */
- +
- +void
- +test_fetch_add (short* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_fetch_add (v, count, __ATOMIC_RELAXED) != 0)
- + abort ();
- +
- + if (__atomic_fetch_add (v, 1, __ATOMIC_CONSUME) != 1)
- + abort ();
- +
- + if (__atomic_fetch_add (v, count, __ATOMIC_ACQUIRE) != 2)
- + abort ();
- +
- + if (__atomic_fetch_add (v, 1, __ATOMIC_RELEASE) != 3)
- + abort ();
- +
- + if (__atomic_fetch_add (v, count, __ATOMIC_ACQ_REL) != 4)
- + abort ();
- +
- + if (__atomic_fetch_add (v, 1, __ATOMIC_SEQ_CST) != 5)
- + abort ();
- +}
- +
- +
- +void
- +test_fetch_sub (short* v)
- +{
- + *v = res = 20;
- + count = 0;
- +
- + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_RELAXED) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, 1, __ATOMIC_CONSUME) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQUIRE) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, 1, __ATOMIC_RELEASE) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQ_REL) != res--)
- + abort ();
- +
- + if (__atomic_fetch_sub (v, 1, __ATOMIC_SEQ_CST) != res--)
- + abort ();
- +}
- +
- +void
- +test_fetch_and (short* v)
- +{
- + *v = init;
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_fetch_and (v, init, __ATOMIC_CONSUME) != 0)
- + abort ();
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + *v = ~*v;
- + if (__atomic_fetch_and (v, init, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST) != 0)
- + abort ();
- +}
- +
- +void
- +test_fetch_nand (short* v)
- +{
- + *v = init;
- +
- + if (__atomic_fetch_nand (v, 0, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, init, __ATOMIC_CONSUME) != init)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, 0, __ATOMIC_ACQUIRE) != 0 )
- + abort ();
- +
- + if (__atomic_fetch_nand (v, init, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL) != 0)
- + abort ();
- +
- + if (__atomic_fetch_nand (v, 0, __ATOMIC_SEQ_CST) != init)
- + abort ();
- +}
- +
- +void
- +test_fetch_xor (short* v)
- +{
- + *v = init;
- + count = 0;
- +
- + if (__atomic_fetch_xor (v, count, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME) != init)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE) != 0)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_fetch_xor (v, ~count, __ATOMIC_SEQ_CST) != init)
- + abort ();
- +}
- +
- +void
- +test_fetch_or (short* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_fetch_or (v, count, __ATOMIC_RELAXED) != 0)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, 2, __ATOMIC_CONSUME) != 1)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, count, __ATOMIC_ACQUIRE) != 3)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, 8, __ATOMIC_RELEASE) != 7)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, count, __ATOMIC_ACQ_REL) != 15)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_fetch_or (v, count, __ATOMIC_SEQ_CST) != 31)
- + abort ();
- +}
- +
- +/* The OP_fetch routines return the new value after the operation. */
- +
- +void
- +test_add_fetch (short* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_RELAXED) != 1)
- + abort ();
- +
- + if (__atomic_add_fetch (v, 1, __ATOMIC_CONSUME) != 2)
- + abort ();
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_ACQUIRE) != 3)
- + abort ();
- +
- + if (__atomic_add_fetch (v, 1, __ATOMIC_RELEASE) != 4)
- + abort ();
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_ACQ_REL) != 5)
- + abort ();
- +
- + if (__atomic_add_fetch (v, count, __ATOMIC_SEQ_CST) != 6)
- + abort ();
- +}
- +
- +
- +void
- +test_sub_fetch (short* v)
- +{
- + *v = res = 20;
- + count = 0;
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, 1, __ATOMIC_CONSUME) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQUIRE) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, 1, __ATOMIC_RELEASE) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL) != --res)
- + abort ();
- +
- + if (__atomic_sub_fetch (v, count + 1, __ATOMIC_SEQ_CST) != --res)
- + abort ();
- +}
- +
- +void
- +test_and_fetch (short* v)
- +{
- + *v = init;
- +
- + if (__atomic_and_fetch (v, 0, __ATOMIC_RELAXED) != 0)
- + abort ();
- +
- + *v = init;
- + if (__atomic_and_fetch (v, init, __ATOMIC_CONSUME) != init)
- + abort ();
- +
- + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + *v = ~*v;
- + if (__atomic_and_fetch (v, init, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL) != 0)
- + abort ();
- +
- + *v = ~*v;
- + if (__atomic_and_fetch (v, 0, __ATOMIC_SEQ_CST) != 0)
- + abort ();
- +}
- +
- +void
- +test_nand_fetch (short* v)
- +{
- + *v = init;
- +
- + if (__atomic_nand_fetch (v, 0, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, init, __ATOMIC_CONSUME) != 0)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE) != init)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, init, __ATOMIC_RELEASE) != 0)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, init, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST) != init)
- + abort ();
- +}
- +
- +
- +
- +void
- +test_xor_fetch (short* v)
- +{
- + *v = init;
- + count = 0;
- +
- + if (__atomic_xor_fetch (v, count, __ATOMIC_RELAXED) != init)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, ~count, __ATOMIC_CONSUME) != 0)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE) != 0)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, ~count, __ATOMIC_RELEASE) != init)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQ_REL) != init)
- + abort ();
- +
- + if (__atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST) != 0)
- + abort ();
- +}
- +
- +void
- +test_or_fetch (short* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + if (__atomic_or_fetch (v, count, __ATOMIC_RELAXED) != 1)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, 2, __ATOMIC_CONSUME) != 3)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, count, __ATOMIC_ACQUIRE) != 7)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, 8, __ATOMIC_RELEASE) != 15)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, count, __ATOMIC_ACQ_REL) != 31)
- + abort ();
- +
- + count *= 2;
- + if (__atomic_or_fetch (v, count, __ATOMIC_SEQ_CST) != 63)
- + abort ();
- +}
- +
- +
- +/* Test the OP routines with a result which isn't used. Use both variations
- + within each function. */
- +
- +void
- +test_add (short* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + __atomic_add_fetch (v, count, __ATOMIC_RELAXED);
- + if (*v != 1)
- + abort ();
- +
- + __atomic_fetch_add (v, count, __ATOMIC_CONSUME);
- + if (*v != 2)
- + abort ();
- +
- + __atomic_add_fetch (v, 1 , __ATOMIC_ACQUIRE);
- + if (*v != 3)
- + abort ();
- +
- + __atomic_fetch_add (v, 1, __ATOMIC_RELEASE);
- + if (*v != 4)
- + abort ();
- +
- + __atomic_add_fetch (v, count, __ATOMIC_ACQ_REL);
- + if (*v != 5)
- + abort ();
- +
- + __atomic_fetch_add (v, count, __ATOMIC_SEQ_CST);
- + if (*v != 6)
- + abort ();
- +}
- +
- +
- +void
- +test_sub (short* v)
- +{
- + *v = res = 20;
- + count = 0;
- +
- + __atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_fetch_sub (v, count + 1, __ATOMIC_CONSUME);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_sub_fetch (v, 1, __ATOMIC_ACQUIRE);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_fetch_sub (v, 1, __ATOMIC_RELEASE);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL);
- + if (*v != --res)
- + abort ();
- +
- + __atomic_fetch_sub (v, count + 1, __ATOMIC_SEQ_CST);
- + if (*v != --res)
- + abort ();
- +}
- +
- +void
- +test_and (short* v)
- +{
- + *v = init;
- +
- + __atomic_and_fetch (v, 0, __ATOMIC_RELAXED);
- + if (*v != 0)
- + abort ();
- +
- + *v = init;
- + __atomic_fetch_and (v, init, __ATOMIC_CONSUME);
- + if (*v != init)
- + abort ();
- +
- + __atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE);
- + if (*v != 0)
- + abort ();
- +
- + *v = ~*v;
- + __atomic_fetch_and (v, init, __ATOMIC_RELEASE);
- + if (*v != init)
- + abort ();
- +
- + __atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL);
- + if (*v != 0)
- + abort ();
- +
- + *v = ~*v;
- + __atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST);
- + if (*v != 0)
- + abort ();
- +}
- +
- +void
- +test_nand (short* v)
- +{
- + *v = init;
- +
- + __atomic_fetch_nand (v, 0, __ATOMIC_RELAXED);
- + if (*v != init)
- + abort ();
- +
- + __atomic_fetch_nand (v, init, __ATOMIC_CONSUME);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE);
- + if (*v != init)
- + abort ();
- +
- + __atomic_nand_fetch (v, init, __ATOMIC_RELEASE);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL);
- + if (*v != init)
- + abort ();
- +
- + __atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST);
- + if (*v != init)
- + abort ();
- +}
- +
- +
- +
- +void
- +test_xor (short* v)
- +{
- + *v = init;
- + count = 0;
- +
- + __atomic_xor_fetch (v, count, __ATOMIC_RELAXED);
- + if (*v != init)
- + abort ();
- +
- + __atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE);
- + if (*v != 0)
- + abort ();
- +
- + __atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE);
- + if (*v != init)
- + abort ();
- +
- + __atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL);
- + if (*v != init)
- + abort ();
- +
- + __atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST);
- + if (*v != 0)
- + abort ();
- +}
- +
- +void
- +test_or (short* v)
- +{
- + *v = 0;
- + count = 1;
- +
- + __atomic_or_fetch (v, count, __ATOMIC_RELAXED);
- + if (*v != 1)
- + abort ();
- +
- + count *= 2;
- + __atomic_fetch_or (v, count, __ATOMIC_CONSUME);
- + if (*v != 3)
- + abort ();
- +
- + count *= 2;
- + __atomic_or_fetch (v, 4, __ATOMIC_ACQUIRE);
- + if (*v != 7)
- + abort ();
- +
- + count *= 2;
- + __atomic_fetch_or (v, 8, __ATOMIC_RELEASE);
- + if (*v != 15)
- + abort ();
- +
- + count *= 2;
- + __atomic_or_fetch (v, count, __ATOMIC_ACQ_REL);
- + if (*v != 31)
- + abort ();
- +
- + count *= 2;
- + __atomic_fetch_or (v, count, __ATOMIC_SEQ_CST);
- + if (*v != 63)
- + abort ();
- +}
- +
- +int
- +main () {
- + short* V[] = {&A.a, &A.b};
- +
- + for (int i = 0; i < 2; i++) {
- + test_fetch_add (V[i]);
- + test_fetch_sub (V[i]);
- + test_fetch_and (V[i]);
- + test_fetch_nand (V[i]);
- + test_fetch_xor (V[i]);
- + test_fetch_or (V[i]);
- +
- + test_add_fetch (V[i]);
- + test_sub_fetch (V[i]);
- + test_and_fetch (V[i]);
- + test_nand_fetch (V[i]);
- + test_xor_fetch (V[i]);
- + test_or_fetch (V[i]);
- +
- + test_add (V[i]);
- + test_sub (V[i]);
- + test_and (V[i]);
- + test_nand (V[i]);
- + test_xor (V[i]);
- + test_or (V[i]);
- + }
- +
- + return 0;
- +}
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-5.c
- @@ -0,0 +1,87 @@
- +/* Test __atomic routines for existence and proper execution on 1 byte
- + values with each valid memory model. */
- +/* Logic duplicated from libatomic/testsuite/libatomic.c/atomic-compare-exchange-1.c.  */
- +/* { dg-do run } */
- +/* { dg-options "-minline-atomics" } */
- +
- +/* Test the execution of the __atomic_compare_exchange_n builtin for a char. */
- +
- +extern void abort(void);
- +
- +char v = 0;
- +char expected = 0;
- +char max = ~0;
- +char desired = ~0;
- +char zero = 0;
- +
- +#define STRONG 0
- +#define WEAK 1
- +
- +int
- +main ()
- +{
- +
- + if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG , __ATOMIC_RELAXED, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != max)
- + abort ();
- +
- + if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != max)
- + abort ();
- + if (v != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
- + abort ();
- + if (expected != 0)
- + abort ();
- + if (v != max)
- + abort ();
- +
- + /* Now test the generic version. */
- +
- + v = 0;
- +
- + if (!__atomic_compare_exchange (&v, &expected, &max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != max)
- + abort ();
- +
- + if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != max)
- + abort ();
- + if (v != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange (&v, &expected, &desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
- + abort ();
- + if (expected != 0)
- + abort ();
- + if (v != max)
- + abort ();
- +
- + return 0;
- +}
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-6.c
- @@ -0,0 +1,87 @@
- +/* Test __atomic routines for existence and proper execution on 2 byte
- + values with each valid memory model. */
- +/* Logic duplicated from libatomic/testsuite/libatomic.c/atomic-compare-exchange-2.c.  */
- +/* { dg-do run } */
- +/* { dg-options "-minline-atomics" } */
- +
- +/* Test the execution of the __atomic_compare_exchange_n builtin for a short. */
- +
- +extern void abort(void);
- +
- +short v = 0;
- +short expected = 0;
- +short max = ~0;
- +short desired = ~0;
- +short zero = 0;
- +
- +#define STRONG 0
- +#define WEAK 1
- +
- +int
- +main ()
- +{
- +
- + if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG , __ATOMIC_RELAXED, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != max)
- + abort ();
- +
- + if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != max)
- + abort ();
- + if (v != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
- + abort ();
- + if (expected != 0)
- + abort ();
- + if (v != max)
- + abort ();
- +
- + /* Now test the generic version. */
- +
- + v = 0;
- +
- + if (!__atomic_compare_exchange (&v, &expected, &max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
- + abort ();
- + if (expected != max)
- + abort ();
- +
- + if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != max)
- + abort ();
- + if (v != 0)
- + abort ();
- +
- + if (__atomic_compare_exchange (&v, &expected, &desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
- + abort ();
- + if (expected != 0)
- + abort ();
- +
- + if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
- + abort ();
- + if (expected != 0)
- + abort ();
- + if (v != max)
- + abort ();
- +
- + return 0;
- +}
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-7.c
- @@ -0,0 +1,69 @@
- +/* Test __atomic routines for existence and proper execution on 1 byte
- + values with each valid memory model. */
- +/* Logic duplicated from libatomic/testsuite/libatomic.c/atomic-exchange-1.c.  */
- +/* { dg-do run } */
- +/* { dg-options "-minline-atomics" } */
- +
- +/* Test the execution of the __atomic_exchange_n builtin for a char. */
- +
- +extern void abort(void);
- +
- +char v, count, ret;
- +
- +int
- +main ()
- +{
- + v = 0;
- + count = 0;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELAXED) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQUIRE) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELEASE) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQ_REL) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_SEQ_CST) != count)
- + abort ();
- + count++;
- +
- + /* Now test the generic version. */
- +
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_RELAXED);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQUIRE);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_RELEASE);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQ_REL);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_SEQ_CST);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + return 0;
- +}
- --- /dev/null
- +++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-8.c
- @@ -0,0 +1,69 @@
- +/* Test __atomic routines for existence and proper execution on 2 byte
- + values with each valid memory model. */
- +/* Logic duplicated from libatomic/testsuite/libatomic.c/atomic-exchange-2.c.  */
- +/* { dg-do run } */
- +/* { dg-options "-minline-atomics" } */
- +
- +/* Test the execution of the __atomic_X builtin for a short. */
- +
- +extern void abort(void);
- +
- +short v, count, ret;
- +
- +int
- +main ()
- +{
- + v = 0;
- + count = 0;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELAXED) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQUIRE) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELEASE) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQ_REL) != count)
- + abort ();
- + count++;
- +
- + if (__atomic_exchange_n (&v, count + 1, __ATOMIC_SEQ_CST) != count)
- + abort ();
- + count++;
- +
- + /* Now test the generic version. */
- +
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_RELAXED);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQUIRE);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_RELEASE);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQ_REL);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + __atomic_exchange (&v, &count, &ret, __ATOMIC_SEQ_CST);
- + if (ret != count - 1 || v != count)
- + abort ();
- + count++;
- +
- + return 0;
- +}
- --- a/libgcc/config/riscv/atomic.c
- +++ b/libgcc/config/riscv/atomic.c
- @@ -30,6 +30,8 @@ see the files COPYING3 and COPYING.RUNTI
- #define INVERT "not %[tmp1], %[tmp1]\n\t"
- #define DONT_INVERT ""
-
- +/* Logic duplicated in gcc/gcc/config/riscv/sync.md for use when inlining is enabled.  */
- +
- #define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop) \
- type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v) \
- { \
|