diff mbox series

[AArch64,v3,5/6] aarch64: Implement -matomic-ool

Message ID 20181101214648.29432-6-richard.henderson@linaro.org
State Superseded
Headers show
Series LSE atomics out-of-line | expand

Commit Message

Richard Henderson Nov. 1, 2018, 9:46 p.m. UTC
* config/aarch64/aarch64.opt (-matomic-ool): New.
	* config/aarch64/aarch64.c (aarch64_atomic_ool_func): New.
	(aarch64_ool_cas_names, aarch64_ool_swp_names): New.
	(aarch64_ool_ldadd_names, aarch64_ool_ldset_names): New.
	(aarch64_ool_ldclr_names, aarch64_ool_ldeor_names): New.
	(aarch64_expand_compare_and_swap): Honor TARGET_ATOMIC_OOL.
	* config/aarch64/atomics.md (atomic_exchange<ALLI>): Likewise.
	(atomic_<atomic_op><ALLI>): Likewise.
	(atomic_fetch_<atomic_op><ALLI>): Likewise.
	(atomic_<atomic_op>_fetch<ALLI>): Likewise.
---
 gcc/config/aarch64/aarch64-protos.h           | 13 +++
 gcc/config/aarch64/aarch64.c                  | 87 +++++++++++++++++
 .../atomic-comp-swap-release-acquire.c        |  2 +-
 .../gcc.target/aarch64/atomic-op-acq_rel.c    |  2 +-
 .../gcc.target/aarch64/atomic-op-acquire.c    |  2 +-
 .../gcc.target/aarch64/atomic-op-char.c       |  2 +-
 .../gcc.target/aarch64/atomic-op-consume.c    |  2 +-
 .../gcc.target/aarch64/atomic-op-imm.c        |  2 +-
 .../gcc.target/aarch64/atomic-op-int.c        |  2 +-
 .../gcc.target/aarch64/atomic-op-long.c       |  2 +-
 .../gcc.target/aarch64/atomic-op-relaxed.c    |  2 +-
 .../gcc.target/aarch64/atomic-op-release.c    |  2 +-
 .../gcc.target/aarch64/atomic-op-seq_cst.c    |  2 +-
 .../gcc.target/aarch64/atomic-op-short.c      |  2 +-
 .../aarch64/atomic_cmp_exchange_zero_reg_1.c  |  2 +-
 .../atomic_cmp_exchange_zero_strong_1.c       |  2 +-
 .../gcc.target/aarch64/sync-comp-swap.c       |  2 +-
 .../gcc.target/aarch64/sync-op-acquire.c      |  2 +-
 .../gcc.target/aarch64/sync-op-full.c         |  2 +-
 gcc/config/aarch64/aarch64.opt                |  4 +
 gcc/config/aarch64/atomics.md                 | 94 +++++++++++++++++--
 gcc/doc/invoke.texi                           | 14 ++-
 22 files changed, 220 insertions(+), 26 deletions(-)

-- 
2.17.2

Comments

Kyrill Tkachov Sept. 5, 2019, 9:56 a.m. UTC | #1
Hi Richard,

On 11/1/18 9:46 PM, Richard Henderson wrote:
>         * config/aarch64/aarch64.opt (-matomic-ool): New.

>         * config/aarch64/aarch64.c (aarch64_atomic_ool_func): New.

>         (aarch64_ool_cas_names, aarch64_ool_swp_names): New.

>         (aarch64_ool_ldadd_names, aarch64_ool_ldset_names): New.

>         (aarch64_ool_ldclr_names, aarch64_ool_ldeor_names): New.

>         (aarch64_expand_compare_and_swap): Honor TARGET_ATOMIC_OOL.

>         * config/aarch64/atomics.md (atomic_exchange<ALLI>): Likewise.

>         (atomic_<atomic_op><ALLI>): Likewise.

>         (atomic_fetch_<atomic_op><ALLI>): Likewise.

>         (atomic_<atomic_op>_fetch<ALLI>): Likewise.

> ---

>  gcc/config/aarch64/aarch64-protos.h           | 13 +++

>  gcc/config/aarch64/aarch64.c                  | 87 +++++++++++++++++

>  .../atomic-comp-swap-release-acquire.c        |  2 +-

>  .../gcc.target/aarch64/atomic-op-acq_rel.c    |  2 +-

>  .../gcc.target/aarch64/atomic-op-acquire.c    |  2 +-

>  .../gcc.target/aarch64/atomic-op-char.c       |  2 +-

>  .../gcc.target/aarch64/atomic-op-consume.c    |  2 +-

>  .../gcc.target/aarch64/atomic-op-imm.c        |  2 +-

>  .../gcc.target/aarch64/atomic-op-int.c        |  2 +-

>  .../gcc.target/aarch64/atomic-op-long.c       |  2 +-

>  .../gcc.target/aarch64/atomic-op-relaxed.c    |  2 +-

>  .../gcc.target/aarch64/atomic-op-release.c    |  2 +-

>  .../gcc.target/aarch64/atomic-op-seq_cst.c    |  2 +-

>  .../gcc.target/aarch64/atomic-op-short.c      |  2 +-

>  .../aarch64/atomic_cmp_exchange_zero_reg_1.c  |  2 +-

>  .../atomic_cmp_exchange_zero_strong_1.c       |  2 +-

>  .../gcc.target/aarch64/sync-comp-swap.c       |  2 +-

>  .../gcc.target/aarch64/sync-op-acquire.c      |  2 +-

>  .../gcc.target/aarch64/sync-op-full.c         |  2 +-

>  gcc/config/aarch64/aarch64.opt                |  4 +

>  gcc/config/aarch64/atomics.md                 | 94 +++++++++++++++++--

>  gcc/doc/invoke.texi                           | 14 ++-

>  22 files changed, 220 insertions(+), 26 deletions(-)

>

> diff --git a/gcc/config/aarch64/aarch64-protos.h 

> b/gcc/config/aarch64/aarch64-protos.h

> index 1fe1a50d52a..1c1877cd200 100644

> --- a/gcc/config/aarch64/aarch64-protos.h

> +++ b/gcc/config/aarch64/aarch64-protos.h

> @@ -630,4 +630,17 @@ poly_uint64 aarch64_regmode_natural_size 

> (machine_mode);

>

>  bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);

>

> +struct atomic_ool_names

> +{

> +    const char *str[5][4];

> +};

> +

> +rtx aarch64_atomic_ool_func(machine_mode mode, rtx model_rtx,

> +                           const atomic_ool_names *names);

> +extern const atomic_ool_names aarch64_ool_swp_names;

> +extern const atomic_ool_names aarch64_ool_ldadd_names;

> +extern const atomic_ool_names aarch64_ool_ldset_names;

> +extern const atomic_ool_names aarch64_ool_ldclr_names;

> +extern const atomic_ool_names aarch64_ool_ldeor_names;

> +

>  #endif /* GCC_AARCH64_PROTOS_H */

> diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c

> index b29f437aeaf..9ab8b95c344 100644

> --- a/gcc/config/aarch64/aarch64.c

> +++ b/gcc/config/aarch64/aarch64.c

> @@ -14679,6 +14679,82 @@ aarch64_emit_unlikely_jump (rtx insn)

>    add_reg_br_prob_note (jump, profile_probability::very_unlikely ());

>  }

>

> +/* We store the names of the various atomic helpers in a 5x4 array.

> +   Return the libcall function given MODE, MODEL and NAMES.  */

> +

> +rtx

> +aarch64_atomic_ool_func(machine_mode mode, rtx model_rtx,

> +                       const atomic_ool_names *names)

> +{

> +  memmodel model = memmodel_base (INTVAL (model_rtx));

> +  int mode_idx, model_idx;

> +

> +  switch (mode)

> +    {

> +    case E_QImode:

> +      mode_idx = 0;

> +      break;

> +    case E_HImode:

> +      mode_idx = 1;

> +      break;

> +    case E_SImode:

> +      mode_idx = 2;

> +      break;

> +    case E_DImode:

> +      mode_idx = 3;

> +      break;

> +    case E_TImode:

> +      mode_idx = 4;

> +      break;

> +    default:

> +      gcc_unreachable ();

> +    }

> +

> +  switch (model)

> +    {

> +    case MEMMODEL_RELAXED:

> +      model_idx = 0;

> +      break;

> +    case MEMMODEL_CONSUME:

> +    case MEMMODEL_ACQUIRE:

> +      model_idx = 1;

> +      break;

> +    case MEMMODEL_RELEASE:

> +      model_idx = 2;

> +      break;

> +    case MEMMODEL_ACQ_REL:

> +    case MEMMODEL_SEQ_CST:

> +      model_idx = 3;

> +      break;

> +    default:

> +      gcc_unreachable ();

> +    }

> +

> +  return init_one_libfunc_visibility (names->str[mode_idx][model_idx],

> +                                     VISIBILITY_HIDDEN);

> +}

> +

> +#define DEF0(B, N) \

> +  { "__aa64_" #B #N "_relax", \

> +    "__aa64_" #B #N "_acq", \

> +    "__aa64_" #B #N "_rel", \

> +    "__aa64_" #B #N "_acq_rel" }

> +

> +#define DEF4(B)  DEF0(B, 1), DEF0(B, 2), DEF0(B, 4), DEF0(B, 8), \

> +                { NULL, NULL, NULL, NULL }

> +#define DEF5(B)  DEF0(B, 1), DEF0(B, 2), DEF0(B, 4), DEF0(B, 8), 

> DEF0(B, 16)

> +

> +static const atomic_ool_names aarch64_ool_cas_names = { { DEF5(cas) } };

> +const atomic_ool_names aarch64_ool_swp_names = { { DEF4(swp) } };

> +const atomic_ool_names aarch64_ool_ldadd_names = { { DEF4(ldadd) } };

> +const atomic_ool_names aarch64_ool_ldset_names = { { DEF4(ldset) } };

> +const atomic_ool_names aarch64_ool_ldclr_names = { { DEF4(ldclr) } };

> +const atomic_ool_names aarch64_ool_ldeor_names = { { DEF4(ldeor) } };

> +

> +#undef DEF0

> +#undef DEF4

> +#undef DEF5

> +

>  /* Expand a compare and swap pattern.  */

>

>  void

> @@ -14725,6 +14801,17 @@ aarch64_expand_compare_and_swap (rtx operands[])

>                                                     newval, mod_s));

>        cc_reg = aarch64_gen_compare_reg_maybe_ze (NE, rval, oldval, mode);

>      }

> +  else if (TARGET_ATOMIC_OOL)

> +    {

> +      /* Oldval must satisfy compare afterward.  */

> +      if (!aarch64_plus_operand (oldval, mode))

> +       oldval = force_reg (mode, oldval);

> +      rtx func = aarch64_atomic_ool_func (mode, mod_s, 

> &aarch64_ool_cas_names);

> +      rval = emit_library_call_value (func, NULL_RTX, LCT_NORMAL, r_mode,

> +                                     oldval, mode, newval, mode,

> +                                     XEXP (mem, 0), ptr_mode);



As reported at https://gcc.gnu.org/ml/gcc-patches/2019-09/msg00118.html

I've encountered ICEs here with -mabi=ilp32 due to the mode of the 
address of MEM.

    rtx addr = XEXP (mem, 0);
    if (Pmode != ptr_mode)
      addr = convert_memory_address (ptr_mode, addr);

Adding the above before the emit_library_call_value call — forcing the
address into ptr_mode and passing addr as the argument — fixed that for me.

I needed to do similar additions to the other places in the patch that 
call emit_library_call_value.

This allowed an aarch64-none-elf build to succeed (which builds an 
-mabi=ilp32 multilib) but I'm not sure if it's papering over a problem?

Thanks,

Kyrill


> +      cc_reg = aarch64_gen_compare_reg_maybe_ze (NE, rval, oldval, mode);

> +    }

>    else

>      {

>        /* The oldval predicate varies by mode.  Test it and force to 

> reg.  */

> diff --git 

> a/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c

> index 49ca5d0d09c..e92f205c3a8 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf 

> -mno-atomic-ool" } */

>

>  #include "atomic-comp-swap-release-acquire.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c

> index 74f26348e42..6965431f7d9 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-acq_rel.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c

> index 66c1b1efe20..07dbca49d56 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-acquire.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c

> index c09d0434ecf..73bfbb7afc9 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-char.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c

> index 5783ab84f5c..c7945b3a22d 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-consume.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c

> index 18b8f0b04e9..e46bb3de7c1 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  int v = 0;

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c

> index 8520f0839ba..9b55deb5225 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-int.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c

> index d011f8c5ce2..2622f75331f 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  long v = 0;

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c

> index ed96bfdb978..f118a37a352 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-relaxed.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c

> index fc4be17de89..579634b08e8 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-release.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c

> index 613000fe490..016b0d6619f 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-seq_cst.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c

> index e82c8118ece..978bd1d8377 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "atomic-op-short.x"

>

> diff --git 

> a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c

> index f2a21ddf2e1..77430ecdbce 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-O2 -march=armv8-a+nolse" } */

> +/* { dg-options "-O2 -march=armv8-a+nolse -mno-atomic-ool" } */

>  /* { dg-skip-if "" { *-*-* } { "-mcpu=*" } { "" } } */

>

>  int

> diff --git 

> a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c 

> b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c

> index 8d2ae67dfbe..7d58b2f6bd0 100644

> --- a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c

> +++ b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-O2 -march=armv8-a+nolse" } */

> +/* { dg-options "-O2 -march=armv8-a+nolse -mno-atomic-ool" } */

>  /* { dg-skip-if "" { *-*-* } { "-mcpu=*" } { "" } } */

>

>  int

> diff --git a/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c 

> b/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c

> index e571b2f13b3..7fc5885d0fd 100644

> --- a/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c

> +++ b/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf 

> -mno-atomic-ool" } */

>

>  #include "sync-comp-swap.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c 

> b/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c

> index 357bf1be3b2..6ad0daa8998 100644

> --- a/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c

> +++ b/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "sync-op-acquire.x"

>

> diff --git a/gcc/testsuite/gcc.target/aarch64/sync-op-full.c 

> b/gcc/testsuite/gcc.target/aarch64/sync-op-full.c

> index c6ba1629965..9a7afeb70d3 100644

> --- a/gcc/testsuite/gcc.target/aarch64/sync-op-full.c

> +++ b/gcc/testsuite/gcc.target/aarch64/sync-op-full.c

> @@ -1,5 +1,5 @@

>  /* { dg-do compile } */

> -/* { dg-options "-march=armv8-a+nolse -O2" } */

> +/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */

>

>  #include "sync-op-full.x"

>

> diff --git a/gcc/config/aarch64/aarch64.opt 

> b/gcc/config/aarch64/aarch64.opt

> index b2e80cbf6f1..83166834165 100644

> --- a/gcc/config/aarch64/aarch64.opt

> +++ b/gcc/config/aarch64/aarch64.opt

> @@ -218,3 +218,7 @@ Enables verbose cost model dumping in the debug 

> dump files.

>  mtrack-speculation

>  Target Var(aarch64_track_speculation)

>  Generate code to track when the CPU might be speculating incorrectly.

> +

> +matomic-ool

> +Target Report Mask(ATOMIC_OOL) Save

> +Generate local calls to out-of-line atomic operations.

> diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md

> index 08a3a1ff955..24c1fabee59 100644

> --- a/gcc/config/aarch64/atomics.md

> +++ b/gcc/config/aarch64/atomics.md

> @@ -186,16 +186,27 @@

>    (match_operand:SI 3 "const_int_operand" "")]

>    ""

>    {

> -    rtx (*gen) (rtx, rtx, rtx, rtx);

> -

>      /* Use an atomic SWP when available.  */

>      if (TARGET_LSE)

> -      gen = gen_aarch64_atomic_exchange<mode>_lse;

> +      {

> +       emit_insn (gen_aarch64_atomic_exchange<mode>_lse

> +                  (operands[0], operands[1], operands[2], operands[3]));

> +      }

> +    else if (TARGET_ATOMIC_OOL)

> +      {

> +       machine_mode mode = <MODE>mode;

> +       rtx func = aarch64_atomic_ool_func (mode, operands[3],

> + &aarch64_ool_swp_names);

> +       rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL,

> +                                           mode, operands[2], mode,

> +                                           XEXP (operands[1], 0), 

> ptr_mode);

> +        emit_move_insn (operands[0], rval);

> +      }

>      else

> -      gen = gen_aarch64_atomic_exchange<mode>;

> -

> -    emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));

> -

> +      {

> +       emit_insn (gen_aarch64_atomic_exchange<mode>

> +                  (operands[0], operands[1], operands[2], operands[3]));

> +      }

>      DONE;

>    }

>  )

> @@ -280,6 +291,39 @@

>            }

>          operands[1] = force_reg (<MODE>mode, operands[1]);

>        }

> +    else if (TARGET_ATOMIC_OOL)

> +      {

> +        const atomic_ool_names *names;

> +       switch (<CODE>)

> +         {

> +         case MINUS:

> +           operands[1] = expand_simple_unop (<MODE>mode, NEG, 

> operands[1],

> +                                             NULL, 1);

> +           /* fallthru */

> +         case PLUS:

> +           names = &aarch64_ool_ldadd_names;

> +           break;

> +         case IOR:

> +           names = &aarch64_ool_ldset_names;

> +           break;

> +         case XOR:

> +           names = &aarch64_ool_ldeor_names;

> +           break;

> +         case AND:

> +           operands[1] = expand_simple_unop (<MODE>mode, NOT, 

> operands[1],

> +                                             NULL, 1);

> +           names = &aarch64_ool_ldclr_names;

> +           break;

> +         default:

> +           gcc_unreachable ();

> +         }

> +        machine_mode mode = <MODE>mode;

> +       rtx func = aarch64_atomic_ool_func (mode, operands[2], names);

> +       emit_library_call_value (func, NULL_RTX, LCT_NORMAL, mode,

> +                                operands[1], mode,

> +                                XEXP (operands[0], 0), ptr_mode);

> +        DONE;

> +      }

>      else

>        gen = gen_aarch64_atomic_<atomic_optab><mode>;

>

> @@ -405,6 +449,40 @@

>          }

>        operands[2] = force_reg (<MODE>mode, operands[2]);

>      }

> +  else if (TARGET_ATOMIC_OOL)

> +    {

> +      const atomic_ool_names *names;

> +      switch (<CODE>)

> +       {

> +       case MINUS:

> +         operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],

> +                                           NULL, 1);

> +         /* fallthru */

> +       case PLUS:

> +         names = &aarch64_ool_ldadd_names;

> +         break;

> +       case IOR:

> +         names = &aarch64_ool_ldset_names;

> +         break;

> +       case XOR:

> +         names = &aarch64_ool_ldeor_names;

> +         break;

> +       case AND:

> +         operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],

> +                                           NULL, 1);

> +         names = &aarch64_ool_ldclr_names;

> +         break;

> +       default:

> +         gcc_unreachable ();

> +       }

> +      machine_mode mode = <MODE>mode;

> +      rtx func = aarch64_atomic_ool_func (mode, operands[3], names);

> +      rtx rval = emit_library_call_value (func, operands[0], 

> LCT_NORMAL, mode,

> +                                         operands[2], mode,

> +                                         XEXP (operands[1], 0), 

> ptr_mode);

> +      emit_move_insn (operands[0], rval);

> +      DONE;

> +    }

>    else

>      gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;

>

> @@ -494,7 +572,7 @@

>  {

>    /* Use an atomic load-operate instruction when possible. In this case

>       we will re-compute the result from the original mem value. */

> -  if (TARGET_LSE)

> +  if (TARGET_LSE || TARGET_ATOMIC_OOL)

>      {

>        rtx tmp = gen_reg_rtx (<MODE>mode);

>        operands[2] = force_reg (<MODE>mode, operands[2]);

> diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi

> index 284594df010..70bd0d0a0a1 100644

> --- a/gcc/doc/invoke.texi

> +++ b/gcc/doc/invoke.texi

> @@ -623,7 +623,7 @@ Objective-C and Objective-C++ Dialects}.

>  -mpc-relative-literal-loads @gol

>  -msign-return-address=@var{scope} @gol

>  -march=@var{name}  -mcpu=@var{name}  -mtune=@var{name} @gol

> --moverride=@var{string}  -mverbose-cost-dump -mtrack-speculation}

> +-moverride=@var{string}  -mverbose-cost-dump -mtrack-speculation 

> -matomic-ool}

>

>  @emph{Adapteva Epiphany Options}

>  @gccoptlist{-mhalf-reg-file  -mprefer-short-insn-regs @gol

> @@ -15109,6 +15109,18 @@ be used by the compiler when expanding calls to

>  @code{__builtin_speculation_safe_copy} to permit a more efficient code

>  sequence to be generated.

>

> +@item -matomic-ool

> +@itemx -mno-atomic-ool

> +Enable or disable calls to out-of-line helpers to implement atomic 

> operations.

> +These helpers will, at runtime, determine if ARMv8.1-Atomics instructions

> +should be used; if not, they will use the load/store-exclusive 

> instructions

> +that are present in the base ARMv8.0 ISA.

> +

> +This option is only applicable when compiling for the base ARMv8.0

> +instruction set.  If using a later revision, e.g. 

> @option{-march=armv8.1-a}

> +or @option{-march=armv8-a+lse}, the ARMv8.1-Atomics instructions will be

> +used directly.

> +

>  @item -march=@var{name}

>  @opindex march

>  Specify the name of the target architecture and, optionally, one or

> -- 

> 2.17.2

>
Richard Henderson Sept. 5, 2019, 12:17 p.m. UTC | #2
On 9/5/19 2:56 AM, Kyrill Tkachov wrote:
> On 11/1/18 9:46 PM, Richard Henderson wrote:

>> +  else if (TARGET_ATOMIC_OOL)

>> +    {

>> +      /* Oldval must satisfy compare afterward.  */

>> +      if (!aarch64_plus_operand (oldval, mode))

>> +       oldval = force_reg (mode, oldval);

>> +      rtx func = aarch64_atomic_ool_func (mode, mod_s, &aarch64_ool_cas_names);

>> +      rval = emit_library_call_value (func, NULL_RTX, LCT_NORMAL, r_mode,

>> +                                     oldval, mode, newval, mode,

>> +                                     XEXP (mem, 0), ptr_mode);

> 

> 

> As reported at https://gcc.gnu.org/ml/gcc-patches/2019-09/msg00118.html

> 

> I've encountered ICEs here with -mabi=ilp32 due to the mode of the address of MEM.

> 

>    rtx addr = XEXP (mem, 0);

>    if (Pmode != ptr_mode)

>      addr = convert_memory_address (ptr_mode, addr);

> 

> above the emit_library_call_value to force the address into ptr_mode and use

> addr as the argument fixed that for me.


The address should be promoted to Pmode for the call, as the little assembly
routine doesn't (and shouldn't) care for the extension itself.

I'll try aarch64-elf before re-posting.


r~
diff mbox series

Patch

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 1fe1a50d52a..1c1877cd200 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -630,4 +630,17 @@  poly_uint64 aarch64_regmode_natural_size (machine_mode);
 
 bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);
 
+struct atomic_ool_names
+{
+    const char *str[5][4];
+};
+
+rtx aarch64_atomic_ool_func(machine_mode mode, rtx model_rtx,
+			    const atomic_ool_names *names);
+extern const atomic_ool_names aarch64_ool_swp_names;
+extern const atomic_ool_names aarch64_ool_ldadd_names;
+extern const atomic_ool_names aarch64_ool_ldset_names;
+extern const atomic_ool_names aarch64_ool_ldclr_names;
+extern const atomic_ool_names aarch64_ool_ldeor_names;
+
 #endif /* GCC_AARCH64_PROTOS_H */
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index b29f437aeaf..9ab8b95c344 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -14679,6 +14679,82 @@  aarch64_emit_unlikely_jump (rtx insn)
   add_reg_br_prob_note (jump, profile_probability::very_unlikely ());
 }
 
+/* We store the names of the various atomic helpers in a 5x4 array.
+   Return the libcall function given MODE, MODEL and NAMES.  */
+
+rtx
+aarch64_atomic_ool_func(machine_mode mode, rtx model_rtx,
+			const atomic_ool_names *names)
+{
+  memmodel model = memmodel_base (INTVAL (model_rtx));
+  int mode_idx, model_idx;
+
+  switch (mode)
+    {
+    case E_QImode:
+      mode_idx = 0;
+      break;
+    case E_HImode:
+      mode_idx = 1;
+      break;
+    case E_SImode:
+      mode_idx = 2;
+      break;
+    case E_DImode:
+      mode_idx = 3;
+      break;
+    case E_TImode:
+      mode_idx = 4;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  switch (model)
+    {
+    case MEMMODEL_RELAXED:
+      model_idx = 0;
+      break;
+    case MEMMODEL_CONSUME:
+    case MEMMODEL_ACQUIRE:
+      model_idx = 1;
+      break;
+    case MEMMODEL_RELEASE:
+      model_idx = 2;
+      break;
+    case MEMMODEL_ACQ_REL:
+    case MEMMODEL_SEQ_CST:
+      model_idx = 3;
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  return init_one_libfunc_visibility (names->str[mode_idx][model_idx],
+				      VISIBILITY_HIDDEN);
+}
+
+#define DEF0(B, N) \
+  { "__aa64_" #B #N "_relax", \
+    "__aa64_" #B #N "_acq", \
+    "__aa64_" #B #N "_rel", \
+    "__aa64_" #B #N "_acq_rel" }
+
+#define DEF4(B)  DEF0(B, 1), DEF0(B, 2), DEF0(B, 4), DEF0(B, 8), \
+		 { NULL, NULL, NULL, NULL }
+#define DEF5(B)  DEF0(B, 1), DEF0(B, 2), DEF0(B, 4), DEF0(B, 8), DEF0(B, 16)
+
+static const atomic_ool_names aarch64_ool_cas_names = { { DEF5(cas) } };
+const atomic_ool_names aarch64_ool_swp_names = { { DEF4(swp) } };
+const atomic_ool_names aarch64_ool_ldadd_names = { { DEF4(ldadd) } };
+const atomic_ool_names aarch64_ool_ldset_names = { { DEF4(ldset) } };
+const atomic_ool_names aarch64_ool_ldclr_names = { { DEF4(ldclr) } };
+const atomic_ool_names aarch64_ool_ldeor_names = { { DEF4(ldeor) } };
+
+#undef DEF0
+#undef DEF4
+#undef DEF5
+
 /* Expand a compare and swap pattern.  */
 
 void
@@ -14725,6 +14801,17 @@  aarch64_expand_compare_and_swap (rtx operands[])
 						   newval, mod_s));
       cc_reg = aarch64_gen_compare_reg_maybe_ze (NE, rval, oldval, mode);
     }
+  else if (TARGET_ATOMIC_OOL)
+    {
+      /* Oldval must satisfy compare afterward.  */
+      if (!aarch64_plus_operand (oldval, mode))
+	oldval = force_reg (mode, oldval);
+      rtx func = aarch64_atomic_ool_func (mode, mod_s, &aarch64_ool_cas_names);
+      rval = emit_library_call_value (func, NULL_RTX, LCT_NORMAL, r_mode,
+				      oldval, mode, newval, mode,
+				      XEXP (mem, 0), ptr_mode);
+      cc_reg = aarch64_gen_compare_reg_maybe_ze (NE, rval, oldval, mode);
+    }
   else
     {
       /* The oldval predicate varies by mode.  Test it and force to reg.  */
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c b/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c
index 49ca5d0d09c..e92f205c3a8 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-comp-swap-release-acquire.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf -mno-atomic-ool" } */
 
 #include "atomic-comp-swap-release-acquire.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c
index 74f26348e42..6965431f7d9 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-acq_rel.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-acq_rel.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c
index 66c1b1efe20..07dbca49d56 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-acquire.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-acquire.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c
index c09d0434ecf..73bfbb7afc9 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-char.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-char.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c
index 5783ab84f5c..c7945b3a22d 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-consume.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-consume.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c
index 18b8f0b04e9..e46bb3de7c1 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-imm.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 int v = 0;
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c
index 8520f0839ba..9b55deb5225 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-int.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-int.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c
index d011f8c5ce2..2622f75331f 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-long.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 long v = 0;
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c
index ed96bfdb978..f118a37a352 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-relaxed.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-relaxed.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c
index fc4be17de89..579634b08e8 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-release.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-release.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c
index 613000fe490..016b0d6619f 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-seq_cst.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-seq_cst.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c b/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c
index e82c8118ece..978bd1d8377 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic-op-short.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "atomic-op-short.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c
index f2a21ddf2e1..77430ecdbce 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_reg_1.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-O2 -march=armv8-a+nolse" } */
+/* { dg-options "-O2 -march=armv8-a+nolse -mno-atomic-ool" } */
 /* { dg-skip-if "" { *-*-* } { "-mcpu=*" } { "" } } */
 
 int
diff --git a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c
index 8d2ae67dfbe..7d58b2f6bd0 100644
--- a/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/atomic_cmp_exchange_zero_strong_1.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-O2 -march=armv8-a+nolse" } */
+/* { dg-options "-O2 -march=armv8-a+nolse -mno-atomic-ool" } */
 /* { dg-skip-if "" { *-*-* } { "-mcpu=*" } { "" } } */
 
 int
diff --git a/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c b/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c
index e571b2f13b3..7fc5885d0fd 100644
--- a/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c
+++ b/gcc/testsuite/gcc.target/aarch64/sync-comp-swap.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -fno-ipa-icf -mno-atomic-ool" } */
 
 #include "sync-comp-swap.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c b/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c
index 357bf1be3b2..6ad0daa8998 100644
--- a/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c
+++ b/gcc/testsuite/gcc.target/aarch64/sync-op-acquire.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "sync-op-acquire.x"
 
diff --git a/gcc/testsuite/gcc.target/aarch64/sync-op-full.c b/gcc/testsuite/gcc.target/aarch64/sync-op-full.c
index c6ba1629965..9a7afeb70d3 100644
--- a/gcc/testsuite/gcc.target/aarch64/sync-op-full.c
+++ b/gcc/testsuite/gcc.target/aarch64/sync-op-full.c
@@ -1,5 +1,5 @@ 
 /* { dg-do compile } */
-/* { dg-options "-march=armv8-a+nolse -O2" } */
+/* { dg-options "-march=armv8-a+nolse -O2 -mno-atomic-ool" } */
 
 #include "sync-op-full.x"
 
diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
index b2e80cbf6f1..83166834165 100644
--- a/gcc/config/aarch64/aarch64.opt
+++ b/gcc/config/aarch64/aarch64.opt
@@ -218,3 +218,7 @@  Enables verbose cost model dumping in the debug dump files.
 mtrack-speculation
 Target Var(aarch64_track_speculation)
 Generate code to track when the CPU might be speculating incorrectly.
+
+matomic-ool
+Target Report Mask(ATOMIC_OOL) Save
+Generate local calls to out-of-line atomic operations.
diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md
index 08a3a1ff955..24c1fabee59 100644
--- a/gcc/config/aarch64/atomics.md
+++ b/gcc/config/aarch64/atomics.md
@@ -186,16 +186,27 @@ 
   (match_operand:SI 3 "const_int_operand" "")]
   ""
   {
-    rtx (*gen) (rtx, rtx, rtx, rtx);
-
     /* Use an atomic SWP when available.  */
     if (TARGET_LSE)
-      gen = gen_aarch64_atomic_exchange<mode>_lse;
+      {
+	emit_insn (gen_aarch64_atomic_exchange<mode>_lse
+		   (operands[0], operands[1], operands[2], operands[3]));
+      }
+    else if (TARGET_ATOMIC_OOL)
+      {
+	machine_mode mode = <MODE>mode;
+	rtx func = aarch64_atomic_ool_func (mode, operands[3],
+					    &aarch64_ool_swp_names);
+	rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL,
+					    mode, operands[2], mode,
+					    XEXP (operands[1], 0), ptr_mode);
+        emit_move_insn (operands[0], rval);
+      }
     else
-      gen = gen_aarch64_atomic_exchange<mode>;
-
-    emit_insn (gen (operands[0], operands[1], operands[2], operands[3]));
-
+      {
+	emit_insn (gen_aarch64_atomic_exchange<mode>
+		   (operands[0], operands[1], operands[2], operands[3]));
+      }
     DONE;
   }
 )
@@ -280,6 +291,39 @@ 
 	  }
 	operands[1] = force_reg (<MODE>mode, operands[1]);
       }
+    else if (TARGET_ATOMIC_OOL)
+      {
+        const atomic_ool_names *names;
+	switch (<CODE>)
+	  {
+	  case MINUS:
+	    operands[1] = expand_simple_unop (<MODE>mode, NEG, operands[1],
+					      NULL, 1);
+	    /* fallthru */
+	  case PLUS:
+	    names = &aarch64_ool_ldadd_names;
+	    break;
+	  case IOR:
+	    names = &aarch64_ool_ldset_names;
+	    break;
+	  case XOR:
+	    names = &aarch64_ool_ldeor_names;
+	    break;
+	  case AND:
+	    operands[1] = expand_simple_unop (<MODE>mode, NOT, operands[1],
+					      NULL, 1);
+	    names = &aarch64_ool_ldclr_names;
+	    break;
+	  default:
+	    gcc_unreachable ();
+	  }
+        machine_mode mode = <MODE>mode;
+	rtx func = aarch64_atomic_ool_func (mode, operands[2], names);
+	emit_library_call_value (func, NULL_RTX, LCT_NORMAL, mode,
+				 operands[1], mode,
+				 XEXP (operands[0], 0), ptr_mode);
+        DONE;
+      }
     else
       gen = gen_aarch64_atomic_<atomic_optab><mode>;
 
@@ -405,6 +449,40 @@ 
 	}
       operands[2] = force_reg (<MODE>mode, operands[2]);
     }
+  else if (TARGET_ATOMIC_OOL)
+    {
+      const atomic_ool_names *names;
+      switch (<CODE>)
+	{
+	case MINUS:
+	  operands[2] = expand_simple_unop (<MODE>mode, NEG, operands[2],
+					    NULL, 1);
+	  /* fallthru */
+	case PLUS:
+	  names = &aarch64_ool_ldadd_names;
+	  break;
+	case IOR:
+	  names = &aarch64_ool_ldset_names;
+	  break;
+	case XOR:
+	  names = &aarch64_ool_ldeor_names;
+	  break;
+	case AND:
+	  operands[2] = expand_simple_unop (<MODE>mode, NOT, operands[2],
+					    NULL, 1);
+	  names = &aarch64_ool_ldclr_names;
+	  break;
+	default:
+	  gcc_unreachable ();
+	}
+      machine_mode mode = <MODE>mode;
+      rtx func = aarch64_atomic_ool_func (mode, operands[3], names);
+      rtx rval = emit_library_call_value (func, operands[0], LCT_NORMAL, mode,
+					  operands[2], mode,
+					  XEXP (operands[1], 0), ptr_mode);
+      emit_move_insn (operands[0], rval);
+      DONE;
+    }
   else
     gen = gen_aarch64_atomic_fetch_<atomic_optab><mode>;
 
@@ -494,7 +572,7 @@ 
 {
   /* Use an atomic load-operate instruction when possible.  In this case
      we will re-compute the result from the original mem value. */
-  if (TARGET_LSE)
+  if (TARGET_LSE || TARGET_ATOMIC_OOL)
     {
       rtx tmp = gen_reg_rtx (<MODE>mode);
       operands[2] = force_reg (<MODE>mode, operands[2]);
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 284594df010..70bd0d0a0a1 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -623,7 +623,7 @@  Objective-C and Objective-C++ Dialects}.
 -mpc-relative-literal-loads @gol
 -msign-return-address=@var{scope} @gol
 -march=@var{name}  -mcpu=@var{name}  -mtune=@var{name}  @gol
--moverride=@var{string}  -mverbose-cost-dump -mtrack-speculation} 
+-moverride=@var{string}  -mverbose-cost-dump -mtrack-speculation -matomic-ool} 
 
 @emph{Adapteva Epiphany Options}
 @gccoptlist{-mhalf-reg-file  -mprefer-short-insn-regs @gol
@@ -15109,6 +15109,18 @@  be used by the compiler when expanding calls to
 @code{__builtin_speculation_safe_copy} to permit a more efficient code
 sequence to be generated.
 
+@item -matomic-ool
+@itemx -mno-atomic-ool
+Enable or disable calls to out-of-line helpers to implement atomic operations.
+These helpers will, at runtime, determine whether ARMv8.1-Atomics instructions
+should be used; if not, they will use the load/store-exclusive instructions
+that are present in the base ARMv8.0 ISA.
+
+This option is only applicable when compiling for the base ARMv8.0
+instruction set.  If using a later revision, e.g. @option{-march=armv8.1-a}
+or @option{-march=armv8-a+lse}, the ARMv8.1-Atomics instructions will be
+used directly.
+
 @item -march=@var{name}
 @opindex march
 Specify the name of the target architecture and, optionally, one or