
RFR: JDK8: Merge up to jdk8u20-b16 (part 1)

Message ID 1401978899.23149.29.camel@localhost.localdomain
State New

Commit Message

Edward Nevill June 5, 2014, 2:34 p.m. UTC
Hi,

The following patches merge the jdk8 tree up to jdk8u20-b16, which is the tip of the jdk8u tree.

Because of the size of the patches (> 11 MB), I have included only the AArch64-specific patches inline below.

http://people.linaro.org/~edward.nevill/jdk8u20-b16/jdk8.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/corba.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/hotspot.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/jaxp.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/jaxws.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/jdk.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/langtools.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/nashorn.patch
http://people.linaro.org/~edward.nevill/jdk8u20-b16/hotspot_aarch64.patch

The following are the results from the JTreg langtools and hotspot suites:

AArch64:

client/hotspot.....Passed:553....Failed:19
client/langtools...Passed:2979...Error:29
server/hotspot.....Passed:553....Failed:19
server/langtools...Passed:3000...Error:8

x86:

client/hotspot.....Passed:547....Failed:17
client/langtools...Passed:2982...Error:26
server/hotspot.....Passed:558....Failed:6
server/langtools...Passed:2983...Error:25

I have done smoke tests on the built-in sim for the client/slowdebug, server/release, and server/fastdebug variants.

I appreciate that there is an increased number of hotspot failures compared with x86; however, I would like to push this merge to get it off my desk and then go back and look at the failures.

OK to push?
Ed.

--- CUT HERE ---
exporting patch:
# HG changeset patch
# User Edward Nevill edward.nevill@linaro.org
# Date 1401972493 -3600
#      Thu Jun 05 13:48:13 2014 +0100
# Node ID 8cb098504801769e6c53eec016a1767b0aa59c79
# Parent  6298eeefbb7babc0772332df5482d9eeaaf353eb
AArch64-specific changes for merge to jdk8u20-b16

Patch

diff -r 6298eeefbb7b -r 8cb098504801 agent/src/os/linux/LinuxDebuggerLocal.c
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Jun 05 13:07:27 2014 +0100
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Jun 05 13:48:13 2014 +0100
@@ -365,7 +365,6 @@ 
 
 #undef REG_INDEX
 
-// ECN: FIXME - add case for aarch64
 #ifdef i386
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_x86_X86ThreadContext_##reg
 
@@ -418,13 +417,6 @@ 
 
 #endif /* amd64 */
 
-#if defined(aarch64)
-  regs = (*env)->GetLongArrayElements(env, array, &isCopy);
-  for (i = 0; i < NPRGREG; i++ ) {
-    regs[i] = 0xDEADDEAD;
-  }
-#endif /* aarch64 */
-
 #if defined(sparc) || defined(sparcv9)
 
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_##reg
@@ -458,6 +450,12 @@ 
   regs[REG_INDEX(R_O7)]  = gregs.u_regs[14];
 #endif /* sparc */
 
+#if defined(aarch64)
+
+#define REG_INDEX(reg) sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_##reg
+
+#endif /* aarch64 */
+
 
   (*env)->ReleaseLongArrayElements(env, array, regs, JNI_COMMIT);
   return array;
diff -r 6298eeefbb7b -r 8cb098504801 make/linux/makefiles/buildtree.make
--- a/make/linux/makefiles/buildtree.make	Thu Jun 05 13:07:27 2014 +0100
+++ b/make/linux/makefiles/buildtree.make	Thu Jun 05 13:48:13 2014 +0100
@@ -194,6 +194,7 @@ 
 DATA_MODE/sparcv9 = 64
 DATA_MODE/amd64 = 64
 DATA_MODE/ppc64 = 64
+DATA_MODE/aarch64 = 64
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
diff -r 6298eeefbb7b -r 8cb098504801 make/linux/makefiles/compiler1.make
--- a/make/linux/makefiles/compiler1.make	Thu Jun 05 13:07:27 2014 +0100
+++ b/make/linux/makefiles/compiler1.make	Thu Jun 05 13:48:13 2014 +0100
@@ -28,7 +28,4 @@ 
 
 VM_SUBDIR = client
 
-# for now don't make compiler1 if building aarch64
-#ifneq ($(SRCARCH), aarch64)
 CFLAGS += -DCOMPILER1
-#endif
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/aarch64.ad
--- a/src/cpu/aarch64/vm/aarch64.ad	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/aarch64.ad	Thu Jun 05 13:48:13 2014 +0100
@@ -717,10 +717,98 @@ 
 
 source_hpp %{
 
+class CallStubImpl {
+ 
+  //--------------------------------------------------------------
+  //---<  Used for optimization in Compile::shorten_branches  >---
+  //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+  
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() { 
+    return 0; // no call trampolines on this platform
+  }
+};
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    // count up to 4 movz/n/k instructions and one branch instruction
+    return 5 * NativeInstruction::instruction_size;
+  }
+
+  static uint size_deopt_handler() {
+    // count one adr and one branch instruction
+    return 2 * NativeInstruction::instruction_size;
+  }
+};
+
+  bool followed_by_ordered_store(const Node *barrier);
+  bool preceded_by_ordered_load(const Node *barrier);
+
 %}
 
 source %{
 
+  // AArch64 has load acquire and store release instructions which we
+  // use for ordered memory accesses, e.g. for volatiles.  The ideal
+  // graph generator also inserts memory barriers around volatile
+  // accesses, and we don't want to generate both barriers and acq/rel
+  // instructions.  So, when we emit a MemBarAcquire we look back in
+  // the ideal graph for an ordered load and only emit the barrier if
+  // we don't find one.
+
+bool preceded_by_ordered_load(const Node *barrier) {
+  Node *x = barrier->lookup(TypeFunc::Parms);
+
+  if (! x)
+    return false;
+
+  if (x->is_DecodeNarrowPtr())
+    x = x->in(1);
+
+  if (x->is_Load())
+    return ! x->as_Load()->is_unordered();
+
+  return false;
+}
+
+bool followed_by_ordered_store(const Node *barrier) {
+
+  // Find following mem node.
+  //
+  Node *mem_proj = NULL;
+  for (DUIterator_Fast imax, i = barrier->fast_outs(imax); i < imax; i++) {
+    mem_proj = barrier->fast_out(i);      // Throw out-of-bounds if proj not found
+    assert(mem_proj->is_Proj(), "only projections here");
+    ProjNode *proj = mem_proj->as_Proj();
+    if (proj->_con == TypeFunc::Memory &&
+        !Compile::current()->node_arena()->contains(mem_proj)) // Unmatched old-space only
+      break;
+  }
+  assert(mem_proj->as_Proj()->_con == TypeFunc::Memory, "Graph broken");
+
+  // Search behind Proj.
+  for (DUIterator_Fast jmax, j = mem_proj->fast_outs(jmax); j < jmax; j++) {
+    Node *x = mem_proj->fast_out(j);
+    if (x->is_Store() && ! x->as_Store()->is_unordered()) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
 #define __ _masm.
 
 // advance declaratuons for helper functions to convert register
@@ -842,6 +930,11 @@ 
   return 0;  // absolute addressing, no offset
 }
 
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  ShouldNotReachHere();
+}
+
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   // Empty encoding
 }
@@ -984,8 +1077,7 @@ 
   }
 
   if (do_polling() && C->is_method_compilation()) {
-    address polling_page(os::get_polling_page());
-    __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
+    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
   }
 }
 
@@ -1412,14 +1504,8 @@ 
 
 //=============================================================================
 
-uint size_exception_handler()
-{
-  // count up to 4 movz/n/k instructions and one branch instruction
-  return 5 * NativeInstruction::instruction_size;
-}
-
 // Emit exception handler code.
-int emit_exception_handler(CodeBuffer& cbuf)
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
 {
   // mov rscratch1 #exception_blob_entry_point
   // br rscratch1
@@ -1436,14 +1522,8 @@ 
   return offset;
 }
 
-uint size_deopt_handler()
-{
-  // count one adr and one branch instruction
-  return 2 * NativeInstruction::instruction_size;
-}
-
 // Emit deopt handler code.
-int emit_deopt_handler(CodeBuffer& cbuf)
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
 {
   // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a handler.
@@ -1530,6 +1610,11 @@ 
   return Op_RegL;
 }
 
+// AES support not yet implemented
+const bool Matcher::pass_original_key_for_aes() {
+  return false;
+}
+
 // x86 supports misaligned vectors store/load.
 const bool Matcher::misaligned_vectors_ok() {
   // TODO fixme
@@ -1554,6 +1639,9 @@ 
   return 0;
 }
 
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
 // Should the Matcher clone shifts on addressing modes, expecting them
 // to be subsumed into complex addressing expressions or compute them
 // into registers?  True for Intel but false for most RISCs
@@ -1677,19 +1765,6 @@ 
   return RegMask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return R0_REG_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  ShouldNotReachHere();
-  return R0_REG_mask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
 // helper for encoding java_to_runtime calls on sim
 //
 // this is needed to compute the extra arguments required when
@@ -1735,19 +1810,10 @@ 
 #define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)	\
   MacroAssembler _masm(&cbuf);						\
   {									\
-    Register base = as_Register(BASE);					\
-    if (INDEX == -1) {							\
-	__ lea(SCRATCH, Address(base, DISP));				\
-    } else {								\
-	Register index_reg = as_Register(INDEX);			\
-	if (DISP == 0) {						\
-	  __ lea(SCRATCH, Address(base, index_reg, Address::lsl(SCALE))); \
-	} else {							\
-	  __ lea(SCRATCH, Address(base, DISP));				\
-	  __ lea(SCRATCH, Address(SCRATCH, index_reg, Address::lsl(SCALE))); \
-	}								\
-    }									\
-    __ INSN(REG, SCRATCH);						\
+    guarantee(INDEX == -1, "mode not permitted for volatile");		\
+    guarantee(DISP == 0, "mode not permitted for volatile");		\
+    guarantee(SCALE == 0, "mode not permitted for volatile");		\
+    __ INSN(REG, as_Register(BASE));					\
   }
 
 typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
@@ -2301,13 +2367,12 @@ 
     }
     Label retry_load, done;
     __ bind(retry_load);
-    __ ldaxr(rscratch1, addr_reg);
+    __ ldxr(rscratch1, addr_reg);
     __ cmp(rscratch1, old_reg);
     __ br(Assembler::NE, done);
     __ stlxr(rscratch1, new_reg, addr_reg);
     __ cbnzw(rscratch1, retry_load);
     __ bind(done);
-    __ membar(__ AnyAny);
   %}
 
   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
@@ -2341,13 +2406,12 @@ 
     }
     Label retry_load, done;
     __ bind(retry_load);
-    __ ldaxrw(rscratch1, addr_reg);
+    __ ldxrw(rscratch1, addr_reg);
     __ cmpw(rscratch1, old_reg);
     __ br(Assembler::NE, done);
     __ stlxrw(rscratch1, new_reg, addr_reg);
     __ cbnzw(rscratch1, retry_load);
     __ bind(done);
-    __ membar(__ AnyAny);
   %}
 
   // auxiliary used for CompareAndSwapX to set result register
@@ -2512,7 +2576,13 @@ 
         __ mov_metadata(dst_reg, (Metadata*)con);
       } else {
         assert(rtype == relocInfo::none, "unexpected reloc type");
-        __ lea(dst_reg, Address(con, rtype));
+	if (con < (address)(uintptr_t)os::vm_page_size()) {
+	  __ mov(dst_reg, con);
+	} else {
+	  unsigned long offset;
+	  __ adrp(dst_reg, con, offset);
+	  __ add(dst_reg, dst_reg, offset);
+	}
       }
     }
   %}
@@ -2826,9 +2896,6 @@ 
       __ call_Unimplemented();
     }
   %}
-  // TODO
-  // this only works ifdef BUILTIN_SIM
-  // provide version for native AArch64 build
 
   enc_class aarch64_enc_java_to_runtime(method meth) %{
     MacroAssembler _masm(&cbuf);
@@ -3240,7 +3307,7 @@ 
   c_calling_convention
   %{
     // This is obviously always outgoing
-    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
   %}
 
   // Location of compiled Java return values.  Same as C for now.
@@ -4612,6 +4679,7 @@ 
 opclass memory(indirect, indIndexScaledOffsetI,  indIndexScaledOffsetL, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
 	       indirectN, indIndexScaledOffsetIN,  indIndexScaledOffsetLN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
 
+
 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 // operations. it allows the src to be either an iRegI or a (ConvL2I
 // iRegL). in the latter case the l2i normally planted for a ConvL2I
@@ -4749,7 +4817,7 @@ 
 instruct loadB(iRegINoSp dst, memory mem)
 %{
   match(Set dst (LoadB mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrsbw  $dst, $mem\t# byte" %}
@@ -4763,7 +4831,7 @@ 
 instruct loadB2L(iRegLNoSp dst, memory mem)
 %{
   match(Set dst (ConvI2L (LoadB mem)));
-  predicate(!treat_as_volatile(((MemNode*)(n->in(1)))));
+  predicate(n->in(1)->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrsb  $dst, $mem\t# byte" %}
@@ -4777,7 +4845,7 @@ 
 instruct loadUB(iRegINoSp dst, memory mem)
 %{
   match(Set dst (LoadUB mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrbw  $dst, $mem\t# byte" %}
@@ -4791,7 +4859,7 @@ 
 instruct loadUB2L(iRegLNoSp dst, memory mem)
 %{
   match(Set dst (ConvI2L (LoadUB mem)));
-  predicate(!treat_as_volatile(((MemNode*)(n->in(1)))));
+  predicate(n->in(1)->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrb  $dst, $mem\t# byte" %}
@@ -4805,7 +4873,7 @@ 
 instruct loadS(iRegINoSp dst, memory mem)
 %{
   match(Set dst (LoadS mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrshw  $dst, $mem\t# short" %}
@@ -4819,7 +4887,7 @@ 
 instruct loadS2L(iRegLNoSp dst, memory mem)
 %{
   match(Set dst (ConvI2L (LoadS mem)));
-  predicate(!treat_as_volatile(((MemNode*)(n->in(1)))));
+  predicate(n->in(1)->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrsh  $dst, $mem\t# short" %}
@@ -4833,7 +4901,7 @@ 
 instruct loadUS(iRegINoSp dst, memory mem)
 %{
   match(Set dst (LoadUS mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrh  $dst, $mem\t# short" %}
@@ -4847,7 +4915,7 @@ 
 instruct loadUS2L(iRegLNoSp dst, memory mem)
 %{
   match(Set dst (ConvI2L (LoadUS mem)));
-  predicate(!treat_as_volatile(((MemNode*)(n->in(1)))));
+  predicate(n->in(1)->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrh  $dst, $mem\t# short" %}
@@ -4861,7 +4929,7 @@ 
 instruct loadI(iRegINoSp dst, memory mem)
 %{
   match(Set dst (LoadI mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrw  $dst, $mem\t# int" %}
@@ -4875,7 +4943,7 @@ 
 instruct loadI2L(iRegLNoSp dst, memory mem)
 %{
   match(Set dst (ConvI2L (LoadI mem)));
-  predicate(!treat_as_volatile(((MemNode*)(n->in(1)))));
+  predicate(n->in(1)->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrsw  $dst, $mem\t# int" %}
@@ -4889,7 +4957,7 @@ 
 instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
 %{
   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
-  predicate(!treat_as_volatile(((MemNode*)(n->in(1))->in(1))));
+  predicate(n->in(1)->in(1)->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrw  $dst, $mem\t# int" %}
@@ -4903,7 +4971,7 @@ 
 instruct loadL(iRegLNoSp dst, memory mem)
 %{
   match(Set dst (LoadL mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldr  $dst, $mem\t# int" %}
@@ -4930,7 +4998,7 @@ 
 instruct loadP(iRegPNoSp dst, memory mem)
 %{
   match(Set dst (LoadP mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldr  $dst, $mem\t# ptr" %}
@@ -4944,7 +5012,7 @@ 
 instruct loadN(iRegNNoSp dst, memory mem)
 %{
   match(Set dst (LoadN mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
@@ -4958,7 +5026,7 @@ 
 instruct loadKlass(iRegPNoSp dst, memory mem)
 %{
   match(Set dst (LoadKlass mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldr  $dst, $mem\t# class" %}
@@ -4972,7 +5040,7 @@ 
 instruct loadNKlass(iRegNNoSp dst, memory mem)
 %{
   match(Set dst (LoadNKlass mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
@@ -4986,7 +5054,7 @@ 
 instruct loadF(vRegF dst, memory mem)
 %{
   match(Set dst (LoadF mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrs  $dst, $mem\t# float" %}
@@ -5000,7 +5068,7 @@ 
 instruct loadD(vRegD dst, memory mem)
 %{
   match(Set dst (LoadD mem));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Load()->is_unordered());
 
   ins_cost(4 * INSN_COST);
   format %{ "ldrd  $dst, $mem\t# double" %}
@@ -5231,7 +5299,7 @@ 
 instruct storeB(iRegI src, memory mem)
 %{
   match(Set mem (StoreB mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strb  $src, $mem\t# byte" %}
@@ -5245,7 +5313,7 @@ 
 instruct storeimmB0(immI0 zero, memory mem)
 %{
   match(Set mem (StoreB mem zero));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strb zr, $mem\t# byte" %}
@@ -5259,7 +5327,7 @@ 
 instruct storeC(iRegI src, memory mem)
 %{
   match(Set mem (StoreC mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strh  $src, $mem\t# short" %}
@@ -5272,7 +5340,7 @@ 
 instruct storeimmC0(immI0 zero, memory mem)
 %{
   match(Set mem (StoreC mem zero));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strh  zr, $mem\t# short" %}
@@ -5287,7 +5355,7 @@ 
 instruct storeI(iRegIorL2I src, memory mem)
 %{
   match(Set mem(StoreI mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strw  $src, $mem\t# int" %}
@@ -5300,7 +5368,7 @@ 
 instruct storeimmI0(immI0 zero, memory mem)
 %{
   match(Set mem(StoreI mem zero));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strw  zr, $mem\t# int" %}
@@ -5314,7 +5382,7 @@ 
 instruct storeL(iRegL src, memory mem)
 %{
   match(Set mem (StoreL mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "str  $src, $mem\t# int" %}
@@ -5328,7 +5396,7 @@ 
 instruct storeimmL0(immL0 zero, memory mem)
 %{
   match(Set mem (StoreL mem zero));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "str  zr, $mem\t# int" %}
@@ -5342,7 +5410,7 @@ 
 instruct storeP(iRegP src, memory mem)
 %{
   match(Set mem (StoreP mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "str  $src, $mem\t# ptr" %}
@@ -5356,7 +5424,7 @@ 
 instruct storeimmP0(immP0 zero, memory mem)
 %{
   match(Set mem (StoreP mem zero));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "str zr, $mem\t# ptr" %}
@@ -5415,7 +5483,7 @@ 
 instruct storeN(iRegN src, memory mem)
 %{
   match(Set mem (StoreN mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strw  $src, $mem\t# compressed ptr" %}
@@ -5430,7 +5498,7 @@ 
   match(Set mem (StoreN mem zero));
   predicate(Universe::narrow_oop_base() == NULL &&
             Universe::narrow_klass_base() == NULL &&
-            !((MemNode*)n)->is_volatile());
+            n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
@@ -5444,7 +5512,7 @@ 
 instruct storeF(vRegF src, memory mem)
 %{
   match(Set mem (StoreF mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strs  $src, $mem\t# float" %}
@@ -5461,7 +5529,7 @@ 
 instruct storeD(vRegD src, memory mem)
 %{
   match(Set mem (StoreD mem src));
-  predicate(!treat_as_volatile(((MemNode*)n)));
+  predicate(n->as_Store()->is_unordered());
 
   ins_cost(INSN_COST);
   format %{ "strd  $src, $mem\t# double" %}
@@ -5474,6 +5542,7 @@ 
 // Store Compressed Klass Pointer
 instruct storeNKlass(iRegN src, memory mem)
 %{
+  predicate(n->as_Store()->is_unordered());
   match(Set mem (StoreNKlass mem src));
 
   ins_cost(INSN_COST);
@@ -5526,10 +5595,9 @@ 
 //  ---------------- volatile loads and stores ----------------
 
 // Load Byte (8 bit signed)
-instruct loadB_volatile(iRegINoSp dst, memory mem)
+instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadB mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarsb  $dst, $mem\t# byte" %}
@@ -5540,10 +5608,9 @@ 
 %}
 
 // Load Byte (8 bit signed) into long
-instruct loadB2L_volatile(iRegLNoSp dst, memory mem)
+instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (ConvI2L (LoadB mem)));
-  predicate(treat_as_volatile(((MemNode*)(n->in(1)))));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarsb  $dst, $mem\t# byte" %}
@@ -5554,10 +5621,9 @@ 
 %}
 
 // Load Byte (8 bit unsigned)
-instruct loadUB_volatile(iRegINoSp dst, memory mem)
+instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadUB mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarb  $dst, $mem\t# byte" %}
@@ -5568,10 +5634,9 @@ 
 %}
 
 // Load Byte (8 bit unsigned) into long
-instruct loadUB2L_volatile(iRegLNoSp dst, memory mem)
+instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (ConvI2L (LoadUB mem)));
-  predicate(treat_as_volatile(((MemNode*)(n->in(1)))));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarb  $dst, $mem\t# byte" %}
@@ -5582,10 +5647,9 @@ 
 %}
 
 // Load Short (16 bit signed)
-instruct loadS_volatile(iRegINoSp dst, memory mem)
+instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadS mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarshw  $dst, $mem\t# short" %}
@@ -5595,10 +5659,9 @@ 
   ins_pipe(pipe_class_memory);
 %}
 
-instruct loadUS_volatile(iRegINoSp dst, memory mem)
+instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadUS mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarhw  $dst, $mem\t# short" %}
@@ -5609,10 +5672,9 @@ 
 %}
 
 // Load Short/Char (16 bit unsigned) into long
-instruct loadUS2L_volatile(iRegLNoSp dst, memory mem)
+instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (ConvI2L (LoadUS mem)));
-  predicate(treat_as_volatile(((MemNode*)(n->in(1)))));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarh  $dst, $mem\t# short" %}
@@ -5623,10 +5685,9 @@ 
 %}
 
 // Load Short/Char (16 bit signed) into long
-instruct loadS2L_volatile(iRegLNoSp dst, memory mem)
+instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (ConvI2L (LoadS mem)));
-  predicate(treat_as_volatile(((MemNode*)(n->in(1)))));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarh  $dst, $mem\t# short" %}
@@ -5637,10 +5698,9 @@ 
 %}
 
 // Load Integer (32 bit signed)
-instruct loadI_volatile(iRegINoSp dst, memory mem)
+instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadI mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarw  $dst, $mem\t# int" %}
@@ -5651,10 +5711,9 @@ 
 %}
 
 // Load Integer (32 bit unsigned) into long
-instruct loadUI2L_volatile(iRegLNoSp dst, memory mem, immL_32bits mask)
+instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
 %{
   match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
-  predicate(treat_as_volatile(((MemNode*)(n->in(1))->in(1))));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarw  $dst, $mem\t# int" %}
@@ -5665,10 +5724,9 @@ 
 %}
 
 // Load Long (64 bit signed)
-instruct loadL_volatile(iRegLNoSp dst, memory mem)
+instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadL mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldar  $dst, $mem\t# int" %}
@@ -5679,10 +5737,9 @@ 
 %}
 
 // Load Pointer
-instruct loadP_volatile(iRegPNoSp dst, memory mem)
+instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadP mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldar  $dst, $mem\t# ptr" %}
@@ -5693,10 +5750,9 @@ 
 %}
 
 // Load Compressed Pointer
-instruct loadN_volatile(iRegNNoSp dst, memory mem)
+instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadN mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
@@ -5707,10 +5763,9 @@ 
 %}
 
 // Load Float
-instruct loadF_volatile(vRegF dst, memory mem)
+instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadF mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldars  $dst, $mem\t# float" %}
@@ -5721,10 +5776,9 @@ 
 %}
 
 // Load Double
-instruct loadD_volatile(vRegD dst, memory mem)
+instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
 %{
   match(Set dst (LoadD mem));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "ldard  $dst, $mem\t# double" %}
@@ -5735,10 +5789,9 @@ 
 %}
 
 // Store Byte
-instruct storeB_volatile(iRegI src, memory mem)
+instruct storeB_volatile(iRegI src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreB mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlrb  $src, $mem\t# byte" %}
@@ -5749,10 +5802,9 @@ 
 %}
 
 // Store Char/Short
-instruct storeC_volatile(iRegI src, memory mem)
+instruct storeC_volatile(iRegI src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreC mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlrh  $src, $mem\t# short" %}
@@ -5764,10 +5816,9 @@ 
 
 // Store Integer
 
-instruct storeI_volatile(iRegIorL2I src, memory mem)
+instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
 %{
   match(Set mem(StoreI mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlrw  $src, $mem\t# int" %}
@@ -5778,10 +5829,9 @@ 
 %}
 
 // Store Long (64 bit signed)
-instruct storeL_volatile(iRegL src, memory mem)
+instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreL mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlr  $src, $mem\t# int" %}
@@ -5792,10 +5842,9 @@ 
 %}
 
 // Store Pointer
-instruct storeP_volatile(iRegP src, memory mem)
+instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreP mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlr  $src, $mem\t# ptr" %}
@@ -5806,10 +5855,9 @@ 
 %}
 
 // Store Compressed Pointer
-instruct storeN_volatile(iRegN src, memory mem)
+instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreN mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlrw  $src, $mem\t# compressed ptr" %}
@@ -5820,10 +5868,9 @@ 
 %}
 
 // Store Float
-instruct storeF_volatile(vRegF src, memory mem)
+instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreF mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlrs  $src, $mem\t# float" %}
@@ -5837,10 +5884,9 @@ 
 // implement storeImmF0 and storeFImmPacked
 
 // Store Double
-instruct storeD_volatile(vRegD src, memory mem)
+instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
 %{
   match(Set mem (StoreD mem src));
-  predicate(treat_as_volatile(((MemNode*)n)));
 
   ins_cost(VOLATILE_REF_COST);
   format %{ "stlrd  $src, $mem\t# double" %}
@@ -5912,94 +5958,129 @@ 
 // ============================================================================
 // MemBar Instruction
 
-instruct membar_acquire()
-%{
+instruct load_fence() %{
+  match(LoadFence);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "load_fence" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct unnecessary_membar_acquire() %{
+  predicate(preceded_by_ordered_load(n));
+  match(MemBarAcquire);
+  ins_cost(0);
+
+  format %{ "membar_acquire (elided)" %}
+
+  ins_encode %{
+    __ block_comment("membar_acquire (elided)");
+  %}
+
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct membar_acquire() %{
   match(MemBarAcquire);
   ins_cost(VOLATILE_REF_COST);
 
-  format %{ "MEMBAR-acquire\t# ???" %}
-
-  ins_encode %{
-    if (MacroAssembler::use_acq_rel_for_volatile_fields())
-      __ block_comment("membar_acquire (elided)");
-    else
-      __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad|Assembler::LoadStore));
-  %}
-
-  ins_pipe(pipe_class_memory);
-%}
-
-instruct membar_release()
-%{
+  format %{ "membar_acquire" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
+  %}
+
+  ins_pipe(pipe_class_memory);
+%}
+
+
+instruct membar_acquire_lock() %{
+  match(MemBarAcquireLock);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "membar_acquire_lock" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
+  %}
+
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct store_fence() %{
+  match(StoreFence);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "store_fence" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct unnecessary_membar_release() %{
+  match(MemBarRelease);
+  predicate(followed_by_ordered_store(n));
+  ins_cost(0);
+
+  format %{ "membar_release (elided)" %}
+
+  ins_encode %{
+    __ block_comment("membar_release (elided)");
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct membar_release() %{
   match(MemBarRelease);
   ins_cost(VOLATILE_REF_COST);
 
-  format %{ "MEMBAR-release" %}
-  ins_encode %{
-    if (MacroAssembler::use_acq_rel_for_volatile_fields())
-      __ block_comment("membar_release (elided)");
-    else
-      __ membar(Assembler::AnyAny);
-  %}
-  ins_pipe(pipe_class_memory);
-%}
-
-instruct membar_volatile() %{
-  match(MemBarVolatile);
-  ins_cost(VOLATILE_REF_COST);
-
-  format %{ "MEMBAR-volatile?" %}
-
-  ins_encode %{
-    __ membar(Assembler::AnyAny);
-  %}
-
-  ins_pipe(pipe_class_memory);
-%}
-
-instruct unnecessary_membar_volatile() %{
-  match(MemBarVolatile);
-  predicate(Matcher::post_store_load_barrier(n));
-  ins_cost(0);
-
-  size(0);
-  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
-  ins_encode( );
-  ins_pipe(pipe_class_empty);
+  format %{ "membar_release" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+  %}
+  ins_pipe(pipe_class_memory);
 %}
 
 instruct membar_storestore() %{
   match(MemBarStoreStore);
   ins_cost(VOLATILE_REF_COST);
 
+  format %{ "MEMBAR-store-store" %}
+
   ins_encode %{
     __ membar(Assembler::StoreStore);
   %}
-
-  ins_pipe(pipe_class_memory);
-%}
-
-instruct membar_acquire_lock() %{
-  match(MemBarAcquireLock);
-
-  format %{ "MEMBAR-acquire-lock\t# ???" %}
-
-  ins_encode %{
-    __ block_comment("membar-acquire-lock");
-    __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad|Assembler::LoadStore));
-  %}
-
   ins_pipe(pipe_class_memory);
 %}
 
 instruct membar_release_lock() %{
   match(MemBarReleaseLock);
-
-  format %{ "MEMBAR-release-lock\t# ???" %}
-
-  ins_encode %{
-    __ block_comment("MEMBAR-release-lock");
-    __ membar(Assembler::AnyAny);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "membar_release_lock" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+  %}
+
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct membar_volatile() %{
+  match(MemBarVolatile);
+  ins_cost(VOLATILE_REF_COST*100);
+
+  format %{ "membar_volatile" %}
+
+  ins_encode %{
+    __ membar(Assembler::StoreLoad);
   %}
 
   ins_pipe(pipe_class_memory);
@@ -6219,7 +6300,7 @@ 
 // used when updating the eden heap top
 // implemented using ldaxr on AArch64
 
-instruct loadPLocked(iRegPNoSp dst, memory mem)
+instruct loadPLocked(iRegPNoSp dst, indirect mem)
 %{
   match(Set dst (LoadPLocked mem));
 
@@ -6258,7 +6339,7 @@ 
 %}
 
 // this has to be implemented as a CAS
-instruct storeLConditional(memory mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) 
+instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) 
 %{
   match(Set cr (StoreLConditional mem (Binary oldval newval)));
 
@@ -6275,7 +6356,7 @@ 
 %}
 
 // this has to be implemented as a CAS
-instruct storeIConditional(memory mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) 
+instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) 
 %{
   match(Set cr (StoreIConditional mem (Binary oldval newval)));
 
@@ -6294,7 +6375,7 @@ 
 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 // can't match them
 
-instruct compareAndSwapI(iRegINoSp res, memory mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 
   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
 
@@ -6311,7 +6392,7 @@ 
   ins_pipe(pipe_class_memory);
 %}
 
-instruct compareAndSwapL(iRegINoSp res, memory mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
 
   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
 
@@ -6328,7 +6409,7 @@ 
   ins_pipe(pipe_class_memory);
 %}
 
-instruct compareAndSwapP(iRegINoSp res, memory mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
 
@@ -6345,7 +6426,7 @@ 
   ins_pipe(pipe_class_memory);
 %}
 
-instruct compareAndSwapN(iRegINoSp res, memory mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
+instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
 
   match(Set res (CompareAndSwapN mem (Binary oldval newval)));
 
@@ -7017,34 +7098,6 @@ 
 
 // Integer Addition
 
-instruct addExactI_reg(iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr)
-%{
-  match(AddExactI src1 src2);
-  effect(DEF cr);
-
-  format %{ "addsw    r0, $src1, $src2\t# addExact int" %}
-  ins_encode %{
-    __ addsw(r0,
-            as_Register($src1$$reg),
-            as_Register($src2$$reg));
-  %}
-  ins_pipe(pipe_class_default);
-%}
-
-instruct addExactI_reg_imm(iRegIorL2I src1, immI src2, rFlagsReg cr)
-%{
-  match(AddExactI src1 src2);
-  effect(DEF cr);
-
-  format %{ "addsw    r0, $src1, $src2\t# addExact int" %}
-  ins_encode %{
-    __ addsw(r0,
-            as_Register($src1$$reg),
-            $src2$$constant);
-  %}
-  ins_pipe(pipe_class_default);
-%}
-
 // TODO
 // these currently employ operations which do not set CR and hence are
 // not flagged as killing CR but we would like to isolate the cases
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/assembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Jun 05 13:48:13 2014 +0100
@@ -1001,13 +1001,13 @@ 
 
   // A more convenient access to dmb for our purposes
   enum Membar_mask_bits {
-    StoreStore = ST,
-    LoadStore  = LD,
-    LoadLoad   = LD,
-    // We can use ISH for a full barrier because the ARM ARM says
-    // "This architecture assumes that all Processing Elements that
-    // use the same operating system or hypervisor are in the same
-    // Inner Shareable shareability domain."
+    // We can use ISH for a barrier because the ARM ARM says "This
+    // architecture assumes that all Processing Elements that use the
+    // same operating system or hypervisor are in the same Inner
+    // Shareable shareability domain."
+    StoreStore = ISHST,
+    LoadStore  = ISHLD,
+    LoadLoad   = ISHLD,
     StoreLoad  = ISH,
     AnyAny     = ISH
   };
@@ -1992,6 +1992,11 @@ 
   void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
 };
 
+inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
+					     Assembler::Membar_mask_bits b) {
+  return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
+}
+
 Instruction_aarch64::~Instruction_aarch64() {
   assem->emit();
 }
@@ -2003,8 +2008,6 @@ 
   return Assembler::Condition(int(cond) ^ 1);
 }
 
-// extra stuff needed to compile
-// not sure which of these methods are really necessary
 class BiasedLockingCounters;
 
 extern "C" void das(uint64_t start, int len);
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp
--- a/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/bytecodeInterpreter_aarch64.hpp	Thu Jun 05 13:48:13 2014 +0100
@@ -96,7 +96,7 @@ 
 #define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
 #define LOCALS_INT(offset)     ((jint)(locals[-(offset)]))
 #define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  ((oop)locals[-(offset)])
+#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
 #define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
 #define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
 #define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -177,10 +177,6 @@ 
   return result;
 }
 
-static bool is_reg(LIR_Opr op) {
-  return op->is_double_cpu() | op->is_single_cpu();
-}
-
 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
   Register base = addr->base()->as_pointer_register();
   LIR_Opr opr = addr->index();
@@ -519,7 +515,7 @@ 
   __ pop(0x3, sp);                 // r0 & r1
   __ leave();
   __ br(rscratch1);
-  address polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
+  address polling_page(os::get_polling_page());
   assert(os::is_poll_address(polling_page), "should be");
   unsigned long off;
   __ adrp(rscratch1, Address(polling_page, rtype), off);
@@ -538,7 +534,7 @@ 
   // Pop the stack before the safepoint code
   __ remove_frame(initial_frame_size_in_bytes());
   if (UseCompilerSafepoints) {
-    address polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()));
+    address polling_page(os::get_polling_page());
     __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
   } else {
     poll_for_safepoint(relocInfo::poll_return_type);
@@ -547,8 +543,7 @@ 
 }
 
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  address polling_page(os::get_polling_page()
-		       + (SafepointPollOffset % os::vm_page_size()));
+  address polling_page(os::get_polling_page());
   if (UseCompilerSafepoints) {
     guarantee(info != NULL, "Shouldn't be NULL");
     assert(os::is_poll_address(polling_page), "should be");
@@ -2745,148 +2740,12 @@ 
 }
 
 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
-  if (dest->is_address()) {
-      LIR_Address* to_addr = dest->as_address_ptr();
-      Register compressed_src = noreg;
-      if (is_reg(src)) {
-	  compressed_src = as_reg(src);
-	  if (type == T_ARRAY || type == T_OBJECT) {
-	    __ verify_oop(src->as_register());
-	    if (UseCompressedOops) {
-	      compressed_src = rscratch2;
-	      __ mov(compressed_src, src->as_register());
-	      __ encode_heap_oop(compressed_src);
-	    }
-	  }
-      } else if (src->is_single_fpu()) {
-	__ fmovs(rscratch2, src->as_float_reg());
-	src = FrameMap::rscratch2_opr,	type = T_INT;
-      } else if (src->is_double_fpu()) {
-	__ fmovd(rscratch2, src->as_double_reg());
-	src = FrameMap::rscratch2_long_opr, type = T_LONG;
-      }
-
-      if (dest->is_double_cpu())
-	__ lea(rscratch1, as_Address(to_addr));
-      else
-	__ lea(rscratch1, as_Address_lo(to_addr));
-
-      int null_check_here = code_offset();
-      switch (type) {
-      case T_ARRAY:   // fall through
-      case T_OBJECT:  // fall through
-	if (UseCompressedOops) {
-	  __ stlrw(compressed_src, rscratch1);
-	} else {
-	  __ stlr(compressed_src, rscratch1);
-	}
-	break;
-      case T_METADATA:
-	// We get here to store a method pointer to the stack to pass to
-	// a dtrace runtime call. This can't work on 64 bit with
-	// compressed klass ptrs: T_METADATA can be a compressed klass
-	// ptr or a 64 bit method pointer.
-	LP64_ONLY(ShouldNotReachHere());
-	__ stlr(src->as_register(), rscratch1);
-	break;
-      case T_ADDRESS:
-	__ stlr(src->as_register(), rscratch1);
-	break;
-      case T_INT:
-	__ stlrw(src->as_register(), rscratch1);
-	break;
-
-      case T_LONG: {
-	__ stlr(src->as_register_lo(), rscratch1);
-	break;
-      }
-
-      case T_BYTE:    // fall through
-      case T_BOOLEAN: {
-	__ stlrb(src->as_register(), rscratch1);
-	break;
-      }
-
-      case T_CHAR:    // fall through
-      case T_SHORT:
-	__ stlrh(src->as_register(), rscratch1);
-	break;
-
-      default:
-	ShouldNotReachHere();
-      }
-      if (info != NULL) {
-	add_debug_info_for_null_check(null_check_here, info);
-      }
-  } else if (src->is_address()) {
-    LIR_Address* from_addr = src->as_address_ptr();
-
-    if (src->is_double_cpu())
-      __ lea(rscratch1, as_Address(from_addr));
-    else
-      __ lea(rscratch1, as_Address_lo(from_addr));
-
-    int null_check_here = code_offset();
-    switch (type) {
-    case T_ARRAY:   // fall through
-    case T_OBJECT:  // fall through
-      if (UseCompressedOops) {
-	__ ldarw(dest->as_register(), rscratch1);
-      } else {
-	__ ldar(dest->as_register(), rscratch1);
-      }
-      break;
-    case T_ADDRESS:
-      __ ldar(dest->as_register(), rscratch1);
-      break;
-    case T_INT:
-      __ ldarw(dest->as_register(), rscratch1);
-      break;
-    case T_LONG: {
-      __ ldar(dest->as_register_lo(), rscratch1);
-      break;
-    }
-
-    case T_BYTE:    // fall through
-    case T_BOOLEAN: {
-      __ ldarb(dest->as_register(), rscratch1);
-      break;
-    }
-
-    case T_CHAR:    // fall through
-    case T_SHORT:
-      __ ldarh(dest->as_register(), rscratch1);
-      break;
-
-    case T_FLOAT:
-      __ ldarw(rscratch2, rscratch1);
-      __ fmovs(dest->as_float_reg(), rscratch2);
-      break;
-
-    case T_DOUBLE:
-      __ ldar(rscratch2, rscratch1);
-      __ fmovd(dest->as_double_reg(), rscratch2);
-      break;
-
-    default:
-      ShouldNotReachHere();
-    }
-    if (info != NULL) {
-      add_debug_info_for_null_check(null_check_here, info);
-    }
-
-    if (type == T_ARRAY || type == T_OBJECT) {
-      if (UseCompressedOops) {
-	__ decode_heap_oop(dest->as_register());
-      }
-      __ verify_oop(dest->as_register());
-    } else if (type == T_ADDRESS && from_addr->disp() == oopDesc::klass_offset_in_bytes()) {
-      if (UseCompressedClassPointers) {
-	__ decode_klass_not_null(dest->as_register());
-      }
-    }
-  } else
+  if (dest->is_address() || src->is_address()) {
+    move_op(src, dest, type, lir_patch_none, info,
+	    /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
+  } else {
     ShouldNotReachHere();
+  }
 }
 
 #ifdef ASSERT
@@ -2940,17 +2799,18 @@ 
 }
 
 void LIR_Assembler::membar_acquire() {
-  __ block_comment("membar_acquire");
+  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
 }
 
 void LIR_Assembler::membar_release() {
-  __ block_comment("membar_release");
+  __ membar(Assembler::LoadStore|Assembler::StoreStore);
 }
 
-void LIR_Assembler::membar_loadload() { Unimplemented(); }
+void LIR_Assembler::membar_loadload() {
+  __ membar(Assembler::LoadLoad);
+}
 
 void LIR_Assembler::membar_storestore() {
-  COMMENT("membar_storestore");
   __ membar(MacroAssembler::StoreStore);
 }
 
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
--- a/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -1082,6 +1082,21 @@ 
       }
       break;
 
+    case deoptimize_id:
+      {
+        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
+        OopMap* oop_map = save_live_registers(sasm);
+        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
+        oop_maps = new OopMapSet();
+        oop_maps->add_gc_map(call_offset, oop_map);
+        restore_live_registers(sasm);
+        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+        assert(deopt_blob != NULL, "deoptimization blob must have been created");
+        __ leave();
+        __ b(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+      }
+      break;
+
     case throw_range_check_failed_id:
       { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
@@ -1242,7 +1257,7 @@ 
 
 	assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
 
-        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ membar(Assembler::StoreLoad);
         __ ldrb(rscratch1, Address(card_addr, offset));
 	__ cbzw(rscratch1, done);
 
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/c2_globals_aarch64.hpp
--- a/src/cpu/aarch64/vm/c2_globals_aarch64.hpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/c2_globals_aarch64.hpp	Thu Jun 05 13:48:13 2014 +0100
@@ -78,6 +78,8 @@ 
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
+define_pd_global(bool,  TrapBasedRangeChecks,        false);
+
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize,    ScaleForWordSize(16*M));
 
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/frame_aarch64.inline.hpp
--- a/src/cpu/aarch64/vm/frame_aarch64.inline.hpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/frame_aarch64.inline.hpp	Thu Jun 05 13:48:13 2014 +0100
@@ -47,10 +47,12 @@ 
 inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
   intptr_t a = intptr_t(sp);
   intptr_t b = intptr_t(fp);
+#ifndef PRODUCT
   if (fp)
     if (sp > fp || (fp - sp > 0x100000))
       for(;;)
 	asm("nop");
+#endif
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
@@ -71,10 +73,12 @@ 
 inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   intptr_t a = intptr_t(sp);
   intptr_t b = intptr_t(fp);
+#ifndef PRODUCT
   if (fp) 
     if (sp > fp || (fp - sp > 0x100000))
       for(;;)
 	asm("nop");
+#endif
   _sp = sp;
   _unextended_sp = unextended_sp;
   _fp = fp;
@@ -96,10 +100,12 @@ 
 inline frame::frame(intptr_t* sp, intptr_t* fp) {
   intptr_t a = intptr_t(sp);
   intptr_t b = intptr_t(fp);
+#ifndef PRODUCT
   if (fp)
     if (sp > fp || (fp - sp > 0x100000))
       for(;;)
 	asm("nop");
+#endif
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
@@ -269,6 +275,10 @@ 
   }
 }
 
+inline oop* frame::interpreter_frame_temp_oop_addr() const {
+  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
+}
+
 #endif /* CC_INTERP */
 
 inline int frame::pd_oop_map_offset_adjustment() const {
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp
--- a/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/globalDefinitions_aarch64.hpp	Thu Jun 05 13:48:13 2014 +0100
@@ -29,6 +29,12 @@ 
 
 const int StackAlignmentInBytes  = 16;
 
+// Indicates whether the C calling conventions require that
+// 32-bit integer argument values are properly extended to 64 bits.
+// If set, SharedRuntime::c_calling_convention() must adapt
+// signatures accordingly.
+const bool CCallingConventionRequiresIntsAsLongs = false;
+
 #define SUPPORTS_NATIVE_CX8
 
 #endif // CPU_AARCH64_VM_GLOBALDEFINITIONS_AARCH64_HPP
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/globals_aarch64.hpp
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Jun 05 13:48:13 2014 +0100
@@ -39,7 +39,8 @@ 
 define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
 define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs past to check cast
+define_pd_global(bool, TrapBasedNullChecks,  false);
+define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs passed to check cast
 
 // See 4827828 for this change. There is no globals_core_i486.hpp. I can't
 // assign a different value for C2 without touching a number of files. Use
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/interp_masm_aarch64.cpp
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -1417,10 +1417,10 @@ 
                                                         Register scratch, bool preloaded,
                                                         Condition cond, Label* where) {
   if (!preloaded) {
-    ldr(scratch, counter_addr);
+    ldrw(scratch, counter_addr);
   }
   add(scratch, scratch, increment);
-  str(scratch, counter_addr);
+  strw(scratch, counter_addr);
   ands(scratch, scratch, mask);
   br(cond, *where);
 }
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp
--- a/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -1,5 +1,5 @@ 
 /*
- * Copyright (c) 2013, Red Hat Inc.
+ * Copyright (c) 2014, Red Hat Inc.
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates.
  * All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -35,39 +35,136 @@ 
 
 #define BUFFER_SIZE 30*wordSize
 
-// Instead of issuing lfence for LoadLoad barrier, we create data dependency
-// between loads, which is more efficient than lfence.
+// Instead of issuing a LoadLoad barrier we create an address
+// dependency between loads; this might be more efficient.
 
 // Common register usage:
-// rax/xmm0: result
+// r0/v0:      result
 // c_rarg0:    jni env
 // c_rarg1:    obj
 // c_rarg2:    jfield id
 
-// static const Register robj          = r9;
-// static const Register rcounter      = r10;
-// static const Register roffset       = r11;
-// static const Register rcounter_addr = r11;
+static const Register robj          = r3;
+static const Register rcounter      = r4;
+static const Register roffset       = r5;
+static const Register rcounter_addr = r6;
+static const Register result        = r7;
 
-// Warning: do not use rip relative addressing after the first counter load
-// since that may scratch r10!
+address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
+  const char *name;
+  switch (type) {
+    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
+    case T_BYTE:    name = "jni_fast_GetByteField";    break;
+    case T_CHAR:    name = "jni_fast_GetCharField";    break;
+    case T_SHORT:   name = "jni_fast_GetShortField";   break;
+    case T_INT:     name = "jni_fast_GetIntField";     break;
+    case T_LONG:    name = "jni_fast_GetLongField";    break;
+    case T_FLOAT:   name = "jni_fast_GetFloatField";   break;
+    case T_DOUBLE:  name = "jni_fast_GetDoubleField";  break;
+    default:        ShouldNotReachHere();
+  }
+  ResourceMark rm;
+  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
+  CodeBuffer cbuf(blob);
+  MacroAssembler* masm = new MacroAssembler(&cbuf);
+  address fast_entry = __ pc();
 
-address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { Unimplemented(); return 0; }
+  Label slow;
 
-address JNI_FastGetField::generate_fast_get_boolean_field() { Unimplemented(); return 0; }
+  unsigned long offset;
+  __ adrp(rcounter_addr,
+	  SafepointSynchronize::safepoint_counter_addr(), offset);
+  Address safepoint_counter_addr(rcounter_addr, offset);
+  __ ldrw(rcounter, safepoint_counter_addr);
+  __ andw(rscratch1, rcounter, 1);
+  __ cbnzw(rscratch1, slow);
+  __ eor(robj, c_rarg1, rcounter);
+  __ eor(robj, robj, rcounter);               // obj, since
+                                              // robj ^ rcounter ^ rcounter == robj
+                                              // robj is address dependent on rcounter.
+  __ ldr(robj, Address(robj, 0));             // *obj
+  __ lsr(roffset, c_rarg2, 2);                // offset
 
-address JNI_FastGetField::generate_fast_get_byte_field() { Unimplemented(); return 0; }
+  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
+  speculative_load_pclist[count] = __ pc();   // Used by the segfault handler
+  switch (type) {
+    case T_BOOLEAN: __ ldrb    (result, Address(robj, roffset)); break;
+    case T_BYTE:    __ ldrsb   (result, Address(robj, roffset)); break;
+    case T_CHAR:    __ ldrh    (result, Address(robj, roffset)); break;
+    case T_SHORT:   __ ldrsh   (result, Address(robj, roffset)); break;
+    case T_FLOAT:   __ ldrw    (result, Address(robj, roffset)); break;
+    case T_INT:     __ ldrsw   (result, Address(robj, roffset)); break;
+    case T_DOUBLE:
+    case T_LONG:    __ ldr     (result, Address(robj, roffset)); break;
+    default:        ShouldNotReachHere();
+  }
 
-address JNI_FastGetField::generate_fast_get_char_field() { Unimplemented(); return 0; }
+  // counter_addr is address dependent on result.
+  __ eor(rcounter_addr, rcounter_addr, result);
+  __ eor(rcounter_addr, rcounter_addr, result);
+  __ ldrw(rscratch1, safepoint_counter_addr);
+  __ cmpw(rcounter, rscratch1);
+  __ br (Assembler::NE, slow);
 
-address JNI_FastGetField::generate_fast_get_short_field() { Unimplemented(); return 0; }
+  switch (type) {
+    case T_FLOAT:   __ fmovs(v0, result); break;
+    case T_DOUBLE:  __ fmovd(v0, result); break;
+    default:        __ mov(r0, result);   break;
+  }
+  __ ret(lr);
 
-address JNI_FastGetField::generate_fast_get_int_field() { Unimplemented(); return 0; }
+  slowcase_entry_pclist[count++] = __ pc();
+  __ bind(slow);
+  address slow_case_addr;
+  switch (type) {
+    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
+    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
+    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
+    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
+    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
+    case T_LONG:    slow_case_addr = jni_GetLongField_addr();    break;
+    case T_FLOAT:   slow_case_addr = jni_GetFloatField_addr();   break;
+    case T_DOUBLE:  slow_case_addr = jni_GetDoubleField_addr();  break;
+    default:        ShouldNotReachHere();
+  }
+  // tail call
+  __ lea(rscratch1, ExternalAddress(slow_case_addr));
+  __ br(rscratch1);
 
-address JNI_FastGetField::generate_fast_get_long_field() { Unimplemented(); return 0; }
+  __ flush ();
 
-address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { Unimplemented(); return 0; }
+  return fast_entry;
+}
 
-address JNI_FastGetField::generate_fast_get_float_field() { Unimplemented(); return 0; }
+address JNI_FastGetField::generate_fast_get_boolean_field() {
+  return generate_fast_get_int_field0(T_BOOLEAN);
+}
 
-address JNI_FastGetField::generate_fast_get_double_field() { Unimplemented(); return 0; }
+address JNI_FastGetField::generate_fast_get_byte_field() {
+  return generate_fast_get_int_field0(T_BYTE);
+}
+
+address JNI_FastGetField::generate_fast_get_char_field() {
+  return generate_fast_get_int_field0(T_CHAR);
+}
+
+address JNI_FastGetField::generate_fast_get_short_field() {
+  return generate_fast_get_int_field0(T_SHORT);
+}
+
+address JNI_FastGetField::generate_fast_get_int_field() {
+  return generate_fast_get_int_field0(T_INT);
+}
+
+address JNI_FastGetField::generate_fast_get_long_field() {
+  return generate_fast_get_int_field0(T_LONG);
+}
+
+address JNI_FastGetField::generate_fast_get_float_field() {
+  return generate_fast_get_int_field0(T_FLOAT);
+}
+
+address JNI_FastGetField::generate_fast_get_double_field() {
+  return generate_fast_get_int_field0(T_DOUBLE);
+}
+
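
For reviewers unfamiliar with the fast-field stubs: the code above follows
the usual HotSpot speculative protocol, i.e. read the safepoint counter,
take the slow path if it is odd, do the load, then re-read the counter and
fall back to the slow path if it changed.  A minimal, self-contained C++
model of that protocol (hypothetical names, plain atomics standing in for
the generated code):

    #include <atomic>
    #include <cstdint>

    // Odd while a safepoint is in progress and incremented on every
    // transition, so a racing safepoint/GC is detected by re-checking it.
    std::atomic<uint32_t> safepoint_counter;

    int32_t slow_get_int_field(void** obj_handle, intptr_t field_id) {
      return 0;   // stand-in for the real jni_GetIntField slow path
    }

    int32_t fast_get_int_field(void** obj_handle, intptr_t field_id) {
      uint32_t counter = safepoint_counter.load(std::memory_order_acquire);
      if (counter & 1)                           // safepoint in progress
        return slow_get_int_field(obj_handle, field_id);
      char*    obj    = (char*) *obj_handle;     // speculative: GC may move the object
      intptr_t offset = field_id >> 2;           // the offset is encoded in the jfieldID
      int32_t  value  = *(int32_t*) (obj + offset);
      if (safepoint_counter.load(std::memory_order_acquire) != counter)
        return slow_get_int_field(obj_handle, field_id);   // raced with a safepoint
      return value;
    }

In the generated code the two eor pairs stand in for the acquire ordering
used in the model: robj is made data dependent on rcounter, and
rcounter_addr on the loaded value, so the counter load, the field load and
the counter re-load cannot be reordered by the hardware.  That is what
makes the re-check sound on AArch64's weak memory model.
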
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -2723,7 +2723,7 @@ 
 
   assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
 
-  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  membar(Assembler::StoreLoad);
 
   ldrb(tmp2, Address(card_addr, offset));
   cbzw(tmp2, done);
@@ -3079,6 +3079,12 @@ 
 }
 
 void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
+  relocInfo::relocType rtype = dest.rspec().reloc()->type();
+  guarantee(rtype == relocInfo::none
+	    || rtype == relocInfo::external_word_type
+	    || rtype == relocInfo::poll_type
+	    || rtype == relocInfo::poll_return_type,
+	    "can only use a fixed address with an ADRP");
   if (labs(pc() - dest.target()) >= (1LL << 32)) {
     // Out of range.  This doesn't happen very often, but we have to
     // handle it
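
On the adrp() change: the new guarantee only makes the existing assumption
explicit.  The two-instruction adrp form can only encode targets at a fixed
address (external words and the polling page), because adrp materialises
the 4K page of the target PC-relatively (range of roughly +/- 4GB, hence
the (1LL << 32) check) and returns the low 12 bits through byte_offset.
The JNI fast-field stubs earlier in this patch use it as follows:

    unsigned long offset;
    __ adrp(rcounter_addr,
            SafepointSynchronize::safepoint_counter_addr(), offset);
    // the low 12 bits of the target come back in 'offset' and are folded
    // into the addressing mode of the load
    __ ldrw(rcounter, Address(rcounter_addr, offset));
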
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -769,7 +769,9 @@ 
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
+                                         VMRegPair *regs2,
                                          int total_args_passed) {
+  assert(regs2 == NULL, "not needed on AArch64");
 // We return the amount of VMRegImpl stack slots we need to reserve for all
 // the arguments NOT counting out_preserve_stack_slots.
 
@@ -1414,7 +1416,7 @@ 
   // Now figure out where the args must be stored and how much stack space
   // they require.
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Compute framesize for the wrapper.  We need to handlize all oops in
   // incoming registers
@@ -1924,7 +1926,7 @@ 
   if(os::is_MP()) {
     if (UseMembar) {
       // Force this write out before the read below
-      __ dsb(Assembler::SY);
+      __ dmb(Assembler::SY);
     } else {
       // Write serialization page so VM thread can do a pseudo remote membar.
       // We use the current thread pointer to calculate a thread specific
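
On the dsb -> dmb change: all that is required at this point is StoreLoad
ordering between the thread-state store and the following read of the
safepoint/suspend state.  DMB SY provides that ordering; DSB SY would
additionally wait for all outstanding accesses to complete, which is not
needed here and is noticeably more expensive.  An illustrative C++ analogue
of the UseMembar path (field names invented for the sketch):

    #include <atomic>

    std::atomic<int>  thread_state;        // stand-in for the JavaThread state field
    std::atomic<bool> safepoint_pending;   // stand-in for the safepoint/suspend state

    void transition_from_native_sketch() {
      thread_state.store(1, std::memory_order_relaxed);    // value is illustrative
      // StoreLoad fence: compiles to a DMB on AArch64.  Ordering is all
      // that is required here, completion is not.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      if (safepoint_pending.load(std::memory_order_relaxed)) {
        // ... call into the VM and block for the safepoint ...
      }
    }
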
diff -r 6298eeefbb7b -r 8cb098504801 src/cpu/aarch64/vm/templateTable_aarch64.cpp
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -2405,8 +2405,7 @@ 
   __ bind(Done);
   // It's really not worth bothering to check whether this field
   // really is volatile in the slow case.
-  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad |
-						    MacroAssembler::LoadStore));
+  __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
 }
 
 
@@ -2498,7 +2497,7 @@ 
   {
     Label notVolatile;
     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore));
+    __ membar(MacroAssembler::StoreStore);
     __ bind(notVolatile);
   }
 
@@ -2645,7 +2644,7 @@ 
   {
     Label notVolatile;
     __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad));
+    __ membar(MacroAssembler::StoreLoad);
     __ bind(notVolatile);
   }
 }
@@ -2734,7 +2733,7 @@ 
   {
     Label notVolatile;
     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore));
+    __ membar(MacroAssembler::StoreStore);
     __ bind(notVolatile);
   }
 
@@ -2778,7 +2777,7 @@ 
   {
     Label notVolatile;
     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad));
+    __ membar(MacroAssembler::StoreLoad);
     __ bind(notVolatile);
   }
 }
@@ -2855,8 +2854,7 @@ 
   {
     Label notVolatile;
     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad |
-						      MacroAssembler::LoadStore));
+    __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
     __ bind(notVolatile);
   }
 }
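
The templateTable hunks above are mechanical: membar() accepts the combined
mask bits directly, so the Membar_mask_bits wrappers can go.  The barrier
placement for volatile fields is unchanged; a volatile load is followed by
LoadLoad|LoadStore and a volatile store is bracketed by StoreStore before
and StoreLoad after, which corresponds roughly to the C++ atomics below
(illustrative only):

    #include <atomic>

    int  load_volatile (std::atomic<int>& f)        { return f.load(std::memory_order_acquire); }
    void store_volatile(std::atomic<int>& f, int v) { f.store(v, std::memory_order_seq_cst); }
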
diff -r 6298eeefbb7b -r 8cb098504801 src/share/vm/c1/c1_Runtime1.cpp
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -225,11 +225,6 @@ 
 #if defined(SPARC) || defined(PPC)
     case handle_exception_nofpu_id:  // Unused on sparc
 #endif
-#ifdef TARGET_ARCH_aarch64
-    case throw_index_exception_id:
-    case throw_array_store_exception_id:
-    case deoptimize_id:
-#endif
       break;
 
     // All other stubs should have oopmaps
diff -r 6298eeefbb7b -r 8cb098504801 src/share/vm/opto/generateOptoStub.cpp
--- a/src/share/vm/opto/generateOptoStub.cpp	Thu Jun 05 13:07:27 2014 +0100
+++ b/src/share/vm/opto/generateOptoStub.cpp	Thu Jun 05 13:48:13 2014 +0100
@@ -141,7 +141,7 @@ 
 
   Node *last_pc = new (C) ConPNode(t);
   _gvn.set_type(last_pc, t);
-  store_to_memory(NULL, adr_last_Java_pc, last_pc, T_ADDRESS, NoAlias);
+  store_to_memory(NULL, adr_last_Java_pc, last_pc, T_ADDRESS, NoAlias, MemNode::unordered);
 
 #endif /* defined(AARCH64) */
 
--- CUT HERE ---