@@ -57,6 +57,9 @@
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
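+// Segmented code cache defaults: the three code heaps together account for
+// the full ReservedCodeCacheSize (13M + 14M + 5M == 32M).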
+define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
@@ -75,6 +75,9 @@
define_pd_global(bool, OptoBundling, false);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
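+// As above, the heap sizes sum to ReservedCodeCacheSize (21M + 22M + 5M == 48M).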
+define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
+define_pd_global(intx, NonMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
@@ -689,6 +689,13 @@
return NULL;
}
+#ifndef PRODUCT
+// This is a generic constructor which is only used by pns() in debug.cpp.
+frame::frame(void* sp, void* fp, void* pc) {
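+  // Delegate to the shared init() helper; invoking another frame constructor
+  // here would only initialize a discarded temporary, leaving *this untouched.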
+  init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
+}
+#endif
+
intptr_t* frame::real_fp() const {
if (_cb != NULL) {
// use the frame size if valid
@@ -43,8 +43,9 @@
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
- address generate_empty_entry(void);
- address generate_accessor_entry(void);
+ address generate_jump_to_normal_entry(void);
+ address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+ address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
@@ -237,6 +237,17 @@
__ blrt(rscratch1, gpargs, fpargs, rtype);
}
+// Accessor and empty methods jump to the normal entry point instead of
+// taking a dedicated fast path. The fast paths do not update the invocation
+// counters, which can prevent these methods from being compiled and inlined
+// even though they should be.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+ address entry_point = __ pc();
+
+ assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
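+  // Unconditionally branch to the already-generated zerolocals (normal) entry.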
+ __ b(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
+ return entry_point;
+}
+
// Abstract method entry
// Attempt to execute abstract method. Throw exception
address InterpreterGenerator::generate_abstract_entry(void) {
@@ -261,43 +272,6 @@
return entry_point;
}
-
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
- // rmethod: Method*
- // r13: sender sp must set sp to this value on return
-
- if (!UseFastEmptyMethods) {
- return NULL;
- }
-
- address entry_point = __ pc();
-
- // If we need a safepoint check, generate full interpreter entry.
- Label slow_path;
- {
- unsigned long offset;
- assert(SafepointSynchronize::_not_synchronized == 0,
- "SafepointSynchronize::_not_synchronized");
- __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
- __ ldrw(rscratch2, Address(rscratch2, offset));
- __ cbnz(rscratch2, slow_path);
- }
-
- // do nothing for empty methods (do not even increment invocation counter)
- // Code: _return
- // _return
- // return w/o popping parameters
- __ mov(sp, r13); // Restore caller's SP
- __ br(lr);
-
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry_point;
-
-}
-
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
@@ -660,12 +660,6 @@
//
//
-// Call an accessor method (assuming it is resolved, otherwise drop
-// into vanilla (slow path) entry
-address InterpreterGenerator::generate_accessor_entry(void) {
- return NULL;
-}
-
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
return NULL;
@@ -1461,50 +1455,6 @@
// ...
// [ parameter 1 ] <--- rlocals
-address AbstractInterpreterGenerator::generate_method_entry(
- AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
- case Interpreter::java_util_zip_CRC32_update
- : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_update_entry(); break;
- case Interpreter::java_util_zip_CRC32_updateBytes
- : // fall thru
- case Interpreter::java_util_zip_CRC32_updateByteBuffer
- : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_updateBytes_entry(kind); break;
- default : ShouldNotReachHere(); break;
- }
-
- if (entry_point) {
- return entry_point;
- }
-
- return ((InterpreterGenerator*) this)->
- generate_normal_entry(synchronized);
-}
-
-
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1761,11 +1761,9 @@
// r2: scratch
__ cbz(r0, dispatch); // test result -- no osr if null
// nmethod may have been invalidated (VM may block upon call_VM return)
- __ ldrw(r2, Address(r0, nmethod::entry_bci_offset()));
- // InvalidOSREntryBci == -2 which overflows cmpw as unsigned
- // use cmnw against -InvalidOSREntryBci which does the same thing
- __ cmn(r2, -InvalidOSREntryBci);
- __ br(Assembler::EQ, dispatch);
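+  // Load the nmethod's state byte; anything other than in_use means the
+  // OSR nmethod has been invalidated, so take the normal dispatch path.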
+ __ ldrb(r2, Address(r0, nmethod::state_offset()));
+ __ cmpw(r2, nmethod::in_use);
+ __ br(Assembler::NE, dispatch);
// We have the address of an on stack replacement routine in r0
// We need to prepare to execute the OSR method. First we must
@@ -136,6 +136,17 @@
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
UseCRC32Intrinsics = true;
}
+
+ if (UseSHA) {
+ warning("SHA instructions are not implemented");
+ FLAG_SET_DEFAULT(UseSHA, false);
+ }
+ if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
+ warning("SHA intrinsics are not implemented");
+ FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+ FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+ FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+ }
}
void VM_Version::initialize() {
@@ -1400,7 +1400,7 @@
#ifndef SYS_clock_getres
#if defined(IA32) || defined(AMD64)
-#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229) AARCH64_ONLY(114)
+ #define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229) AARCH64_ONLY(114)
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#else
#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
@@ -1980,11 +1980,11 @@
static Elf32_Half running_arch_code=EM_MIPS;
#elif (defined M68K)
static Elf32_Half running_arch_code=EM_68K;
- #elif (defined AARCH64)
- static Elf32_Half running_arch_code=EM_AARCH64;
+#elif (defined AARCH64)
+ static Elf32_Half running_arch_code=EM_AARCH64;
#else
#error Method os::dll_load requires that one of following is defined:\
- IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64
+ IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64
#endif
// Identify compatability class for VM's architecture and library's architecture
@@ -5876,11 +5876,11 @@
extern char** environ;
#ifndef __NR_fork
-#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57) AARCH64_ONLY(1079)
+ #define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57) AARCH64_ONLY(1079)
#endif
#ifndef __NR_execve
-#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59) AARCH64_ONLY(221)
+ #define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59) AARCH64_ONLY(221)
#endif
// Run the specified command in a separate process. Return its exit value,
@@ -690,6 +690,10 @@
}
#endif
+int os::extra_bang_size_in_bytes() {
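+  // AArch64 does not require an additional stack bang.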
+ return 0;
+}
+
extern "C" {
int SpinPause() {
}
@@ -1148,7 +1148,7 @@
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_ERGO(uintx, ReservedCodeCacheSize, ReservedCodeCacheSize * 5);
// The maximum B/BL offset range on AArch64 is 128MB
- AARCH64_ONLY(FLAG_SET_DEFAULT(ReservedCodeCacheSize, MIN2(ReservedCodeCacheSize, 128*M)));
+ AARCH64_ONLY(FLAG_SET_ERGO(uintx, ReservedCodeCacheSize, MIN2(ReservedCodeCacheSize, 128*M)));
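+    // With the 48M C2 default, scaling gives 240M, which the clamp above
+    // reduces back to 128M on AArch64.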
}
// Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {