@@ -132,6 +132,15 @@
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
+ if (_info->deoptimize_on_exception()) {
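+ // The check may have been hoisted out of a loop by range check elimination,
+ // so throwing here would be wrong; deoptimize and re-execute in the
+ // interpreter instead (mirrors ImplicitNullCheckStub below).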
+ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ __ call(RuntimeAddress(a));
+ ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
+ debug_only(__ should_not_reach_here());
+ return;
+ }
+
if (_index->is_cpu_register()) {
__ mov(rscratch1, _index->as_register());
} else {
@@ -145,6 +154,20 @@
}
__ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
+ debug_only(__ should_not_reach_here());
+}
+
+PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
+ _info = new CodeEmitInfo(info);
+}
+
+void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
+ __ bind(_entry);
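+ // Reached when a loop predicate inserted by range check elimination fails;
+ // the runtime stub triggers deoptimization back to the interpreter.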
+ address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ __ call(RuntimeAddress(a));
+ ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
@@ -432,10 +455,19 @@
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
+ address a;
+ if (_info->deoptimize_on_exception()) {
+ // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
+ a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+ } else {
+ a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+ }
+
ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
__ bind(_entry);
- __ bl(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
+ __ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
+ ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
}
@@ -2832,6 +2832,45 @@
ShouldNotReachHere();
}
+#ifdef ASSERT
+// emit run-time assertion
+void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
+ assert(op->code() == lir_assert, "must be");
+
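+ // If operands are given, emit the comparison whose result the conditional
+ // branch below tests.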
+ if (op->in_opr1()->is_valid()) {
+ assert(op->in_opr2()->is_valid(), "both operands must be valid");
+ comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
+ } else {
+ assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
+ assert(op->condition() == lir_cond_always, "no other conditions allowed");
+ }
+
+ Label ok;
+ if (op->condition() != lir_cond_always) {
+ Assembler::Condition acond = Assembler::AL;
+ switch (op->condition()) {
+ case lir_cond_equal: acond = Assembler::EQ; break;
+ case lir_cond_notEqual: acond = Assembler::NE; break;
+ case lir_cond_less: acond = Assembler::LT; break;
+ case lir_cond_lessEqual: acond = Assembler::LE; break;
+ case lir_cond_greaterEqual: acond = Assembler::GE; break;
+ case lir_cond_greater: acond = Assembler::GT; break;
+ case lir_cond_belowEqual: acond = Assembler::LS; break;
+ case lir_cond_aboveEqual: acond = Assembler::HS; break;
+ default: ShouldNotReachHere();
+ }
+ __ br(acond, ok);
+ }
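+ // The asserted condition is false: either halt with the message or stop at a breakpoint.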
+ if (op->halt()) {
+ const char* str = __ code_string(op->msg());
+ __ stop(str);
+ } else {
+ breakpoint();
+ }
+ __ bind(ok);
+}
+#endif
+
#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
@@ -317,7 +317,7 @@
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
assert(x->is_pinned(),"");
- bool needs_range_check = true;
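+ // Range check elimination can prove some stores in bounds; only emit the check when still needed.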
+ bool needs_range_check = x->compute_needs_range_check();
bool use_length = x->length() != NULL;
bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
@@ -332,12 +332,10 @@
array.load_item();
index.load_nonconstant();
- if (use_length) {
- needs_range_check = x->compute_needs_range_check();
- if (needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
+ if (use_length && needs_range_check) {
+ length.set_instruction(x->length());
+ length.load_item();
}
if (needs_store_check) {
value.load_item();
@@ -679,7 +679,8 @@
switch (op2->code()) {
case lir_cmp:
case lir_cmp_fd2i:
- case lir_ucmp_fd2i: {
+ case lir_ucmp_fd2i:
+ case lir_assert: {
assert(left->is_fpu_register(), "invalid LIR");
assert(right->is_fpu_register(), "invalid LIR");
@@ -543,12 +543,10 @@
// distinguish each RT-Call.
// Note: This number affects also the RT-Call in generate_handle_exception because
// the oop-map is shared for all calls.
- const int num_rt_args = 2; // thread + dummy
-
DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
assert(deopt_blob != NULL, "deoptimization blob must have been created");
- OopMap* oop_map = save_live_registers(sasm, num_rt_args);
+ OopMap* oop_map = save_live_registers(sasm);
__ mov(c_rarg0, rthread);
Label retaddr;
@@ -771,7 +769,7 @@
}
__ enter();
- OopMap* map = save_live_registers(sasm, 2);
+ OopMap* map = save_live_registers(sasm);
int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
oop_maps = new OopMapSet();
oop_maps->add_gc_map(call_offset, map);
@@ -789,7 +787,7 @@
{
Register bci = r0, method = r1;
__ enter();
- OopMap* map = save_live_registers(sasm, 3);
+ OopMap* map = save_live_registers(sasm);
// Retrieve bci
__ ldrw(bci, Address(rfp, 2*BytesPerWord));
// And a pointer to the Method*
@@ -911,7 +909,7 @@
}
__ enter();
- OopMap* map = save_live_registers(sasm, 3);
+ OopMap* map = save_live_registers(sasm);
int call_offset;
if (id == new_type_array_id) {
call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
@@ -936,7 +934,7 @@
// r0,: klass
// r19,: rank
// r2: address of 1st dimension
- OopMap* map = save_live_registers(sasm, 4);
+ OopMap* map = save_live_registers(sasm);
__ mov(c_rarg1, r0);
__ mov(c_rarg3, r2);
__ mov(c_rarg2, r19);
@@ -1145,6 +1143,24 @@
}
break;
+ case predicate_failed_trap_id:
+ {
+ StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
+
+ OopMap* map = save_live_registers(sasm);
+
+ int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
+ oop_maps = new OopMapSet();
+ oop_maps->add_gc_map(call_offset, map);
+ restore_live_registers(sasm);
+ __ leave();
+ DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
+ assert(deopt_blob != NULL, "deoptimization blob must have been created");
+
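+ // Deoptimize with reexecution so the bytecode guarded by the failed
+ // predicate runs again in the interpreter.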
+ __ b(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
+ }
+ break;
+
default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
@@ -57,7 +57,7 @@
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
-define_pd_global(bool, ProfileInterpreter, true);
+define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx,MetaspaceSize, 12*M );
@@ -44,7 +44,7 @@
#include "runtime/vframeArray.hpp"
#endif
-#if ASSERT
+#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif
@@ -358,7 +358,7 @@
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
-#if ASSERT
+#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
frame fr;
@@ -171,7 +171,7 @@
return (intptr_t*) addr_at(offset);
}
-#if ASSERT
+#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
@@ -319,12 +319,18 @@
-inline oop frame::saved_oop_result(RegisterMap* map) const {
- return *((oop*) map->location(r0->as_VMReg()));
+inline oop frame::saved_oop_result(RegisterMap* map) const {
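+ // location() can return NULL if r0 was not saved for this frame;
+ // guarantee rather than crash on a NULL dereference.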
+ oop* result_adr = (oop *)map->location(r0->as_VMReg());
+ guarantee(result_adr != NULL, "bad register save location");
+
+ return (*result_adr);
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
- *((oop*) map->location(r0->as_VMReg())) = obj;
+ oop* result_adr = (oop *)map->location(r0->as_VMReg());
+ guarantee(result_adr != NULL, "bad register save location");
+
+ *result_adr = obj;
}
#endif // CPU_AARCH64_VM_FRAME_AARCH64_INLINE_HPP
@@ -851,8 +851,13 @@
if (!VerifyOops) return;
// Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+ const char* b = NULL;
+ {
+ ResourceMark rm;
+ stringStream ss;
+ ss.print("verify_oop: %s: %s", reg->name(), s);
+ b = code_string(ss.as_string());
+ }
BLOCK_COMMENT("verify_oop {");
stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
@@ -875,8 +880,13 @@
void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
if (!VerifyOops) return;
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop_addr: %s", s);
+ const char* b = NULL;
+ {
+ ResourceMark rm;
+ stringStream ss;
+ ss.print("verify_oop_addr: %s", s);
+ b = code_string(ss.as_string());
+ }
BLOCK_COMMENT("verify_oop_addr {");
stp(r0, rscratch1, Address(pre(sp, -2 * wordSize)));
@@ -1462,7 +1462,8 @@
int callee_locals,
frame* caller,
frame* interpreter_frame,
- bool is_top_frame) {
+ bool is_top_frame,
+ bool is_bottom_frame) {
// Note: This calculation must exactly parallel the frame setup
// in AbstractInterpreterGenerator::generate_method_entry.
// If interpreter_frame!=NULL, set up the method, locals, and monitors.
@@ -36,7 +36,7 @@
define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
-// Used on 64 bit platforms for UseCompressedOops base address or CDS
+// Used on 64 bit platforms for UseCompressedOops base address
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
#endif // OS_CPU_LINUX_AARCH64_VM_GLOBALS_LINUX_AARCH64_HPP
@@ -326,6 +326,11 @@
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+
+ // This is a likely cause, but hard to verify. Let's just print
+ // it as a hint.
+ tty->print_raw_cr("Please check if any of your loaded .so files has "
+ "enabled executable stack (see man page execstack(8))");
} else {
// Accessing stack address below sp may cause SEGV if current
// thread has MAP_GROWSDOWN stack. This should only happen when
@@ -356,7 +361,7 @@
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
- nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+ nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
}
@@ -148,7 +148,7 @@
void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
- // assert(committed_code_size > 0, "committed_code_size must be > 0");
+ assert(committed_code_size > 0, "committed_code_size must be > 0");
int committed_size = round_to(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
Stub* s = current_stub();
assert(committed_size <= stub_size(s), "committed size must not exceed requested size");