/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_graphKit.cpp.incl"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != NULL)  jvms->map()->set_next_exception(NULL);
  set_jvms(jvms);
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn())
{
  _exceptions = NULL;
  set_map(NULL);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}


//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
void GraphKit::clean_stack(int from_sp) {
  SafePointNode* map      = this->map();
  JVMState*      jvms     = this->jvms();
  int            stk_size = jvms->stk_size();
  int            stkoff   = jvms->stkoff();
  Node*          top      = this->top();
  for (int i = from_sp; i < stk_size; i++) {
    if (map->in(stkoff + i) != top) {
      map->set_req(stkoff + i, top);
    }
  }
}
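
// Illustrative sketch (not part of the original code): a typical client, such
// as an inline expander handed a call's JVMState, drives the main constructor
// roughly like this.  Only GraphKit's own methods are assumed; the surrounding
// names are hypothetical.
//
//   GraphKit kit(jvms);              // adopt the caller's JVM state
//   // ... emit nodes through the kit, e.g. kit.null_check_common(...),
//   //     kit.make_load(...), kit.store_to_memory(...) ...
//   kit.sync_jvms();                 // push bci/sp back into the JVMState
//   return kit.transfer_exceptions_into_jvms();   // hand exceptions back out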
//--------------------------------sync_jvms-----------------------------------
// Make sure our current jvms agrees with our parse state.
JVMState* GraphKit::sync_jvms() const {
  JVMState* jvms = this->jvms();
  jvms->set_bci(bci());       // Record the new bci in the JVMState
  jvms->set_sp(sp());         // Record the new sp in the JVMState
  assert(jvms_in_sync(), "jvms is now in sync");
  return jvms;
}

#ifdef ASSERT
bool GraphKit::jvms_in_sync() const {
  Parse* parse = is_Parse();
  if (parse == NULL) {
    if (bci() != jvms()->bci())       return false;
    if (sp()  != (int)jvms()->sp())   return false;
    return true;
  }
  if (jvms()->method() != parse->method())  return false;
  if (jvms()->bci()    != parse->bci())     return false;
  int jvms_sp = jvms()->sp();
  if (jvms_sp != parse->sp())               return false;
  int jvms_depth = jvms()->depth();
  if (jvms_depth != parse->depth())         return false;
  return true;
}

// Local helper checks for special internal merge points
// used to accumulate and merge exception states.
// They are marked by the region's in(0) edge being the root node.
// Such merge points must never "escape" into the parser at large,
// until they have been handed to gvn.transform.
static bool is_hidden_merge(Node* reg) {
  if (reg == NULL)  return false;
  if (reg->is_Phi()) {
    reg = reg->in(0);
    if (reg == NULL)  return false;
  }
  return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root();
}

void GraphKit::verify_map() const {
  if (map() == NULL)  return;  // null map is OK
  assert(map()->req() <= jvms()->endoff(), "no extra garbage on map");
  assert(!map()->has_exceptions(),    "call add_exception_states_from 1st");
  assert(!is_hidden_merge(control()), "call use_exception_state, not set_map");
}

void GraphKit::verify_exception_state(SafePointNode* ex_map) {
  assert(ex_map->next_exception() == NULL, "not already part of a chain");
  assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop");
}
#endif

//---------------------------stop_and_kill_map---------------------------------
// Set _map to NULL, signalling a stop to further bytecode execution.
// First smash the current map's control to a constant, to mark it dead.
void GraphKit::stop_and_kill_map() {
  SafePointNode* dead_map = stop();
  if (dead_map != NULL) {
    dead_map->disconnect_inputs(NULL); // Mark the map as killed.
    assert(dead_map->is_killed(), "must be so marked");
  }
}


//--------------------------------stopped--------------------------------------
// Tell if _map is NULL, or control is top.
bool GraphKit::stopped() {
  if (map() == NULL)            return true;
  else if (control() == top())  return true;
  else                          return false;
}


//-----------------------------has_ex_handler----------------------------------
// Tell if this method or any caller method has exception handlers.
bool GraphKit::has_ex_handler() {
  for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) {
    if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) {
      return true;
    }
  }
  return false;
}

//------------------------------save_ex_oop------------------------------------
// Save an exception without blowing stack contents or other JVM state.
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
  assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
  ex_map->add_req(ex_oop);
  debug_only(verify_exception_state(ex_map));
}

inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
  assert(GraphKit::has_saved_ex_oop(ex_map), "ex_oop must be there");
  Node* ex_oop = ex_map->in(ex_map->req()-1);
  if (clear_it)  ex_map->del_req(ex_map->req()-1);
  return ex_oop;
}

//-----------------------------saved_ex_oop------------------------------------
// Recover a saved exception from its map.
Node* GraphKit::saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, false);
}

//--------------------------clear_saved_ex_oop---------------------------------
// Erase a previously saved exception from its map.
Node* GraphKit::clear_saved_ex_oop(SafePointNode* ex_map) {
  return common_saved_ex_oop(ex_map, true);
}

#ifdef ASSERT
//---------------------------has_saved_ex_oop----------------------------------
// Tell whether an exception oop has been saved on the given map.
bool GraphKit::has_saved_ex_oop(SafePointNode* ex_map) {
  return ex_map->req() == ex_map->jvms()->endoff()+1;
}
#endif

//-------------------------make_exception_state--------------------------------
// Turn the current JVM state into an exception state, appending the ex_oop.
SafePointNode* GraphKit::make_exception_state(Node* ex_oop) {
  sync_jvms();
  SafePointNode* ex_map = stop();  // do not manipulate this map any more
  set_saved_ex_oop(ex_map, ex_oop);
  return ex_map;
}
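
// Illustrative sketch (not in the original source): the usual way for kit code
// to raise an exception is to turn the current map into an exception state and
// chain it onto the kit's exception list:
//
//   add_exception_state(make_exception_state(ex_oop));
//
// which is exactly the pattern builtin_throw() uses below for its preallocated
// exception objects.  The current map is consumed (stop()'d) in the process,
// so the caller continues on a fresh or restored map.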

//--------------------------add_exception_state--------------------------------
// Add an exception to my list of exceptions.
void GraphKit::add_exception_state(SafePointNode* ex_map) {
  if (ex_map == NULL || ex_map->control() == top()) {
    return;
  }
#ifdef ASSERT
  verify_exception_state(ex_map);
  if (has_exceptions()) {
    assert(ex_map->jvms()->same_calls_as(_exceptions->jvms()), "all collected exceptions must come from the same place");
  }
#endif

  // If there is already an exception of exactly this type, merge with it.
  // In particular, null-checks and other low-level exceptions common up here.
  Node*       ex_oop  = saved_ex_oop(ex_map);
  const Type* ex_type = _gvn.type(ex_oop);
  if (ex_oop == top()) {
    // No action needed.
    return;
  }
  assert(ex_type->isa_instptr(), "exception must be an instance");
  for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) {
    const Type* ex_type2 = _gvn.type(saved_ex_oop(e2));
    // We check sp also because call bytecodes can generate exceptions
    // both before and after arguments are popped!
    if (ex_type2 == ex_type
        && e2->_jvms->sp() == ex_map->_jvms->sp()) {
      combine_exception_states(ex_map, e2);
      return;
    }
  }

  // No pre-existing exception of the same type.  Chain it on the list.
  push_exception_state(ex_map);
}

//-----------------------add_exception_states_from-----------------------------
void GraphKit::add_exception_states_from(JVMState* jvms) {
  SafePointNode* ex_map = jvms->map()->next_exception();
  if (ex_map != NULL) {
    jvms->map()->set_next_exception(NULL);
    for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) {
      next_map = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      add_exception_state(ex_map);
    }
  }
}

//-----------------------transfer_exceptions_into_jvms-------------------------
JVMState* GraphKit::transfer_exceptions_into_jvms() {
  if (map() == NULL) {
    // We need a JVMS to carry the exceptions, but the map has gone away.
    // Create a scratch JVMS, cloned from any of the exception states...
    if (has_exceptions()) {
      _map = _exceptions;
      _map = clone_map();
      _map->set_next_exception(NULL);
      clear_saved_ex_oop(_map);
      debug_only(verify_map());
    } else {
      // ...or created from scratch
      JVMState* jvms = new (C) JVMState(_method, NULL);
      jvms->set_bci(_bci);
      jvms->set_sp(_sp);
      jvms->set_map(new (C, TypeFunc::Parms) SafePointNode(TypeFunc::Parms, jvms));
      set_jvms(jvms);
      for (uint i = 0; i < map()->req(); i++)  map()->init_req(i, top());
      set_all_memory(top());
      while (map()->req() < jvms->endoff())  map()->add_req(top());
    }
    // (This is a kludge, in case you didn't notice.)
    set_control(top());
  }
  JVMState* jvms = sync_jvms();
  assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet");
  jvms->map()->set_next_exception(_exceptions);
  _exceptions = NULL;   // done with this set of exceptions
  return jvms;
}

static inline void add_n_reqs(Node* dstphi, Node* srcphi) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(is_hidden_merge(srcphi), "must be a special merge node");
  uint limit = srcphi->req();
  for (uint i = PhiNode::Input; i < limit; i++) {
    dstphi->add_req(srcphi->in(i));
  }
}
static inline void add_one_req(Node* dstphi, Node* src) {
  assert(is_hidden_merge(dstphi), "must be a special merge node");
  assert(!is_hidden_merge(src), "must not be a special merge node");
  dstphi->add_req(src);
}

//-----------------------combine_exception_states------------------------------
// This helper function combines exception states by building phis on a
// specially marked state-merging region.  These regions and phis are
// untransformed, and can build up gradually.  The region is marked by
// having the root node as its in(0) control input, rather than NULL.  Such
// regions do not appear except in this function, and in use_exception_state.
void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) {
  if (failing())  return;  // dying anyway...
  JVMState* ex_jvms = ex_map->_jvms;
  assert(ex_jvms->same_calls_as(phi_map->_jvms), "consistent call chains");
  assert(ex_jvms->stkoff() == phi_map->_jvms->stkoff(), "matching locals");
  assert(ex_jvms->sp()     == phi_map->_jvms->sp(),     "matching stack sizes");
  assert(ex_jvms->monoff() == phi_map->_jvms->monoff(), "matching JVMS");
  assert(ex_map->req() == phi_map->req(), "matching maps");
  uint tos = ex_jvms->stkoff() + ex_jvms->sp();
  Node*         hidden_merge_mark = root();
  Node*         region  = phi_map->control();
  MergeMemNode* phi_mem = phi_map->merged_memory();
  MergeMemNode* ex_mem  = ex_map->merged_memory();
  if (region->in(0) != hidden_merge_mark) {
    // The control input is not (yet) a specially-marked region in phi_map.
    // Make it so, and build some phis.
    region = new (C, 2) RegionNode(2);
    _gvn.set_type(region, Type::CONTROL);
    region->set_req(0, hidden_merge_mark);  // marks an internal ex-state
    region->init_req(1, phi_map->control());
    phi_map->set_control(region);
    Node* io_phi = PhiNode::make(region, phi_map->i_o(), Type::ABIO);
    record_for_igvn(io_phi);
    _gvn.set_type(io_phi, Type::ABIO);
    phi_map->set_i_o(io_phi);
    for (MergeMemStream mms(phi_mem); mms.next_non_empty(); ) {
      Node* m = mms.memory();
      Node* m_phi = PhiNode::make(region, m, Type::MEMORY, mms.adr_type(C));
      record_for_igvn(m_phi);
      _gvn.set_type(m_phi, Type::MEMORY);
      mms.set_memory(m_phi);
    }
  }

  // Either or both of phi_map and ex_map might already be converted into phis.
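  // Worked example (illustrative comment only): suppose two null-check paths
  // each produced a NullPointerException state at the same sp.  The first
  // call to combine_exception_states() rewrites phi_map's control into a
  // 2-input Region whose in(0) is the root (the hidden-merge mark) and wraps
  // i_o and each memory slice in a Phi on that Region.  A later call for a
  // third such path then simply appends one more input to the Region and to
  // each of those Phis, widening them in place.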
  Node* ex_control = ex_map->control();
  // If there is special marking on ex_map also, we add multiple edges from src.
  bool add_multiple = (ex_control->in(0) == hidden_merge_mark);
  // How wide was the destination phi_map, originally?
  uint orig_width = region->req();

  if (add_multiple) {
    add_n_reqs(region, ex_control);
    add_n_reqs(phi_map->i_o(), ex_map->i_o());
  } else {
    // ex_map has no merges, so we just add single edges everywhere
    add_one_req(region, ex_control);
    add_one_req(phi_map->i_o(), ex_map->i_o());
  }
  for (MergeMemStream mms(phi_mem, ex_mem); mms.next_non_empty2(); ) {
    if (mms.is_empty()) {
      // get a copy of the base memory, and patch some inputs into it
      const TypePtr* adr_type = mms.adr_type(C);
      Node* phi = mms.force_memory()->as_Phi()->slice_memory(adr_type);
      assert(phi->as_Phi()->region() == mms.base_memory()->in(0), "");
      mms.set_memory(phi);
      // Prepare to append interesting stuff onto the newly sliced phi:
      while (phi->req() > orig_width)  phi->del_req(phi->req()-1);
    }
    // Append stuff from ex_map:
    if (add_multiple) {
      add_n_reqs(mms.memory(), mms.memory2());
    } else {
      add_one_req(mms.memory(), mms.memory2());
    }
  }
  uint limit = ex_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip everything in the JVMS after tos.  (The ex_oop follows.)
    if (i == tos)  i = ex_jvms->monoff();
    Node* src = ex_map->in(i);
    Node* dst = phi_map->in(i);
    if (src != dst) {
      PhiNode* phi;
      if (dst->in(0) != region) {
        dst = phi = PhiNode::make(region, dst, _gvn.type(dst));
        record_for_igvn(phi);
        _gvn.set_type(phi, phi->type());
        phi_map->set_req(i, dst);
        // Prepare to append interesting stuff onto the new phi:
        while (dst->req() > orig_width)  dst->del_req(dst->req()-1);
      } else {
        assert(dst->is_Phi(), "nobody else uses a hidden region");
        phi = (PhiNode*)dst;
      }
      if (add_multiple && src->in(0) == ex_control) {
        // Both are phis.
        add_n_reqs(dst, src);
      } else {
        while (dst->req() < region->req())  add_one_req(dst, src);
      }
      const Type* srctype = _gvn.type(src);
      if (phi->type() != srctype) {
        const Type* dsttype = phi->type()->meet(srctype);
        if (phi->type() != dsttype) {
          phi->set_type(dsttype);
          _gvn.set_type(phi, dsttype);
        }
      }
    }
  }
}

//--------------------------use_exception_state--------------------------------
Node* GraphKit::use_exception_state(SafePointNode* phi_map) {
  if (failing()) { stop(); return top(); }
  Node* region = phi_map->control();
  Node* hidden_merge_mark = root();
  assert(phi_map->jvms()->map() == phi_map, "sanity: 1-1 relation");
  Node* ex_oop = clear_saved_ex_oop(phi_map);
  if (region->in(0) == hidden_merge_mark) {
    // Special marking for internal ex-states.  Process the phis now.
    region->set_req(0, region);  // now it's an ordinary region
    set_jvms(phi_map->jvms());   // ...so now we can use it as a map
    // Note: Setting the jvms also sets the bci and sp.
    set_control(_gvn.transform(region));
    uint tos = jvms()->stkoff() + sp();
    for (uint i = 1; i < tos; i++) {
      Node* x = phi_map->in(i);
      if (x->in(0) == region) {
        assert(x->is_Phi(), "expected a special phi");
        phi_map->set_req(i, _gvn.transform(x));
      }
    }
    for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
      Node* x = mms.memory();
      if (x->in(0) == region) {
        assert(x->is_Phi(), "nobody else uses a hidden region");
        mms.set_memory(_gvn.transform(x));
      }
    }
    if (ex_oop->in(0) == region) {
      assert(ex_oop->is_Phi(), "expected a special phi");
      ex_oop = _gvn.transform(ex_oop);
    }
  } else {
    set_jvms(phi_map->jvms());
  }

  assert(!is_hidden_merge(phi_map->control()), "hidden ex. states cleared");
  assert(!is_hidden_merge(phi_map->i_o()),     "hidden ex. states cleared");
  return ex_oop;
}

//---------------------------------java_bc-------------------------------------
Bytecodes::Code GraphKit::java_bc() const {
  ciMethod* method = this->method();
  int       bci    = this->bci();
  if (method != NULL && bci != InvocationEntryBci)
    return method->java_code_at_bci(bci);
  else
    return Bytecodes::_illegal;
}

//------------------------------builtin_throw----------------------------------
void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
  bool must_throw = true;

  if (JvmtiExport::can_post_exceptions()) {
    // Do not try anything fancy if we're notifying the VM on every throw.
    // Cf. case Bytecodes::_athrow in parse2.cpp.
    uncommon_trap(reason, Deoptimization::Action_none,
                  (ciKlass*)NULL, (char*)NULL, must_throw);
    return;
  }

  // If this particular condition has not yet happened at this
  // bytecode, then use the uncommon trap mechanism, and allow for
  // a future recompilation if several traps occur here.
  // If the throw is hot, try to use a more complicated inline mechanism
  // which keeps execution inside the compiled code.
  bool treat_throw_as_hot = false;
  ciMethodData* md = method()->method_data();

  if (ProfileTraps) {
    if (too_many_traps(reason)) {
      treat_throw_as_hot = true;
    }
    // (If there is no MDO at all, assume it is early in
    // execution, and that any deopts are part of the
    // startup transient, and don't need to be remembered.)

    // Also, if there is a local exception handler, treat all throws
    // as hot if there has been at least one in this method.
    if (C->trap_count(reason) != 0
        && method()->method_data()->trap_count(reason) != 0
        && has_ex_handler()) {
      treat_throw_as_hot = true;
    }
  }

  // If this throw happens frequently, an uncommon trap might cause
  // a performance pothole.  If there is a local exception handler,
  // and if this particular bytecode appears to be deoptimizing often,
  // let us handle the throw inline, with a preconstructed instance.
  // Note:   If the deopt count has blown up, the uncommon trap
  // runtime is going to flush this nmethod, no matter what.
  if (treat_throw_as_hot
      && (!StackTraceInThrowable || OmitStackTraceInFastThrow)) {
    // If the throw is local, we use a pre-existing instance and
    // punt on the backtrace.  This would lead to a missing backtrace
    // (a repeat of 4292742) if the backtrace object is ever asked
    // for its backtrace.
    // Fixing this remaining case of 4292742 requires some flavor of
    // escape analysis.  Leave that for the future.
    ciInstance* ex_obj = NULL;
    switch (reason) {
    case Deoptimization::Reason_null_check:
      ex_obj = env()->NullPointerException_instance();
      break;
    case Deoptimization::Reason_div0_check:
      ex_obj = env()->ArithmeticException_instance();
      break;
    case Deoptimization::Reason_range_check:
      ex_obj = env()->ArrayIndexOutOfBoundsException_instance();
      break;
    case Deoptimization::Reason_class_check:
      if (java_bc() == Bytecodes::_aastore) {
        ex_obj = env()->ArrayStoreException_instance();
      } else {
        ex_obj = env()->ClassCastException_instance();
      }
      break;
    }
    if (failing()) { stop(); return; }  // exception allocation might fail
    if (ex_obj != NULL) {
      // Cheat with a preallocated exception object.
      if (C->log() != NULL)
        C->log()->elem("hot_throw preallocated='1' reason='%s'",
                       Deoptimization::trap_reason_name(reason));
      const TypeInstPtr* ex_con  = TypeInstPtr::make(ex_obj);
      Node*              ex_node = _gvn.transform( ConNode::make(C, ex_con) );

      // Clear the detail message of the preallocated exception object.
      // Weblogic sometimes mutates the detail message of exceptions
      // using reflection.
      int offset = java_lang_Throwable::get_detailMessage_offset();
      const TypePtr* adr_typ = ex_con->add_offset(offset);

      Node *adr = basic_plus_adr(ex_node, ex_node, offset);
      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), ex_con, T_OBJECT);

      add_exception_state(make_exception_state(ex_node));
      return;
    }
  }

  // %%% Maybe add entry to OptoRuntime which directly throws the exc.?
  // It won't be much cheaper than bailing to the interp., since we'll
  // have to pass up all the debug-info, and the runtime will have to
  // create the stack trace.

  // Usual case:  Bail to interpreter.
  // Reserve the right to recompile if we haven't seen anything yet.

  Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
  if (treat_throw_as_hot
      && (method()->method_data()->trap_recompiled_at(bci())
          || C->too_many_traps(reason))) {
    // We cannot afford to take more traps here.  Suffer in the interpreter.
    if (C->log() != NULL)
      C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'",
                     Deoptimization::trap_reason_name(reason),
                     C->trap_count(reason));
    action = Deoptimization::Action_none;
  }

  // "must_throw" prunes the JVM state to include only the stack, if there
  // are no local exception handlers.  This should cut down on register
  // allocation time and code size, by drastically reducing the number
  // of in-edges on the call to the uncommon trap.

  uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw);
}


//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
  debug_only(kit->verify_map());
  _kit    = kit;
  _map    = kit->map();   // preserve the map
  _sp     = kit->sp();
  kit->set_map(clone_map ? kit->clone_map() : NULL);
#ifdef ASSERT
  _bci    = kit->bci();
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  _block  = block;
#endif
}
PreserveJVMState::~PreserveJVMState() {
  GraphKit* kit = _kit;
#ifdef ASSERT
  assert(kit->bci() == _bci, "bci must not shift");
  Parse* parser = kit->is_Parse();
  int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo();
  assert(block == _block,    "block must not shift");
#endif
  kit->set_map(_map);
  kit->set_sp(_sp);
}


//-----------------------------BuildCutout-------------------------------------
BuildCutout::BuildCutout(GraphKit* kit, Node* p, float prob, float cnt)
  : PreserveJVMState(kit)
{
  assert(p->is_Con() || p->is_Bool(), "test must be a bool");
  SafePointNode* outer_map = _map;   // preserved map is caller's
  SafePointNode* inner_map = kit->map();
  IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
  outer_map->set_control(kit->gvn().transform( new (kit->C, 1) IfTrueNode(iff) ));
  inner_map->set_control(kit->gvn().transform( new (kit->C, 1) IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
  GraphKit* kit = _kit;
  assert(kit->stopped(), "cutout code must stop, throw, return, etc.");
}
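
// Illustrative sketch (not in the original source): BuildCutout carves out a
// failure path under a test.  Inside the scope the kit sits on the projection
// where 'p' is false and must stop; when the scope closes, PreserveJVMState
// restores the preserved map, now wired to the projection where 'p' is true.
// null_check_common() below uses it essentially as:
//
//   { BuildCutout unless(this, tst, ok_prob);
//     // here the kit is on the path where 'tst' failed; it must stop:
//     uncommon_trap(reason, ...);          // or builtin_throw(reason)
//   }
//   // back here, the kit continues on the path where 'tst' passed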

//------------------------------clone_map--------------------------------------
// Implementation of PreserveJVMState
//
// Only clone_map(...) here.  If this function is only used in the
// PreserveJVMState class we may want to get rid of this extra
// function eventually and do it all there.

SafePointNode* GraphKit::clone_map() {
  if (map() == NULL)  return NULL;

  // Clone the memory edge first
  Node* mem = MergeMemNode::make(C, map()->memory());
  gvn().set_type_bottom(mem);

  SafePointNode *clonemap = (SafePointNode*)map()->clone();
  JVMState* jvms = this->jvms();
  JVMState* clonejvms = jvms->clone_shallow(C);
  clonemap->set_memory(mem);
  clonemap->set_jvms(clonejvms);
  clonejvms->set_map(clonemap);
  record_for_igvn(clonemap);
  gvn().set_type_bottom(clonemap);
  return clonemap;
}


//-----------------------------set_map_clone-----------------------------------
void GraphKit::set_map_clone(SafePointNode* m) {
  _map = m;
  _map = clone_map();
  _map->set_next_exception(NULL);
  debug_only(verify_map());
}


//----------------------------kill_dead_locals---------------------------------
// Detect any locals which are known to be dead, and force them to top.
void GraphKit::kill_dead_locals() {
  // Consult the liveness information for the locals.  If any
  // of them are unused, then they can be replaced by top().  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.

  // This call is made from many of the bytecode handling
  // subroutines called from the Big Switch in do_one_bytecode.
  // Every bytecode which might include a slow path is responsible
  // for killing its dead locals.  The more consistent we
  // are about killing deads, the fewer useless phis will be
  // constructed for them at various merge points.

  // bci can be -1 (InvocationEntryBci).  We return the entry
  // liveness for the method.

  if (method() == NULL || method()->code_size() == 0) {
    // We are building a graph for a call to a native method.
    // All locals are live.
    return;
  }

  ResourceMark rm;

  MethodLivenessResult live_locals = method()->liveness_at_bci(bci());

  int len = (int)live_locals.size();
  assert(len <= jvms()->loc_size(), "too many live locals");
  for (int local = 0; local < len; local++) {
    if (!live_locals.at(local)) {
      set_local(local, top());
    }
  }
}
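
// Illustrative example (comment only): if the liveness query at the current
// bci() reports that, say, local 3 is no longer live, the loop above replaces
// it with top() via set_local(3, top()).  The next safepoint's debug info then
// records top instead of the stale value, so register allocation and
// deoptimization state no longer have to keep that value alive.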

#ifdef ASSERT
//-------------------------dead_locals_are_killed------------------------------
// Return true if all dead locals are set to top in the map.
// Used to assert "clean" debug info at various points.
bool GraphKit::dead_locals_are_killed() {
  if (method() == NULL || method()->code_size() == 0) {
    // No locals need to be dead, so all is as it should be.
    return true;
  }

  // Make sure somebody called kill_dead_locals upstream.
  ResourceMark rm;
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms->loc_size() == 0)  continue;     // no locals to consult
    SafePointNode* map = jvms->map();
    ciMethod* method = jvms->method();
    int       bci    = jvms->bci();
    if (jvms == this->jvms()) {
      bci = this->bci();  // it might not yet be synched
    }
    MethodLivenessResult live_locals = method->liveness_at_bci(bci);
    int len = (int)live_locals.size();
    if (!live_locals.is_valid() || len == 0)
      // This method is trivial, or is poisoned by a breakpoint.
      return true;
    assert(len == jvms->loc_size(), "live map consistent with locals map");
    for (int local = 0; local < len; local++) {
      if (!live_locals.at(local) && map->local(jvms, local) != top()) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");

  // Walk the inline list to fill in the correct set of JVMState's
  // Also fill in the associated edges for each JVMState.

  JVMState* youngest_jvms = sync_jvms();

  // Do we need debug info here?  If it is a SafePoint and this method
  // cannot de-opt, then we do NOT need any debug info.
  bool full_info = (C->deopt_happens() || call->Opcode() != Op_SafePoint);

  // If we are guaranteed to throw, we can prune everything but the
  // input to the current bytecode.
  bool can_prune_locals = false;
  uint stack_slots_not_pruned = 0;
  int inputs = 0, depth = 0;
  if (must_throw) {
    assert(method() == youngest_jvms->method(), "sanity");
    if (compute_stack_effects(inputs, depth)) {
      can_prune_locals = true;
      stack_slots_not_pruned = inputs;
    }
  }

  if (JvmtiExport::can_examine_or_deopt_anywhere()) {
    // At any safepoint, this method can get breakpointed, which would
    // then require an immediate deoptimization.
    full_info = true;
    can_prune_locals = false;  // do not prune locals
    stack_slots_not_pruned = 0;
  }

  // do not scribble on the input jvms
  JVMState* out_jvms = youngest_jvms->clone_deep(C);
  call->set_jvms(out_jvms); // Start jvms list for call node

  // Presize the call:
  debug_only(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
  for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (full_info && !can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (full_info && !can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
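      // Worked example (illustrative comment only): with l == 5 live stack
      // slots and stack_slots_not_pruned == 2, the split below gives s1 == 2
      // and s0 == 3; slots 0..2 keep the top() preinstalled by add_req_batch,
      // and only slots 3..4 (the inputs of the throwing bytecode) are copied
      // from the map.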
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  BasicType rtype = T_ILLEGAL;
  int       rsize = 0;

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
    rtype = Bytecodes::result_type(code); // checkcast=P, athrow=V
    if (rtype < T_CONFLICT)
      rsize = type2size[rtype];
  }

  switch (code) {
  case Bytecodes::_illegal:
    return false;

  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
  case Bytecodes::_ldc2_w:
    inputs = 0;
    break;

  case Bytecodes::_dup:         inputs = 1;  break;
  case Bytecodes::_dup_x1:      inputs = 2;  break;
  case Bytecodes::_dup_x2:      inputs = 3;  break;
  case Bytecodes::_dup2:        inputs = 2;  break;
  case Bytecodes::_dup2_x1:     inputs = 3;  break;
  case Bytecodes::_dup2_x2:     inputs = 4;  break;
  case Bytecodes::_swap:        inputs = 2;  break;
  case Bytecodes::_arraylength: inputs = 1;  break;

  case Bytecodes::_getstatic:
  case Bytecodes::_putstatic:
  case Bytecodes::_getfield:
  case Bytecodes::_putfield:
    {
      bool is_get = (depth >= 0), is_static = (depth & 1);
      bool ignore;
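      // Worked example (illustrative comment only): for a getfield of a long
      // field, size == 2, inputs == 1 (just the receiver) and depth == +1;
      // for a putstatic of the same field, inputs == 0 + 2 == 2 and
      // depth == -2.  The decoding above relies on Bytecodes::depth() being
      // non-negative exactly for the get bytecodes and odd exactly for the
      // static ones.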
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      ciField* field = iter.get_field(ignore);
      int size = field->type()->size();
      inputs  = (is_static ? 0 : 1);
      if (is_get) {
        depth = size - inputs;
      } else {
        inputs += size;        // putxxx pops the value from the stack
        depth = - inputs;
      }
    }
    break;

  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokeinterface:
    {
      bool is_static = (depth == 0);
      bool ignore;
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      ciMethod* method = iter.get_method(ignore);
      inputs = method->arg_size_no_receiver();
      if (!is_static)  inputs += 1;
      int size = method->return_type()->size();
      depth = size - inputs;
    }
    break;

  case Bytecodes::_multianewarray:
    {
      ciBytecodeStream iter(method());
      iter.reset_to_bci(bci());
      iter.next();
      inputs = iter.get_dimensions();
      assert(rsize == 1, "");
      depth = rsize - inputs;
    }
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_lreturn:
  case Bytecodes::_freturn:
  case Bytecodes::_dreturn:
  case Bytecodes::_areturn:
    assert(rsize == -depth, "");
    inputs = rsize;
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    inputs = 0;
    depth  = 1;                  // S.B. depth=1, not zero
    break;

  default:
    // bytecode produces a typed result
    inputs = rsize - depth;
    assert(inputs >= 0, "");
    break;
  }

#ifdef ASSERT
  // spot check
  int outputs = depth + inputs;
  assert(outputs >= 0, "sanity");
  switch (code) {
  case Bytecodes::_checkcast: assert(inputs == 1 && outputs == 1, ""); break;
  case Bytecodes::_athrow:    assert(inputs == 1 && outputs == 0, ""); break;
  case Bytecodes::_aload_0:   assert(inputs == 0 && outputs == 1, ""); break;
  case Bytecodes::_return:    assert(inputs == 0 && outputs == 0, ""); break;
  case Bytecodes::_drem:      assert(inputs == 4 && outputs == 2, ""); break;
  }
#endif //ASSERT

  return true;
}


//------------------------------basic_plus_adr---------------------------------
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
  // short-circuit a common case
  if (offset == intcon(0))  return ptr;
  return _gvn.transform( new (C, 4) AddPNode(base, ptr, offset) );
}

Node* GraphKit::ConvI2L(Node* offset) {
  // short-circuit a common case
  jint offset_con = find_int_con(offset, Type::OffsetBot);
  if (offset_con != Type::OffsetBot) {
    return longcon((long) offset_con);
  }
  return _gvn.transform( new (C, 2) ConvI2LNode(offset));
}
Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new (C, 2) ConvL2INode(offset));
}

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != NULL)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn);
  Node *alen;
  if (alloc == NULL) {
    Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new (C, 3) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = alloc->Ideal_length();
    Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_aryptr(), &_gvn);
    if (ccast != alen) {
      alen = _gvn.transform(ccast);
    }
  }
  return alen;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a NULL pointer check.  Returned value is
// the incoming address with NULL casted away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control) {
  assert(!assert_null || null_control == NULL, "not both at once");
  if (stopped())  return top();
  if (!GenerateCompilerNullChecks && !assert_null && null_control == NULL) {
    // For some performance testing, we may wish to suppress null checking.
    value = cast_not_null(value); // Make it appear to be non-null (4962416).
    return value;
  }
  explicit_null_checks_inserted++;

  // Construct NULL check
  Node *chk = NULL;
  switch(type) {
    case T_LONG   : chk = new (C, 3) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new (C, 3) CmpINode( value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type *t = _gvn.type( value );

      const TypeInstPtr* tp = t->isa_instptr();
      if (tp != NULL && !tp->klass()->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == NULL) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded.  However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        // will always be followed by a nonsense operation, so we
        // can just issue the uncommon trap here.
        // Our access to the unloaded class will only be correct
        // after it has been loaded and initialized, which requires
        // a trip through the interpreter.
#ifndef PRODUCT
        if (WizardMode) { tty->print("Null check of unloaded "); tp->klass()->print(); tty->cr(); }
#endif
        uncommon_trap(Deoptimization::Reason_unloaded,
                      Deoptimization::Action_reinterpret,
                      tp->klass(), "!loaded");
        return top();
      }

      if (assert_null) {
        // See if the type is contained in NULL_PTR.
        // If so, then the value is already null.
        if (t->higher_equal(TypePtr::NULL_PTR)) {
          explicit_null_checks_elided++;
          return value;           // Elided null assert quickly!
        }
      } else {
        // See if mixing in the NULL pointer changes type.
        // If so, then the NULL pointer was not allowed in the original
        // type.  In other words, "value" was not-null.
        if (t->meet(TypePtr::NULL_PTR) != t) {
          // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
          explicit_null_checks_elided++;
          return value;           // Elided null check quickly!
        }
      }
      chk = new (C, 3) CmpPNode( value, null() );
      break;
    }

    default       : ShouldNotReachHere();
  }
  assert(chk != NULL, "sanity check");
  chk = _gvn.transform(chk);

  BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne;
  BoolNode *btst = new (C, 2) BoolNode( chk, btest);
  Node   *tst = _gvn.transform( btst );

  //-----------
  // If peephole optimizations occurred, a prior test existed.
  // If a prior test existed, maybe it dominates as we can avoid this test.
  if (tst != btst && type == T_OBJECT) {
    // At this point we want to scan up the CFG to see if we can
    // find an identical test (and so avoid this test altogether).
    Node *cfg = control();
    int depth = 0;
    while( depth < 16 ) {    // Limit search depth for speed
      if( cfg->Opcode() == Op_IfTrue &&
          cfg->in(0)->in(1) == tst ) {
        // Found prior test.  Use "cast_not_null" to construct an identical
        // CastPP (and hence hash to) as already exists for the prior test.
        // Return that casted value.
        if (assert_null) {
          replace_in_map(value, null());
          return null();  // do not issue the redundant test
        }
        Node *oldcontrol = control();
        set_control(cfg);
        Node *res = cast_not_null(value);
        set_control(oldcontrol);
        explicit_null_checks_elided++;
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == NULL)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate:  nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null)
    reason = Deoptimization::Reason_null_assert;
  else if (type == T_OBJECT)
    reason = Deoptimization::Reason_null_check;
  else
    reason = Deoptimization::Reason_div0_check;

  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != NULL || too_many_traps(reason)) {
    // probability is less likely
    ok_prob =  PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != NULL &&
             (method()->method_data()->trap_count(reason)
              >= (uint)ImplicitNullCheckThreshold)) {
    ok_prob =  PROB_LIKELY_MAG(3);
  }

  if (null_control != NULL) {
    IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN);
    Node* null_true = _gvn.transform( new (C, 1) IfFalseNode(iff));
    set_control(      _gvn.transform( new (C, 1) IfTrueNode(iff)));
    if (null_true == top())
      explicit_null_checks_elided++;
    (*null_control) = null_true;
  } else {
    BuildCutout unless(this, tst, ok_prob);
    // Check for optimizer eliding test at parse time
    if (stopped()) {
      // Failure not possible; do not bother making uncommon trap.
      explicit_null_checks_elided++;
    } else if (assert_null) {
      uncommon_trap(reason,
                    Deoptimization::Action_make_not_entrant,
                    NULL, "assert_null");
    } else {
      replace_in_map(value, zerocon(type));
      builtin_throw(reason);
    }
  }

  // Must throw exception, fall-thru not possible?
  if (stopped()) {
    return top();               // No result
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == NULL || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}
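
// Illustrative sketch (not in the original source): a typical call site does
// roughly the following, using only the parameters defined above.
//
//   Node* obj = null_check_common(receiver, T_OBJECT);
//   if (stopped())  return;   // the not-null path is dead; an uncommon trap
//                             // or builtin_throw was emitted instead
//   // 'obj' is now the not-null-casted value, valid under this control.
//
// Passing a non-NULL 'null_control' instead asks for an explicit branch: the
// kit continues on the not-null path, and *null_control receives the control
// projection on which the value is known to be null.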

//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  const Type *t = _gvn.type(obj);
  const Type *t_not_null = t->join(TypePtr::NOTNULL);
  // Object is already not-null?
  if( t == t_not_null )  return obj;

  Node *cast = new (C, 2) CastPPNode(obj,t_not_null);
  cast->init_req(0, control());
  cast = _gvn.transform( cast );

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;                  // Return casted value
}


//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
  this->map()->replace_edge(old, neww);

  // Note: This operation potentially replaces any edge
  // on the map.  This includes locals, stack, and monitors
  // of the current (innermost) JVM state.

  // We can consider replacing in caller maps.
  // The idea would be that an inlined function's null checks
  // can be shared with the entire inlining tree.
  // The expense of doing this is that the PreserveJVMState class
  // would have to preserve caller states too, with a deep copy.
}


//=============================================================================
//--------------------------------memory---------------------------------------
Node* GraphKit::memory(uint alias_idx) {
  MergeMemNode* mem = merged_memory();
  Node* p = mem->memory_at(alias_idx);
  _gvn.set_type(p, Type::MEMORY);  // must be mapped
  return p;
}

//-----------------------------reset_memory------------------------------------
Node* GraphKit::reset_memory() {
  Node* mem = map()->memory();
  // do not use this node for any more parsing!
duke@435: debug_only( map()->set_memory((Node*)NULL) ); duke@435: return _gvn.transform( mem ); duke@435: } duke@435: duke@435: //------------------------------set_all_memory--------------------------------- duke@435: void GraphKit::set_all_memory(Node* newmem) { duke@435: Node* mergemem = MergeMemNode::make(C, newmem); duke@435: gvn().set_type_bottom(mergemem); duke@435: map()->set_memory(mergemem); duke@435: } duke@435: duke@435: //------------------------------set_all_memory_call---------------------------- duke@435: void GraphKit::set_all_memory_call(Node* call) { duke@435: Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ); duke@435: set_all_memory(newmem); duke@435: } duke@435: duke@435: //============================================================================= duke@435: // duke@435: // parser factory methods for MemNodes duke@435: // duke@435: // These are layered on top of the factory methods in LoadNode and StoreNode, duke@435: // and integrate with the parser's memory state and _gvn engine. duke@435: // duke@435: duke@435: // factory methods in "int adr_idx" duke@435: Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, duke@435: int adr_idx, duke@435: bool require_atomic_access) { duke@435: assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); duke@435: const TypePtr* adr_type = NULL; // debug-mode-only argument duke@435: debug_only(adr_type = C->get_adr_type(adr_idx)); duke@435: Node* mem = memory(adr_idx); duke@435: Node* ld; duke@435: if (require_atomic_access && bt == T_LONG) { duke@435: ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t); duke@435: } else { coleenp@548: ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt); duke@435: } duke@435: return _gvn.transform(ld); duke@435: } duke@435: duke@435: Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, duke@435: int adr_idx, duke@435: bool require_atomic_access) { duke@435: assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); duke@435: const TypePtr* adr_type = NULL; duke@435: debug_only(adr_type = C->get_adr_type(adr_idx)); duke@435: Node *mem = memory(adr_idx); duke@435: Node* st; duke@435: if (require_atomic_access && bt == T_LONG) { duke@435: st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val); duke@435: } else { coleenp@548: st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt); duke@435: } duke@435: st = _gvn.transform(st); duke@435: set_memory(st, adr_idx); duke@435: // Back-to-back stores can only remove intermediate store with DU info duke@435: // so push on worklist for optimizer. 
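// (Concretely: if the incoming memory 'mem' is itself a store to the same
//  address 'adr', that older store may become redundant once this one is
//  issued; IGVN can only prove and remove it with def-use info, hence the
//  record_for_igvn() below.)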
duke@435: if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address)) duke@435: record_for_igvn(st); duke@435: duke@435: return st; duke@435: } duke@435: duke@435: void GraphKit::pre_barrier(Node* ctl, duke@435: Node* obj, duke@435: Node* adr, duke@435: uint adr_idx, duke@435: Node *val, duke@435: const Type* val_type, duke@435: BasicType bt) { duke@435: BarrierSet* bs = Universe::heap()->barrier_set(); duke@435: set_control(ctl); duke@435: switch (bs->kind()) { ysr@777: case BarrierSet::G1SATBCT: ysr@777: case BarrierSet::G1SATBCTLogging: ysr@777: g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt); ysr@777: break; duke@435: duke@435: case BarrierSet::CardTableModRef: duke@435: case BarrierSet::CardTableExtension: duke@435: case BarrierSet::ModRef: duke@435: break; duke@435: duke@435: case BarrierSet::Other: duke@435: default : duke@435: ShouldNotReachHere(); duke@435: duke@435: } duke@435: } duke@435: duke@435: void GraphKit::post_barrier(Node* ctl, duke@435: Node* store, duke@435: Node* obj, duke@435: Node* adr, duke@435: uint adr_idx, duke@435: Node *val, duke@435: BasicType bt, duke@435: bool use_precise) { duke@435: BarrierSet* bs = Universe::heap()->barrier_set(); duke@435: set_control(ctl); duke@435: switch (bs->kind()) { ysr@777: case BarrierSet::G1SATBCT: ysr@777: case BarrierSet::G1SATBCTLogging: ysr@777: g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise); ysr@777: break; duke@435: duke@435: case BarrierSet::CardTableModRef: duke@435: case BarrierSet::CardTableExtension: duke@435: write_barrier_post(store, obj, adr, val, use_precise); duke@435: break; duke@435: duke@435: case BarrierSet::ModRef: duke@435: break; duke@435: duke@435: case BarrierSet::Other: duke@435: default : duke@435: ShouldNotReachHere(); duke@435: duke@435: } duke@435: } duke@435: duke@435: Node* GraphKit::store_oop_to_object(Node* ctl, duke@435: Node* obj, duke@435: Node* adr, duke@435: const TypePtr* adr_type, duke@435: Node *val, duke@435: const Type* val_type, duke@435: BasicType bt) { duke@435: uint adr_idx = C->get_alias_index(adr_type); duke@435: Node* store; duke@435: pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt); duke@435: store = store_to_memory(control(), adr, val, bt, adr_idx); duke@435: post_barrier(control(), store, obj, adr, adr_idx, val, bt, false); duke@435: return store; duke@435: } duke@435: duke@435: Node* GraphKit::store_oop_to_array(Node* ctl, duke@435: Node* obj, duke@435: Node* adr, duke@435: const TypePtr* adr_type, duke@435: Node *val, duke@435: const Type* val_type, duke@435: BasicType bt) { duke@435: uint adr_idx = C->get_alias_index(adr_type); duke@435: Node* store; duke@435: pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt); duke@435: store = store_to_memory(control(), adr, val, bt, adr_idx); duke@435: post_barrier(control(), store, obj, adr, adr_idx, val, bt, true); duke@435: return store; duke@435: } duke@435: duke@435: Node* GraphKit::store_oop_to_unknown(Node* ctl, duke@435: Node* obj, duke@435: Node* adr, duke@435: const TypePtr* adr_type, duke@435: Node *val, duke@435: const Type* val_type, duke@435: BasicType bt) { duke@435: uint adr_idx = C->get_alias_index(adr_type); duke@435: Node* store; duke@435: pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt); duke@435: store = store_to_memory(control(), adr, val, bt, adr_idx); duke@435: post_barrier(control(), store, obj, adr, adr_idx, val, bt, true); duke@435: return store; duke@435: } duke@435: duke@435: duke@435: 
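// A minimal usage sketch for the three store_oop_to_* helpers above (the
// names 'obj', 'adr', 'adr_type', 'val' and 'val_type' here stand for
// hypothetical caller values, not identifiers defined in this file):
//
//   store_oop_to_object(control(), obj, adr, adr_type, val, val_type, T_OBJECT);
//
// expands to pre_barrier(); store_to_memory(); post_barrier() exactly as coded
// above, with an imprecise card mark (use_precise == false); the _to_array and
// _to_unknown variants differ only in requesting a precise card mark.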
//-------------------------array_element_address------------------------- duke@435: Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, duke@435: const TypeInt* sizetype) { kvn@464: uint shift = exact_log2(type2aelembytes(elembt)); duke@435: uint header = arrayOopDesc::base_offset_in_bytes(elembt); duke@435: duke@435: // short-circuit a common case (saves lots of confusing waste motion) duke@435: jint idx_con = find_int_con(idx, -1); duke@435: if (idx_con >= 0) { duke@435: intptr_t offset = header + ((intptr_t)idx_con << shift); duke@435: return basic_plus_adr(ary, offset); duke@435: } duke@435: duke@435: // must be correct type for alignment purposes duke@435: Node* base = basic_plus_adr(ary, header); duke@435: #ifdef _LP64 duke@435: // The scaled index operand to AddP must be a clean 64-bit value. duke@435: // Java allows a 32-bit int to be incremented to a negative duke@435: // value, which appears in a 64-bit register as a large duke@435: // positive number. Using that large positive number as an duke@435: // operand in pointer arithmetic has bad consequences. duke@435: // On the other hand, 32-bit overflow is rare, and the possibility duke@435: // can often be excluded, if we annotate the ConvI2L node with duke@435: // a type assertion that its value is known to be a small positive duke@435: // number. (The prior range check has ensured this.) duke@435: // This assertion is used by ConvI2LNode::Ideal. duke@435: int index_max = max_jint - 1; // array size is max_jint, index is one less duke@435: if (sizetype != NULL) index_max = sizetype->_hi - 1; duke@435: const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax); duke@435: idx = _gvn.transform( new (C, 2) ConvI2LNode(idx, lidxtype) ); duke@435: #endif duke@435: Node* scale = _gvn.transform( new (C, 3) LShiftXNode(idx, intcon(shift)) ); duke@435: return basic_plus_adr(ary, base, scale); duke@435: } duke@435: duke@435: //-------------------------load_array_element------------------------- duke@435: Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) { duke@435: const Type* elemtype = arytype->elem(); duke@435: BasicType elembt = elemtype->array_element_basic_type(); duke@435: Node* adr = array_element_address(ary, idx, elembt, arytype->size()); duke@435: Node* ld = make_load(ctl, adr, elemtype, elembt, arytype); duke@435: return ld; duke@435: } duke@435: duke@435: //-------------------------set_arguments_for_java_call------------------------- duke@435: // Arguments (pre-popped from the stack) are taken from the JVMS. duke@435: void GraphKit::set_arguments_for_java_call(CallJavaNode* call) { duke@435: // Add the call arguments: duke@435: uint nargs = call->method()->arg_size(); duke@435: for (uint i = 0; i < nargs; i++) { duke@435: Node* arg = argument(i); duke@435: call->init_req(i + TypeFunc::Parms, arg); duke@435: } duke@435: } duke@435: duke@435: //---------------------------set_edges_for_java_call--------------------------- duke@435: // Connect a newly created call into the current JVMS. duke@435: // A return value node (if any) is returned from set_edges_for_java_call. 
duke@435: void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw) { duke@435: duke@435: // Add the predefined inputs: duke@435: call->init_req( TypeFunc::Control, control() ); duke@435: call->init_req( TypeFunc::I_O , i_o() ); duke@435: call->init_req( TypeFunc::Memory , reset_memory() ); duke@435: call->init_req( TypeFunc::FramePtr, frameptr() ); duke@435: call->init_req( TypeFunc::ReturnAdr, top() ); duke@435: duke@435: add_safepoint_edges(call, must_throw); duke@435: duke@435: Node* xcall = _gvn.transform(call); duke@435: duke@435: if (xcall == top()) { duke@435: set_control(top()); duke@435: return; duke@435: } duke@435: assert(xcall == call, "call identity is stable"); duke@435: duke@435: // Re-use the current map to produce the result. duke@435: duke@435: set_control(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Control))); duke@435: set_i_o( _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O ))); duke@435: set_all_memory_call(xcall); duke@435: duke@435: //return xcall; // no need, caller already has it duke@435: } duke@435: duke@435: Node* GraphKit::set_results_for_java_call(CallJavaNode* call) { duke@435: if (stopped()) return top(); // maybe the call folded up? duke@435: duke@435: // Capture the return value, if any. duke@435: Node* ret; duke@435: if (call->method() == NULL || duke@435: call->method()->return_type()->basic_type() == T_VOID) duke@435: ret = top(); duke@435: else ret = _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Parms)); duke@435: duke@435: // Note: Since any out-of-line call can produce an exception, duke@435: // we always insert an I_O projection from the call into the result. duke@435: duke@435: make_slow_call_ex(call, env()->Throwable_klass(), false); duke@435: duke@435: return ret; duke@435: } duke@435: duke@435: //--------------------set_predefined_input_for_runtime_call-------------------- duke@435: // Reading and setting the memory state is way conservative here. duke@435: // The real problem is that I am not doing real Type analysis on memory, duke@435: // so I cannot distinguish card mark stores from other stores. Across a GC duke@435: // point the Store Barrier and the card mark memory has to agree. I cannot duke@435: // have a card mark store and its barrier split across the GC point from duke@435: // either above or below. Here I get that to happen by reading ALL of memory. duke@435: // A better answer would be to separate out card marks from other memory. duke@435: // For now, return the input memory state, so that it can be reused duke@435: // after the call, if this call has restricted memory effects. duke@435: Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) { duke@435: // Set fixed predefined input arguments duke@435: Node* memory = reset_memory(); duke@435: call->init_req( TypeFunc::Control, control() ); duke@435: call->init_req( TypeFunc::I_O, top() ); // does no i/o duke@435: call->init_req( TypeFunc::Memory, memory ); // may gc ptrs duke@435: call->init_req( TypeFunc::FramePtr, frameptr() ); duke@435: call->init_req( TypeFunc::ReturnAdr, top() ); duke@435: return memory; duke@435: } duke@435: duke@435: //-------------------set_predefined_output_for_runtime_call-------------------- duke@435: // Set control and memory (not i_o) from the call. duke@435: // If keep_mem is not NULL, use it for the output state, duke@435: // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM. duke@435: // If hook_mem is NULL, this call produces no memory effects at all. 
duke@435: // If hook_mem is a Java-visible memory slice (such as arraycopy operands), duke@435: // then only that memory slice is taken from the call. duke@435: // In the last case, we must put an appropriate memory barrier before duke@435: // the call, so as to create the correct anti-dependencies on loads duke@435: // preceding the call. duke@435: void GraphKit::set_predefined_output_for_runtime_call(Node* call, duke@435: Node* keep_mem, duke@435: const TypePtr* hook_mem) { duke@435: // no i/o duke@435: set_control(_gvn.transform( new (C, 1) ProjNode(call,TypeFunc::Control) )); duke@435: if (keep_mem) { duke@435: // First clone the existing memory state duke@435: set_all_memory(keep_mem); duke@435: if (hook_mem != NULL) { duke@435: // Make memory for the call duke@435: Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ); duke@435: // Set the RawPtr memory state only. This covers all the heap top/GC stuff duke@435: // We also use hook_mem to extract specific effects from arraycopy stubs. duke@435: set_memory(mem, hook_mem); duke@435: } duke@435: // ...else the call has NO memory effects. duke@435: duke@435: // Make sure the call advertises its memory effects precisely. duke@435: // This lets us build accurate anti-dependences in gcm.cpp. duke@435: assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem), duke@435: "call node must be constructed correctly"); duke@435: } else { duke@435: assert(hook_mem == NULL, ""); duke@435: // This is not a "slow path" call; all memory comes from the call. duke@435: set_all_memory_call(call); duke@435: } duke@435: } duke@435: duke@435: //------------------------------increment_counter------------------------------ duke@435: // for statistics: increment a VM counter by 1 duke@435: duke@435: void GraphKit::increment_counter(address counter_addr) { duke@435: Node* adr1 = makecon(TypeRawPtr::make(counter_addr)); duke@435: increment_counter(adr1); duke@435: } duke@435: duke@435: void GraphKit::increment_counter(Node* counter_addr) { duke@435: int adr_type = Compile::AliasIdxRaw; duke@435: Node* cnt = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type); duke@435: Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1))); duke@435: store_to_memory( NULL, counter_addr, incr, T_INT, adr_type ); duke@435: } duke@435: duke@435: duke@435: //------------------------------uncommon_trap---------------------------------- duke@435: // Bail out to the interpreter in mid-method. Implemented by calling the duke@435: // uncommon_trap blob. This helper function inserts a runtime call with the duke@435: // right debug info. duke@435: void GraphKit::uncommon_trap(int trap_request, duke@435: ciKlass* klass, const char* comment, duke@435: bool must_throw, duke@435: bool keep_exact_action) { duke@435: if (failing()) stop(); duke@435: if (stopped()) return; // trap reachable? duke@435: duke@435: // Note: If ProfileTraps is true, and if a deopt. actually duke@435: // occurs here, the runtime will make sure an MDO exists. There is duke@435: // no need to call method()->build_method_data() at this point. duke@435: duke@435: #ifdef ASSERT duke@435: if (!must_throw) { duke@435: // Make sure the stack has at least enough depth to execute duke@435: // the current bytecode. 
duke@435: int inputs, ignore; duke@435: if (compute_stack_effects(inputs, ignore)) { duke@435: assert(sp() >= inputs, "must have enough JVMS stack to execute"); duke@435: // It is a frequent error in library_call.cpp to issue an duke@435: // uncommon trap with the _sp value already popped. duke@435: } duke@435: } duke@435: #endif duke@435: duke@435: Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request); duke@435: Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request); duke@435: duke@435: switch (action) { duke@435: case Deoptimization::Action_maybe_recompile: duke@435: case Deoptimization::Action_reinterpret: duke@435: // Temporary fix for 6529811 to allow virtual calls to be sure they duke@435: // get the chance to go from mono->bi->mega duke@435: if (!keep_exact_action && duke@435: Deoptimization::trap_request_index(trap_request) < 0 && duke@435: too_many_recompiles(reason)) { duke@435: // This BCI is causing too many recompilations. duke@435: action = Deoptimization::Action_none; duke@435: trap_request = Deoptimization::make_trap_request(reason, action); duke@435: } else { duke@435: C->set_trap_can_recompile(true); duke@435: } duke@435: break; duke@435: case Deoptimization::Action_make_not_entrant: duke@435: C->set_trap_can_recompile(true); duke@435: break; duke@435: #ifdef ASSERT duke@435: case Deoptimization::Action_none: duke@435: case Deoptimization::Action_make_not_compilable: duke@435: break; duke@435: default: duke@435: assert(false, "bad action"); duke@435: #endif duke@435: } duke@435: duke@435: if (TraceOptoParse) { duke@435: char buf[100]; duke@435: tty->print_cr("Uncommon trap %s at bci:%d", duke@435: Deoptimization::format_trap_request(buf, sizeof(buf), duke@435: trap_request), bci()); duke@435: } duke@435: duke@435: CompileLog* log = C->log(); duke@435: if (log != NULL) { duke@435: int kid = (klass == NULL)? -1: log->identify(klass); duke@435: log->begin_elem("uncommon_trap bci='%d'", bci()); duke@435: char buf[100]; duke@435: log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf), duke@435: trap_request)); duke@435: if (kid >= 0) log->print(" klass='%d'", kid); duke@435: if (comment != NULL) log->print(" comment='%s'", comment); duke@435: log->end_elem(); duke@435: } duke@435: duke@435: // Make sure any guarding test views this path as very unlikely duke@435: Node *i0 = control()->in(0); duke@435: if (i0 != NULL && i0->is_If()) { // Found a guarding if test? duke@435: IfNode *iff = i0->as_If(); duke@435: float f = iff->_prob; // Get prob duke@435: if (control()->Opcode() == Op_IfTrue) { duke@435: if (f > PROB_UNLIKELY_MAG(4)) duke@435: iff->_prob = PROB_MIN; duke@435: } else { duke@435: if (f < PROB_LIKELY_MAG(4)) duke@435: iff->_prob = PROB_MAX; duke@435: } duke@435: } duke@435: duke@435: // Clear out dead values from the debug info. duke@435: kill_dead_locals(); duke@435: duke@435: // Now insert the uncommon trap subroutine call duke@435: address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin(); duke@435: const TypePtr* no_memory_effects = NULL; duke@435: // Pass the index of the class to be loaded duke@435: Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON | duke@435: (must_throw ? 
RC_MUST_THROW : 0), duke@435: OptoRuntime::uncommon_trap_Type(), duke@435: call_addr, "uncommon_trap", no_memory_effects, duke@435: intcon(trap_request)); duke@435: assert(call->as_CallStaticJava()->uncommon_trap_request() == trap_request, duke@435: "must extract request correctly from the graph"); duke@435: assert(trap_request != 0, "zero value reserved by uncommon_trap_request"); duke@435: duke@435: call->set_req(TypeFunc::ReturnAdr, returnadr()); duke@435: // The debug info is the only real input to this call. duke@435: duke@435: // Halt-and-catch fire here. The above call should never return! duke@435: HaltNode* halt = new(C, TypeFunc::Parms) HaltNode(control(), frameptr()); duke@435: _gvn.set_type_bottom(halt); duke@435: root()->add_req(halt); duke@435: duke@435: stop_and_kill_map(); duke@435: } duke@435: duke@435: duke@435: //--------------------------just_allocated_object------------------------------ duke@435: // Report the object that was just allocated. duke@435: // It must be the case that there are no intervening safepoints. duke@435: // We use this to determine if an object is so "fresh" that duke@435: // it does not require card marks. duke@435: Node* GraphKit::just_allocated_object(Node* current_control) { duke@435: if (C->recent_alloc_ctl() == current_control) duke@435: return C->recent_alloc_obj(); duke@435: return NULL; duke@435: } duke@435: duke@435: duke@435: //------------------------------store_barrier---------------------------------- duke@435: // Insert a write-barrier store. This is to let generational GC work; we have duke@435: // to flag all oop-stores before the next GC point. duke@435: void GraphKit::write_barrier_post(Node* oop_store, Node* obj, Node* adr, duke@435: Node* val, bool use_precise) { duke@435: // No store check needed if we're storing a NULL or an old object duke@435: // (latter case is probably a string constant). The concurrent duke@435: // mark sweep garbage collector, however, needs to have all nonNull duke@435: // oop updates flagged via card-marks. duke@435: if (val != NULL && val->is_Con()) { duke@435: // must be either an oop or NULL duke@435: const Type* t = val->bottom_type(); duke@435: if (t == TypePtr::NULL_PTR || t == Type::TOP) duke@435: // stores of null never (?) need barriers duke@435: return; duke@435: ciObject* con = t->is_oopptr()->const_oop(); duke@435: if (con != NULL duke@435: && con->is_perm() duke@435: && Universe::heap()->can_elide_permanent_oop_store_barriers()) duke@435: // no store barrier needed, because no old-to-new ref created duke@435: return; duke@435: } duke@435: duke@435: if (use_ReduceInitialCardMarks() duke@435: && obj == just_allocated_object(control())) { duke@435: // We can skip marks on a freshly-allocated object. duke@435: // Keep this code in sync with do_eager_card_mark in runtime.cpp. duke@435: // That routine eagerly marks the occasional object which is produced duke@435: // by the slow path, so that we don't have to do it here. duke@435: return; duke@435: } duke@435: duke@435: if (!use_precise) { duke@435: // All card marks for a (non-array) instance are in one place: duke@435: adr = obj; duke@435: } duke@435: // (Else it's an array (or unknown), and we want more precise card marks.) 
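// (The card mark emitted below is, in effect,
//    card_table[(uintptr_t)adr >> card_shift] = 0;   // 0 == dirty
//  where card_shift comes from CardTableModRefBS (512-byte cards by default)
//  and byte_map_base_node() supplies the pre-biased base of the card byte map.)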
duke@435: assert(adr != NULL, ""); duke@435: duke@435: // Get the alias_index for raw card-mark memory duke@435: int adr_type = Compile::AliasIdxRaw; duke@435: // Convert the pointer to an int prior to doing math on it duke@435: Node* cast = _gvn.transform(new (C, 2) CastP2XNode(control(), adr)); duke@435: // Divide by card size duke@435: assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef, duke@435: "Only one we handle so far."); duke@435: CardTableModRefBS* ct = duke@435: (CardTableModRefBS*)(Universe::heap()->barrier_set()); duke@435: Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) )); duke@435: // We store into a byte array, so do not bother to left-shift by zero never@998: Node *c = byte_map_base_node(); duke@435: // Combine duke@435: Node *sb_ctl = control(); duke@435: Node *sb_adr = _gvn.transform(new (C, 4) AddPNode( top()/*no base ptr*/, c, b )); duke@435: Node *sb_val = _gvn.intcon(0); duke@435: // Smash zero into card duke@435: if( !UseConcMarkSweepGC ) { duke@435: BasicType bt = T_BYTE; duke@435: store_to_memory(sb_ctl, sb_adr, sb_val, bt, adr_type); duke@435: } else { duke@435: // Specialized path for CM store barrier duke@435: cms_card_mark( sb_ctl, sb_adr, sb_val, oop_store); duke@435: } duke@435: } duke@435: duke@435: // Specialized path for CMS store barrier duke@435: void GraphKit::cms_card_mark(Node* ctl, Node* adr, Node* val, Node *oop_store) { duke@435: BasicType bt = T_BYTE; duke@435: int adr_idx = Compile::AliasIdxRaw; duke@435: Node* mem = memory(adr_idx); duke@435: duke@435: // The type input is NULL in PRODUCT builds duke@435: const TypePtr* type = NULL; duke@435: debug_only(type = C->get_adr_type(adr_idx)); duke@435: duke@435: // Add required edge to oop_store, optimizer does not support precedence edges. duke@435: // Convert required edge to precedence edge before allocation. duke@435: Node *store = _gvn.transform( new (C, 5) StoreCMNode(ctl, mem, adr, type, val, oop_store) ); duke@435: set_memory(store, adr_idx); duke@435: duke@435: // For CMS, back-to-back card-marks can only remove the first one duke@435: // and this requires DU info. Push on worklist for optimizer. duke@435: if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address)) duke@435: record_for_igvn(store); duke@435: } duke@435: duke@435: duke@435: void GraphKit::round_double_arguments(ciMethod* dest_method) { duke@435: // (Note: TypeFunc::make has a cache that makes this fast.) 
duke@435: const TypeFunc* tf = TypeFunc::make(dest_method); duke@435: int nargs = tf->_domain->_cnt - TypeFunc::Parms; duke@435: for (int j = 0; j < nargs; j++) { duke@435: const Type *targ = tf->_domain->field_at(j + TypeFunc::Parms); duke@435: if( targ->basic_type() == T_DOUBLE ) { duke@435: // If any parameters are doubles, they must be rounded before duke@435: // the call, dstore_rounding does gvn.transform duke@435: Node *arg = argument(j); duke@435: arg = dstore_rounding(arg); duke@435: set_argument(j, arg); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void GraphKit::round_double_result(ciMethod* dest_method) { duke@435: // A non-strict method may return a double value which has an extended duke@435: // exponent, but this must not be visible in a caller which is 'strict' duke@435: // If a strict caller invokes a non-strict callee, round a double result duke@435: duke@435: BasicType result_type = dest_method->return_type()->basic_type(); duke@435: assert( method() != NULL, "must have caller context"); duke@435: if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) { duke@435: // Destination method's return value is on top of stack duke@435: // dstore_rounding() does gvn.transform duke@435: Node *result = pop_pair(); duke@435: result = dstore_rounding(result); duke@435: push_pair(result); duke@435: } duke@435: } duke@435: duke@435: // rounding for strict float precision conformance duke@435: Node* GraphKit::precision_rounding(Node* n) { duke@435: return UseStrictFP && _method->flags().is_strict() duke@435: && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding duke@435: ? _gvn.transform( new (C, 2) RoundFloatNode(0, n) ) duke@435: : n; duke@435: } duke@435: duke@435: // rounding for strict double precision conformance duke@435: Node* GraphKit::dprecision_rounding(Node *n) { duke@435: return UseStrictFP && _method->flags().is_strict() duke@435: && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding duke@435: ? _gvn.transform( new (C, 2) RoundDoubleNode(0, n) ) duke@435: : n; duke@435: } duke@435: duke@435: // rounding for non-strict double stores duke@435: Node* GraphKit::dstore_rounding(Node* n) { duke@435: return Matcher::strict_fp_requires_explicit_rounding duke@435: && UseSSE <= 1 duke@435: ? _gvn.transform( new (C, 2) RoundDoubleNode(0, n) ) duke@435: : n; duke@435: } duke@435: duke@435: //============================================================================= duke@435: // Generate a fast path/slow path idiom. Graph looks like: duke@435: // [foo] indicates that 'foo' is a parameter duke@435: // duke@435: // [in] NULL duke@435: // \ / duke@435: // CmpP duke@435: // Bool ne duke@435: // If duke@435: // / \ duke@435: // True False-<2> duke@435: // / | duke@435: // / cast_not_null duke@435: // Load | | ^ duke@435: // [fast_test] | | duke@435: // gvn to opt_test | | duke@435: // / \ | <1> duke@435: // True False | duke@435: // | \\ | duke@435: // [slow_call] \[fast_result] duke@435: // Ctl Val \ \ duke@435: // | \ \ duke@435: // Catch <1> \ \ duke@435: // / \ ^ \ \ duke@435: // Ex No_Ex | \ \ duke@435: // | \ \ | \ <2> \ duke@435: // ... \ [slow_res] | | \ [null_result] duke@435: // \ \--+--+--- | | duke@435: // \ | / \ | / duke@435: // --------Region Phi duke@435: // duke@435: //============================================================================= duke@435: // Code is structured as a series of driver functions all called 'do_XXX' that duke@435: // call a set of helper functions. 
Helper functions first, then drivers. duke@435: duke@435: //------------------------------null_check_oop--------------------------------- duke@435: // Null check oop. Set null-path control into Region in slot 3. duke@435: // Make a cast-not-nullness use the other not-null control. Return cast. duke@435: Node* GraphKit::null_check_oop(Node* value, Node* *null_control, duke@435: bool never_see_null) { duke@435: // Initial NULL check taken path duke@435: (*null_control) = top(); duke@435: Node* cast = null_check_common(value, T_OBJECT, false, null_control); duke@435: duke@435: // Generate uncommon_trap: duke@435: if (never_see_null && (*null_control) != top()) { duke@435: // If we see an unexpected null at a check-cast we record it and force a duke@435: // recompile; the offending check-cast will be compiled to handle NULLs. duke@435: // If we see more than one offending BCI, then all checkcasts in the duke@435: // method will be compiled to handle NULLs. duke@435: PreserveJVMState pjvms(this); duke@435: set_control(*null_control); kvn@767: replace_in_map(value, null()); duke@435: uncommon_trap(Deoptimization::Reason_null_check, duke@435: Deoptimization::Action_make_not_entrant); duke@435: (*null_control) = top(); // NULL path is dead duke@435: } duke@435: duke@435: // Cast away null-ness on the result duke@435: return cast; duke@435: } duke@435: duke@435: //------------------------------opt_iff---------------------------------------- duke@435: // Optimize the fast-check IfNode. Set the fast-path region slot 2. duke@435: // Return slow-path control. duke@435: Node* GraphKit::opt_iff(Node* region, Node* iff) { duke@435: IfNode *opt_iff = _gvn.transform(iff)->as_If(); duke@435: duke@435: // Fast path taken; set region slot 2 duke@435: Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_iff) ); duke@435: region->init_req(2,fast_taken); // Capture fast-control duke@435: duke@435: // Fast path not-taken, i.e. slow path duke@435: Node *slow_taken = _gvn.transform( new (C, 1) IfTrueNode(opt_iff) ); duke@435: return slow_taken; duke@435: } duke@435: duke@435: //-----------------------------make_runtime_call------------------------------- duke@435: Node* GraphKit::make_runtime_call(int flags, duke@435: const TypeFunc* call_type, address call_addr, duke@435: const char* call_name, duke@435: const TypePtr* adr_type, duke@435: // The following parms are all optional. duke@435: // The first NULL ends the list. duke@435: Node* parm0, Node* parm1, duke@435: Node* parm2, Node* parm3, duke@435: Node* parm4, Node* parm5, duke@435: Node* parm6, Node* parm7) { duke@435: // Slow-path call duke@435: int size = call_type->domain()->cnt(); duke@435: bool is_leaf = !(flags & RC_NO_LEAF); duke@435: bool has_io = (!is_leaf && !(flags & RC_NO_IO)); duke@435: if (call_name == NULL) { duke@435: assert(!is_leaf, "must supply name for leaf"); duke@435: call_name = OptoRuntime::stub_name(call_addr); duke@435: } duke@435: CallNode* call; duke@435: if (!is_leaf) { duke@435: call = new(C, size) CallStaticJavaNode(call_type, call_addr, call_name, duke@435: bci(), adr_type); duke@435: } else if (flags & RC_NO_FP) { duke@435: call = new(C, size) CallLeafNoFPNode(call_type, call_addr, call_name, adr_type); duke@435: } else { duke@435: call = new(C, size) CallLeafNode(call_type, call_addr, call_name, adr_type); duke@435: } duke@435: duke@435: // The following is similar to set_edges_for_java_call, duke@435: // except that the memory effects of the call are restricted to AliasIdxRaw. 
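// (Reading guide for the memory plumbing below: 'wide_in' -- the default --
//  feeds the call the entire memory state, while RC_NARROW_MEM restricts its
//  input to the single alias slice named by 'adr_type'; 'wide_out', i.e.
//  adr_type mapping to AliasIdxBot, decides whether the call's Memory
//  projection replaces all of memory or only that one slice.)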
duke@435: duke@435: // Slow path call has no side-effects, uses few values duke@435: bool wide_in = !(flags & RC_NARROW_MEM); duke@435: bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot); duke@435: duke@435: Node* prev_mem = NULL; duke@435: if (wide_in) { duke@435: prev_mem = set_predefined_input_for_runtime_call(call); duke@435: } else { duke@435: assert(!wide_out, "narrow in => narrow out"); duke@435: Node* narrow_mem = memory(adr_type); duke@435: prev_mem = reset_memory(); duke@435: map()->set_memory(narrow_mem); duke@435: set_predefined_input_for_runtime_call(call); duke@435: } duke@435: duke@435: // Hook each parm in order. Stop looking at the first NULL. duke@435: if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0); duke@435: if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1); duke@435: if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2); duke@435: if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3); duke@435: if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4); duke@435: if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5); duke@435: if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6); duke@435: if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7); duke@435: /* close each nested if ===> */ } } } } } } } } duke@435: assert(call->in(call->req()-1) != NULL, "must initialize all parms"); duke@435: duke@435: if (!is_leaf) { duke@435: // Non-leaves can block and take safepoints: duke@435: add_safepoint_edges(call, ((flags & RC_MUST_THROW) != 0)); duke@435: } duke@435: // Non-leaves can throw exceptions: duke@435: if (has_io) { duke@435: call->set_req(TypeFunc::I_O, i_o()); duke@435: } duke@435: duke@435: if (flags & RC_UNCOMMON) { duke@435: // Set the count to a tiny probability. Cf. Estimate_Block_Frequency. duke@435: // (An "if" probability corresponds roughly to an unconditional count. duke@435: // Sort of.) duke@435: call->set_cnt(PROB_UNLIKELY_MAG(4)); duke@435: } duke@435: duke@435: Node* c = _gvn.transform(call); duke@435: assert(c == call, "cannot disappear"); duke@435: duke@435: if (wide_out) { duke@435: // Slow path call has full side-effects. duke@435: set_predefined_output_for_runtime_call(call); duke@435: } else { duke@435: // Slow path call has few side-effects, and/or sets few values. duke@435: set_predefined_output_for_runtime_call(call, prev_mem, adr_type); duke@435: } duke@435: duke@435: if (has_io) { duke@435: set_i_o(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O))); duke@435: } duke@435: return call; duke@435: duke@435: } duke@435: duke@435: //------------------------------merge_memory----------------------------------- duke@435: // Merge memory from one path into the current memory state. duke@435: void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) { duke@435: for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) { duke@435: Node* old_slice = mms.force_memory(); duke@435: Node* new_slice = mms.memory2(); duke@435: if (old_slice != new_slice) { duke@435: PhiNode* phi; duke@435: if (new_slice->is_Phi() && new_slice->as_Phi()->region() == region) { duke@435: phi = new_slice->as_Phi(); duke@435: #ifdef ASSERT duke@435: if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) duke@435: old_slice = old_slice->in(new_path); duke@435: // Caller is responsible for ensuring that any pre-existing duke@435: // phis are already aware of old memory. duke@435: int old_path = (new_path > 1) ? 
1 : 2; // choose old_path != new_path duke@435: assert(phi->in(old_path) == old_slice, "pre-existing phis OK"); duke@435: #endif duke@435: mms.set_memory(phi); duke@435: } else { duke@435: phi = PhiNode::make(region, old_slice, Type::MEMORY, mms.adr_type(C)); duke@435: _gvn.set_type(phi, Type::MEMORY); duke@435: phi->set_req(new_path, new_slice); duke@435: mms.set_memory(_gvn.transform(phi)); // assume it is complete duke@435: } duke@435: } duke@435: } duke@435: } duke@435: duke@435: //------------------------------make_slow_call_ex------------------------------ duke@435: // Make the exception handler hookups for the slow call duke@435: void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj) { duke@435: if (stopped()) return; duke@435: duke@435: // Make a catch node with just two handlers: fall-through and catch-all duke@435: Node* i_o = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::I_O, separate_io_proj) ); duke@435: Node* catc = _gvn.transform( new (C, 2) CatchNode(control(), i_o, 2) ); duke@435: Node* norm = _gvn.transform( new (C, 1) CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci) ); duke@435: Node* excp = _gvn.transform( new (C, 1) CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci) ); duke@435: duke@435: { PreserveJVMState pjvms(this); duke@435: set_control(excp); duke@435: set_i_o(i_o); duke@435: duke@435: if (excp != top()) { duke@435: // Create an exception state also. duke@435: // Use an exact type if the caller has specified a specific exception. duke@435: const Type* ex_type = TypeOopPtr::make_from_klass_unique(ex_klass)->cast_to_ptr_type(TypePtr::NotNull); duke@435: Node* ex_oop = new (C, 2) CreateExNode(ex_type, control(), i_o); duke@435: add_exception_state(make_exception_state(_gvn.transform(ex_oop))); duke@435: } duke@435: } duke@435: duke@435: // Get the no-exception control from the CatchNode. duke@435: set_control(norm); duke@435: } duke@435: duke@435: duke@435: //-------------------------------gen_subtype_check----------------------------- duke@435: // Generate a subtyping check. Takes as input the subtype and supertype. duke@435: // Returns 2 values: sets the default control() to the true path and returns duke@435: // the false path. Only reads invariant memory; sets no (visible) memory. duke@435: // The PartialSubtypeCheckNode sets the hidden 1-word cache in the encoding duke@435: // but that's not exposed to the optimizer. This call also doesn't take in an duke@435: // Object; if you wish to check an Object you need to load the Object's class duke@435: // prior to coming here. duke@435: Node* GraphKit::gen_subtype_check(Node* subklass, Node* superklass) { duke@435: // Fast check for identical types, perhaps identical constants. duke@435: // The types can even be identical non-constants, in cases duke@435: // involving Array.newInstance, Object.clone, etc. duke@435: if (subklass == superklass) duke@435: return top(); // false path is dead; no test needed. duke@435: duke@435: if (_gvn.type(superklass)->singleton()) { duke@435: ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass(); duke@435: ciKlass* subk = _gvn.type(subklass)->is_klassptr()->klass(); duke@435: duke@435: // In the common case of an exact superklass, try to fold up the duke@435: // test before generating code. You may ask, why not just generate duke@435: // the code and then let it fold up? 
The answer is that the generated duke@435: // code will necessarily include null checks, which do not always duke@435: // completely fold away. If they are also needless, then they turn duke@435: // into a performance loss. Example: duke@435: // Foo[] fa = blah(); Foo x = fa[0]; fa[1] = x; duke@435: // Here, the type of 'fa' is often exact, so the store check duke@435: // of fa[1]=x will fold up, without testing the nullness of x. duke@435: switch (static_subtype_check(superk, subk)) { duke@435: case SSC_always_false: duke@435: { duke@435: Node* always_fail = control(); duke@435: set_control(top()); duke@435: return always_fail; duke@435: } duke@435: case SSC_always_true: duke@435: return top(); duke@435: case SSC_easy_test: duke@435: { duke@435: // Just do a direct pointer compare and be done. duke@435: Node* cmp = _gvn.transform( new(C, 3) CmpPNode(subklass, superklass) ); duke@435: Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); duke@435: IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); duke@435: set_control( _gvn.transform( new(C, 1) IfTrueNode (iff) ) ); duke@435: return _gvn.transform( new(C, 1) IfFalseNode(iff) ); duke@435: } duke@435: case SSC_full_test: duke@435: break; duke@435: default: duke@435: ShouldNotReachHere(); duke@435: } duke@435: } duke@435: duke@435: // %%% Possible further optimization: Even if the superklass is not exact, duke@435: // if the subklass is the unique subtype of the superklass, the check duke@435: // will always succeed. We could leave a dependency behind to ensure this. duke@435: duke@435: // First load the super-klass's check-offset duke@435: Node *p1 = basic_plus_adr( superklass, superklass, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes() ); duke@435: Node *chk_off = _gvn.transform( new (C, 3) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) ); duke@435: int cacheoff_con = sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes(); duke@435: bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con); duke@435: duke@435: // Load from the sub-klass's super-class display list, or a 1-word cache of duke@435: // the secondary superclass list, or a failing value with a sentinel offset duke@435: // if the super-klass is an interface or exceptionally deep in the Java duke@435: // hierarchy and we have to scan the secondary superclass list the hard way. duke@435: // Worst-case type is a little odd: NULL is allowed as a result (usually duke@435: // klass loads can never produce a NULL). duke@435: Node *chk_off_X = ConvI2X(chk_off); duke@435: Node *p2 = _gvn.transform( new (C, 4) AddPNode(subklass,subklass,chk_off_X) ); duke@435: // For some types like interfaces the following loadKlass is from a 1-word duke@435: // cache which is mutable so can't use immutable memory. Other duke@435: // types load from the super-class display table which is immutable. duke@435: Node *kmem = might_be_cache ? memory(p2) : immutable_memory(); kvn@599: Node *nkls = _gvn.transform( LoadKlassNode::make( _gvn, kmem, p2, _gvn.type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL ) ); duke@435: duke@435: // Compile speed common case: ARE a subtype and we canNOT fail duke@435: if( superklass == nkls ) duke@435: return top(); // false path is dead; no test needed. duke@435: duke@435: // See if we get an immediate positive hit. Happens roughly 83% of the duke@435: // time. Test to see if the value loaded just previously from the subklass duke@435: // is exactly the superklass. 
duke@435: Node *cmp1 = _gvn.transform( new (C, 3) CmpPNode( superklass, nkls ) ); duke@435: Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp1, BoolTest::eq ) ); duke@435: IfNode *iff1 = create_and_xform_if( control(), bol1, PROB_LIKELY(0.83f), COUNT_UNKNOWN ); duke@435: Node *iftrue1 = _gvn.transform( new (C, 1) IfTrueNode ( iff1 ) ); duke@435: set_control( _gvn.transform( new (C, 1) IfFalseNode( iff1 ) ) ); duke@435: duke@435: // Compile speed common case: Check for being deterministic right now. If duke@435: // chk_off is a constant and not equal to cacheoff then we are NOT a duke@435: // subklass. In this case we need exactly the 1 test above and we can duke@435: // return those results immediately. duke@435: if (!might_be_cache) { duke@435: Node* not_subtype_ctrl = control(); duke@435: set_control(iftrue1); // We need exactly the 1 test above duke@435: return not_subtype_ctrl; duke@435: } duke@435: duke@435: // Gather the various success & failures here duke@435: RegionNode *r_ok_subtype = new (C, 4) RegionNode(4); duke@435: record_for_igvn(r_ok_subtype); duke@435: RegionNode *r_not_subtype = new (C, 3) RegionNode(3); duke@435: record_for_igvn(r_not_subtype); duke@435: duke@435: r_ok_subtype->init_req(1, iftrue1); duke@435: duke@435: // Check for immediate negative hit. Happens roughly 11% of the time (which duke@435: // is roughly 63% of the remaining cases). Test to see if the loaded duke@435: // check-offset points into the subklass display list or the 1-element duke@435: // cache. If it points to the display (and NOT the cache) and the display duke@435: // missed then it's not a subtype. duke@435: Node *cacheoff = _gvn.intcon(cacheoff_con); duke@435: Node *cmp2 = _gvn.transform( new (C, 3) CmpINode( chk_off, cacheoff ) ); duke@435: Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmp2, BoolTest::ne ) ); duke@435: IfNode *iff2 = create_and_xform_if( control(), bol2, PROB_LIKELY(0.63f), COUNT_UNKNOWN ); duke@435: r_not_subtype->init_req(1, _gvn.transform( new (C, 1) IfTrueNode (iff2) ) ); duke@435: set_control( _gvn.transform( new (C, 1) IfFalseNode(iff2) ) ); duke@435: duke@435: // Check for self. Very rare to get here, but its taken 1/3 the time. duke@435: // No performance impact (too rare) but allows sharing of secondary arrays duke@435: // which has some footprint reduction. duke@435: Node *cmp3 = _gvn.transform( new (C, 3) CmpPNode( subklass, superklass ) ); duke@435: Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmp3, BoolTest::eq ) ); duke@435: IfNode *iff3 = create_and_xform_if( control(), bol3, PROB_LIKELY(0.36f), COUNT_UNKNOWN ); duke@435: r_ok_subtype->init_req(2, _gvn.transform( new (C, 1) IfTrueNode ( iff3 ) ) ); duke@435: set_control( _gvn.transform( new (C, 1) IfFalseNode( iff3 ) ) ); duke@435: duke@435: // Now do a linear scan of the secondary super-klass array. Again, no real duke@435: // performance impact (too rare) but it's gotta be done. duke@435: // (The stub also contains the self-check of subklass == superklass. duke@435: // Since the code is rarely used, there is no penalty for moving it duke@435: // out of line, and it can only improve I-cache density.) 
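// (Return convention relied on below: the PartialSubtypeCheckNode produces
//  null() when the secondary-superclass scan finds the superklass -- updating
//  the hidden 1-word cache mentioned above -- and a non-null value when the
//  subtype check fails, hence the BoolTest::ne comparison against null().)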
duke@435: Node* psc = _gvn.transform( duke@435: new (C, 3) PartialSubtypeCheckNode(control(), subklass, superklass) ); duke@435: duke@435: Node *cmp4 = _gvn.transform( new (C, 3) CmpPNode( psc, null() ) ); duke@435: Node *bol4 = _gvn.transform( new (C, 2) BoolNode( cmp4, BoolTest::ne ) ); duke@435: IfNode *iff4 = create_and_xform_if( control(), bol4, PROB_FAIR, COUNT_UNKNOWN ); duke@435: r_not_subtype->init_req(2, _gvn.transform( new (C, 1) IfTrueNode (iff4) ) ); duke@435: r_ok_subtype ->init_req(3, _gvn.transform( new (C, 1) IfFalseNode(iff4) ) ); duke@435: duke@435: // Return false path; set default control to true path. duke@435: set_control( _gvn.transform(r_ok_subtype) ); duke@435: return _gvn.transform(r_not_subtype); duke@435: } duke@435: duke@435: //----------------------------static_subtype_check----------------------------- duke@435: // Shortcut important common cases when superklass is exact: duke@435: // (0) superklass is java.lang.Object (can occur in reflective code) duke@435: // (1) subklass is already limited to a subtype of superklass => always ok duke@435: // (2) subklass does not overlap with superklass => always fail duke@435: // (3) superklass has NO subtypes and we can check with a simple compare. duke@435: int GraphKit::static_subtype_check(ciKlass* superk, ciKlass* subk) { duke@435: if (StressReflectiveCode) { duke@435: return SSC_full_test; // Let caller generate the general case. duke@435: } duke@435: duke@435: if (superk == env()->Object_klass()) { duke@435: return SSC_always_true; // (0) this test cannot fail duke@435: } duke@435: duke@435: ciType* superelem = superk; duke@435: if (superelem->is_array_klass()) duke@435: superelem = superelem->as_array_klass()->base_element_type(); duke@435: duke@435: if (!subk->is_interface()) { // cannot trust static interface types yet duke@435: if (subk->is_subtype_of(superk)) { duke@435: return SSC_always_true; // (1) false path dead; no dynamic test needed duke@435: } duke@435: if (!(superelem->is_klass() && superelem->as_klass()->is_interface()) && duke@435: !superk->is_subtype_of(subk)) { duke@435: return SSC_always_false; duke@435: } duke@435: } duke@435: duke@435: // If casting to an instance klass, it must have no subtypes duke@435: if (superk->is_interface()) { duke@435: // Cannot trust interfaces yet. duke@435: // %%% S.B. superk->nof_implementors() == 1 duke@435: } else if (superelem->is_instance_klass()) { duke@435: ciInstanceKlass* ik = superelem->as_instance_klass(); duke@435: if (!ik->has_subklass() && !ik->is_interface()) { duke@435: if (!ik->is_final()) { duke@435: // Add a dependency if there is a chance of a later subclass. duke@435: C->dependencies()->assert_leaf_type(ik); duke@435: } duke@435: return SSC_easy_test; // (3) caller can do a simple ptr comparison duke@435: } duke@435: } else { duke@435: // A primitive array type has no subtypes. 
duke@435: return SSC_easy_test; // (3) caller can do a simple ptr comparison duke@435: } duke@435: duke@435: return SSC_full_test; duke@435: } duke@435: duke@435: // Profile-driven exact type check: duke@435: Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass, duke@435: float prob, duke@435: Node* *casted_receiver) { duke@435: const TypeKlassPtr* tklass = TypeKlassPtr::make(klass); duke@435: Node* recv_klass = load_object_klass(receiver); duke@435: Node* want_klass = makecon(tklass); duke@435: Node* cmp = _gvn.transform( new(C, 3) CmpPNode(recv_klass, want_klass) ); duke@435: Node* bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); duke@435: IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN); duke@435: set_control( _gvn.transform( new(C, 1) IfTrueNode (iff) )); duke@435: Node* fail = _gvn.transform( new(C, 1) IfFalseNode(iff) ); duke@435: duke@435: const TypeOopPtr* recv_xtype = tklass->as_instance_type(); duke@435: assert(recv_xtype->klass_is_exact(), ""); duke@435: duke@435: // Subsume downstream occurrences of receiver with a cast to duke@435: // recv_xtype, since now we know what the type will be. duke@435: Node* cast = new(C, 2) CheckCastPPNode(control(), receiver, recv_xtype); duke@435: (*casted_receiver) = _gvn.transform(cast); duke@435: // (User must make the replace_in_map call.) duke@435: duke@435: return fail; duke@435: } duke@435: duke@435: duke@435: //-------------------------------gen_instanceof-------------------------------- duke@435: // Generate an instance-of idiom. Used by both the instance-of bytecode duke@435: // and the reflective instance-of call. duke@435: Node* GraphKit::gen_instanceof( Node *subobj, Node* superklass ) { duke@435: C->set_has_split_ifs(true); // Has chance for split-if optimization duke@435: assert( !stopped(), "dead parse path should be checked in callers" ); duke@435: assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()), duke@435: "must check for not-null not-dead klass in callers"); duke@435: duke@435: // Make the merge point duke@435: enum { _obj_path = 1, _fail_path, _null_path, PATH_LIMIT }; duke@435: RegionNode* region = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT); duke@435: Node* phi = new(C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL); duke@435: C->set_has_split_ifs(true); // Has chance for split-if optimization duke@435: duke@435: // Null check; get casted pointer; set region slot 3 duke@435: Node* null_ctl = top(); duke@435: Node* not_null_obj = null_check_oop(subobj, &null_ctl); duke@435: duke@435: // If not_null_obj is dead, only null-path is taken duke@435: if (stopped()) { // Doing instance-of on a NULL? duke@435: set_control(null_ctl); duke@435: return intcon(0); duke@435: } duke@435: region->init_req(_null_path, null_ctl); duke@435: phi ->init_req(_null_path, intcon(0)); // Set null path value duke@435: duke@435: // Load the object's klass duke@435: Node* obj_klass = load_object_klass(not_null_obj); duke@435: duke@435: // Generate the subtype check duke@435: Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass); duke@435: duke@435: // Plug in the success path to the general merge in slot 1. duke@435: region->init_req(_obj_path, control()); duke@435: phi ->init_req(_obj_path, intcon(1)); duke@435: duke@435: // Plug in the failing path to the general merge in slot 2. 
duke@435: region->init_req(_fail_path, not_subtype_ctrl); duke@435: phi ->init_req(_fail_path, intcon(0)); duke@435: duke@435: // Return final merged results duke@435: set_control( _gvn.transform(region) ); duke@435: record_for_igvn(region); duke@435: return _gvn.transform(phi); duke@435: } duke@435: duke@435: //-------------------------------gen_checkcast--------------------------------- duke@435: // Generate a checkcast idiom. Used by both the checkcast bytecode and the duke@435: // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the duke@435: // uncommon-trap paths work. Adjust stack after this call. duke@435: // If failure_control is supplied and not null, it is filled in with duke@435: // the control edge for the cast failure. Otherwise, an appropriate duke@435: // uncommon trap or exception is thrown. duke@435: Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, duke@435: Node* *failure_control) { duke@435: kill_dead_locals(); // Benefit all the uncommon traps duke@435: const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr(); duke@435: const Type *toop = TypeOopPtr::make_from_klass(tk->klass()); duke@435: duke@435: // Fast cutout: Check the case that the cast is vacuously true. duke@435: // This detects the common cases where the test will short-circuit duke@435: // away completely. We do this before we perform the null check, duke@435: // because if the test is going to turn into zero code, we don't duke@435: // want a residual null check left around. (Causes a slowdown, duke@435: // for example, in some objArray manipulations, such as a[i]=a[j].) duke@435: if (tk->singleton()) { duke@435: const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); duke@435: if (objtp != NULL && objtp->klass() != NULL) { duke@435: switch (static_subtype_check(tk->klass(), objtp->klass())) { duke@435: case SSC_always_true: duke@435: return obj; duke@435: case SSC_always_false: duke@435: // It needs a null check because a null will *pass* the cast check. duke@435: // A non-null value will always produce an exception. duke@435: return do_null_assert(obj, T_OBJECT); duke@435: } duke@435: } duke@435: } duke@435: duke@435: ciProfileData* data = NULL; duke@435: if (failure_control == NULL) { // use MDO in regular case only duke@435: assert(java_bc() == Bytecodes::_aastore || duke@435: java_bc() == Bytecodes::_checkcast, duke@435: "interpreter profiles type checks only for these BCs"); duke@435: data = method()->method_data()->bci_to_data(bci()); duke@435: } duke@435: duke@435: // Make the merge point duke@435: enum { _obj_path = 1, _null_path, PATH_LIMIT }; duke@435: RegionNode* region = new (C, PATH_LIMIT) RegionNode(PATH_LIMIT); duke@435: Node* phi = new (C, PATH_LIMIT) PhiNode(region, toop); duke@435: C->set_has_split_ifs(true); // Has chance for split-if optimization duke@435: duke@435: // Use null-cast information if it is available duke@435: bool never_see_null = false; duke@435: // If we see an unexpected null at a check-cast we record it and force a duke@435: // recompile; the offending check-cast will be compiled to handle NULLs. duke@435: // If we see several offending BCIs, then all checkcasts in the duke@435: // method will be compiled to handle NULLs. duke@435: if (UncommonNullCast // Cutout for this technique duke@435: && failure_control == NULL // regular case duke@435: && obj != null() // And not the -Xcomp stupid case? duke@435: && !too_many_traps(Deoptimization::Reason_null_check)) { duke@435: // Finally, check the "null_seen" bit from the interpreter. 
duke@435: if (data == NULL || !data->as_BitData()->null_seen()) { duke@435: never_see_null = true; duke@435: } duke@435: } duke@435: duke@435: // Null check; get casted pointer; set region slot 3 duke@435: Node* null_ctl = top(); duke@435: Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null); duke@435: duke@435: // If not_null_obj is dead, only null-path is taken duke@435: if (stopped()) { // Doing instance-of on a NULL? duke@435: set_control(null_ctl); duke@435: return null(); duke@435: } duke@435: region->init_req(_null_path, null_ctl); duke@435: phi ->init_req(_null_path, null()); // Set null path value duke@435: duke@435: Node* cast_obj = NULL; // the casted version of the object duke@435: duke@435: // If the profile has seen exactly one type, narrow to that type. duke@435: // (The subsequent subtype check will always fold up.) duke@435: if (UseTypeProfile && TypeProfileCasts && data != NULL && duke@435: // Counter has never been decremented (due to cast failure). duke@435: // ...This is a reasonable thing to expect. It is true of duke@435: // all casts inserted by javac to implement generic types. duke@435: data->as_CounterData()->count() >= 0 && duke@435: !too_many_traps(Deoptimization::Reason_class_check)) { duke@435: // (No, this isn't a call, but it's enough like a virtual call duke@435: // to use the same ciMethod accessor to get the profile info...) duke@435: ciCallProfile profile = method()->call_profile_at_bci(bci()); duke@435: if (profile.count() >= 0 && // no cast failures here duke@435: profile.has_receiver(0) && duke@435: profile.morphism() == 1) { duke@435: ciKlass* exact_kls = profile.receiver(0); duke@435: int ssc = static_subtype_check(tk->klass(), exact_kls); duke@435: if (ssc == SSC_always_true) { duke@435: // If we narrow the type to match what the type profile sees, duke@435: // we can then remove the rest of the cast. duke@435: // This is a win, even if the exact_kls is very specific, duke@435: // because downstream operations, such as method calls, duke@435: // will often benefit from the sharper type. duke@435: Node* exact_obj = not_null_obj; // will get updated in place... duke@435: Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, duke@435: &exact_obj); duke@435: { PreserveJVMState pjvms(this); duke@435: set_control(slow_ctl); duke@435: uncommon_trap(Deoptimization::Reason_class_check, duke@435: Deoptimization::Action_maybe_recompile); duke@435: } duke@435: if (failure_control != NULL) // failure is now impossible duke@435: (*failure_control) = top(); duke@435: replace_in_map(not_null_obj, exact_obj); duke@435: // adjust the type of the phi to the exact klass: duke@435: phi->raise_bottom_type(_gvn.type(exact_obj)->meet(TypePtr::NULL_PTR)); duke@435: cast_obj = exact_obj; duke@435: } duke@435: // assert(cast_obj != NULL)... except maybe the profile lied to us. 
duke@435: } duke@435: } duke@435: duke@435: if (cast_obj == NULL) { duke@435: // Load the object's klass duke@435: Node* obj_klass = load_object_klass(not_null_obj); duke@435: duke@435: // Generate the subtype check duke@435: Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass ); duke@435: duke@435: // Plug in success path into the merge duke@435: cast_obj = _gvn.transform(new (C, 2) CheckCastPPNode(control(), duke@435: not_null_obj, toop)); duke@435: // Failure path ends in uncommon trap (or may be dead - failure impossible) duke@435: if (failure_control == NULL) { duke@435: if (not_subtype_ctrl != top()) { // If failure is possible duke@435: PreserveJVMState pjvms(this); duke@435: set_control(not_subtype_ctrl); duke@435: builtin_throw(Deoptimization::Reason_class_check, obj_klass); duke@435: } duke@435: } else { duke@435: (*failure_control) = not_subtype_ctrl; duke@435: } duke@435: } duke@435: duke@435: region->init_req(_obj_path, control()); duke@435: phi ->init_req(_obj_path, cast_obj); duke@435: duke@435: // A merge of NULL or Casted-NotNull obj duke@435: Node* res = _gvn.transform(phi); duke@435: duke@435: // Note I do NOT always 'replace_in_map(obj,result)' here. duke@435: // if( tk->klass()->can_be_primary_super() ) duke@435: // This means that if I successfully store an Object into an array-of-String duke@435: // I 'forget' that the Object is really now known to be a String. I have to duke@435: // do this because we don't have true union types for interfaces - if I store duke@435: // a Baz into an array-of-Interface and then tell the optimizer it's an duke@435: // Interface, I forget that it's also a Baz and cannot do Baz-like field duke@435: // references to it. FIX THIS WHEN UNION TYPES APPEAR! duke@435: // replace_in_map( obj, res ); duke@435: duke@435: // Return final merged results duke@435: set_control( _gvn.transform(region) ); duke@435: record_for_igvn(region); duke@435: return res; duke@435: } duke@435: duke@435: //------------------------------next_monitor----------------------------------- duke@435: // What number should be given to the next monitor? duke@435: int GraphKit::next_monitor() { duke@435: int current = jvms()->monitor_depth()* C->sync_stack_slots(); duke@435: int next = current + C->sync_stack_slots(); duke@435: // Keep the toplevel high water mark current: duke@435: if (C->fixed_slots() < next) C->set_fixed_slots(next); duke@435: return current; duke@435: } duke@435: duke@435: //------------------------------insert_mem_bar--------------------------------- duke@435: // Memory barrier to avoid floating things around duke@435: // The membar serves as a pinch point between both control and all memory slices. duke@435: Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) { duke@435: MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent); duke@435: mb->init_req(TypeFunc::Control, control()); duke@435: mb->init_req(TypeFunc::Memory, reset_memory()); duke@435: Node* membar = _gvn.transform(mb); duke@435: set_control(_gvn.transform(new (C, 1) ProjNode(membar,TypeFunc::Control) )); duke@435: set_all_memory_call(membar); duke@435: return membar; duke@435: } duke@435: duke@435: //-------------------------insert_mem_bar_volatile---------------------------- duke@435: // Memory barrier to avoid floating things around duke@435: // The membar serves as a pinch point between both control and memory(alias_idx). 
duke@435: // If you want to make a pinch point on all memory slices, do not use this duke@435: // function (even with AliasIdxBot); use insert_mem_bar() instead. duke@435: Node* GraphKit::insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent) { duke@435: // When Parse::do_put_xxx updates a volatile field, it appends a series duke@435: // of MemBarVolatile nodes, one for *each* volatile field alias category. duke@435: // The first membar is on the same memory slice as the field store opcode. duke@435: // This forces the membar to follow the store. (Bug 6500685 broke this.) duke@435: // All the other membars (for other volatile slices, including AliasIdxBot, duke@435: // which stands for all unknown volatile slices) are control-dependent duke@435: // on the first membar. This prevents later volatile loads or stores duke@435: // from sliding up past the just-emitted store. duke@435: duke@435: MemBarNode* mb = MemBarNode::make(C, opcode, alias_idx, precedent); duke@435: mb->set_req(TypeFunc::Control,control()); duke@435: if (alias_idx == Compile::AliasIdxBot) { duke@435: mb->set_req(TypeFunc::Memory, merged_memory()->base_memory()); duke@435: } else { duke@435: assert(!(opcode == Op_Initialize && alias_idx != Compile::AliasIdxRaw), "fix caller"); duke@435: mb->set_req(TypeFunc::Memory, memory(alias_idx)); duke@435: } duke@435: Node* membar = _gvn.transform(mb); duke@435: set_control(_gvn.transform(new (C, 1) ProjNode(membar, TypeFunc::Control))); duke@435: if (alias_idx == Compile::AliasIdxBot) { duke@435: merged_memory()->set_base_memory(_gvn.transform(new (C, 1) ProjNode(membar, TypeFunc::Memory))); duke@435: } else { duke@435: set_memory(_gvn.transform(new (C, 1) ProjNode(membar, TypeFunc::Memory)),alias_idx); duke@435: } duke@435: return membar; duke@435: } duke@435: duke@435: //------------------------------shared_lock------------------------------------ duke@435: // Emit locking code. duke@435: FastLockNode* GraphKit::shared_lock(Node* obj) { duke@435: // bci is either a monitorenter bc or InvocationEntryBci duke@435: // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces duke@435: assert(SynchronizationEntryBCI == InvocationEntryBci, ""); duke@435: duke@435: if( !GenerateSynchronizationCode ) duke@435: return NULL; // Not locking things? duke@435: if (stopped()) // Dead monitor? duke@435: return NULL; duke@435: duke@435: assert(dead_locals_are_killed(), "should kill locals before sync. point"); duke@435: duke@435: // Box the stack location duke@435: Node* box = _gvn.transform(new (C, 1) BoxLockNode(next_monitor())); duke@435: Node* mem = reset_memory(); duke@435: duke@435: FastLockNode * flock = _gvn.transform(new (C, 3) FastLockNode(0, obj, box) )->as_FastLock(); duke@435: if (PrintPreciseBiasedLockingStatistics) { duke@435: // Create the counters for this fast lock. duke@435: flock->create_lock_counter(sync_jvms()); // sync_jvms used to get current bci duke@435: } duke@435: // Add monitor to debug info for the slow path. 
If we block inside the duke@435: // slow path and de-opt, we need the monitor hanging around duke@435: map()->push_monitor( flock ); duke@435: duke@435: const TypeFunc *tf = LockNode::lock_type(); duke@435: LockNode *lock = new (C, tf->domain()->cnt()) LockNode(C, tf); duke@435: duke@435: lock->init_req( TypeFunc::Control, control() ); duke@435: lock->init_req( TypeFunc::Memory , mem ); duke@435: lock->init_req( TypeFunc::I_O , top() ) ; // does no i/o duke@435: lock->init_req( TypeFunc::FramePtr, frameptr() ); duke@435: lock->init_req( TypeFunc::ReturnAdr, top() ); duke@435: duke@435: lock->init_req(TypeFunc::Parms + 0, obj); duke@435: lock->init_req(TypeFunc::Parms + 1, box); duke@435: lock->init_req(TypeFunc::Parms + 2, flock); duke@435: add_safepoint_edges(lock); duke@435: duke@435: lock = _gvn.transform( lock )->as_Lock(); duke@435: duke@435: // lock has no side-effects, sets few values duke@435: set_predefined_output_for_runtime_call(lock, mem, TypeRawPtr::BOTTOM); duke@435: duke@435: insert_mem_bar(Op_MemBarAcquire); duke@435: duke@435: // Add this to the worklist so that the lock can be eliminated duke@435: record_for_igvn(lock); duke@435: duke@435: #ifndef PRODUCT duke@435: if (PrintLockStatistics) { duke@435: // Update the counter for this lock. Don't bother using an atomic duke@435: // operation since we don't require absolute accuracy. duke@435: lock->create_lock_counter(map()->jvms()); duke@435: int adr_type = Compile::AliasIdxRaw; duke@435: Node* counter_addr = makecon(TypeRawPtr::make(lock->counter()->addr())); duke@435: Node* cnt = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type); duke@435: Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1))); duke@435: store_to_memory(control(), counter_addr, incr, T_INT, adr_type); duke@435: } duke@435: #endif duke@435: duke@435: return flock; duke@435: } duke@435: duke@435: duke@435: //------------------------------shared_unlock---------------------------------- duke@435: // Emit unlocking code. duke@435: void GraphKit::shared_unlock(Node* box, Node* obj) { duke@435: // bci is either a monitorenter bc or InvocationEntryBci duke@435: // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces duke@435: assert(SynchronizationEntryBCI == InvocationEntryBci, ""); duke@435: duke@435: if( !GenerateSynchronizationCode ) duke@435: return; duke@435: if (stopped()) { // Dead monitor? 
duke@435: map()->pop_monitor(); // Kill monitor from debug info duke@435: return; duke@435: } duke@435: duke@435: // Memory barrier to avoid floating things down past the locked region duke@435: insert_mem_bar(Op_MemBarRelease); duke@435: duke@435: const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type(); duke@435: UnlockNode *unlock = new (C, tf->domain()->cnt()) UnlockNode(C, tf); duke@435: uint raw_idx = Compile::AliasIdxRaw; duke@435: unlock->init_req( TypeFunc::Control, control() ); duke@435: unlock->init_req( TypeFunc::Memory , memory(raw_idx) ); duke@435: unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o duke@435: unlock->init_req( TypeFunc::FramePtr, frameptr() ); duke@435: unlock->init_req( TypeFunc::ReturnAdr, top() ); duke@435: duke@435: unlock->init_req(TypeFunc::Parms + 0, obj); duke@435: unlock->init_req(TypeFunc::Parms + 1, box); duke@435: unlock = _gvn.transform(unlock)->as_Unlock(); duke@435: duke@435: Node* mem = reset_memory(); duke@435: duke@435: // unlock has no side-effects, sets few values duke@435: set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM); duke@435: duke@435: // Kill monitor from debug info duke@435: map()->pop_monitor( ); duke@435: } duke@435: duke@435: //-------------------------------get_layout_helper----------------------------- duke@435: // If the given klass is a constant or known to be an array, duke@435: // fetch the constant layout helper value into constant_value duke@435: // and return (Node*)NULL. Otherwise, load the non-constant duke@435: // layout helper value, and return the node which represents it. duke@435: // This two-faced routine is useful because allocation sites duke@435: // almost always feature constant types. duke@435: Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { duke@435: const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); duke@435: if (!StressReflectiveCode && inst_klass != NULL) { duke@435: ciKlass* klass = inst_klass->klass(); duke@435: bool xklass = inst_klass->klass_is_exact(); duke@435: if (xklass || klass->is_array_klass()) { duke@435: jint lhelper = klass->layout_helper(); duke@435: if (lhelper != Klass::_lh_neutral_value) { duke@435: constant_value = lhelper; duke@435: return (Node*) NULL; duke@435: } duke@435: } duke@435: } duke@435: constant_value = Klass::_lh_neutral_value; // put in a known value duke@435: Node* lhp = basic_plus_adr(klass_node, klass_node, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)); duke@435: return make_load(NULL, lhp, TypeInt::INT, T_INT); duke@435: } duke@435: duke@435: // We just put in an allocate/initialize with a big raw-memory effect. duke@435: // Hook selected additional alias categories on the initialization. 
duke@435: static void hook_memory_on_init(GraphKit& kit, int alias_idx, duke@435: MergeMemNode* init_in_merge, duke@435: Node* init_out_raw) { duke@435: DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory()); duke@435: assert(init_in_merge->memory_at(alias_idx) == init_in_raw, ""); duke@435: duke@435: Node* prevmem = kit.memory(alias_idx); duke@435: init_in_merge->set_memory_at(alias_idx, prevmem); duke@435: kit.set_memory(init_out_raw, alias_idx); duke@435: } duke@435: duke@435: //---------------------------set_output_for_allocation------------------------- duke@435: Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, duke@435: const TypeOopPtr* oop_type, duke@435: bool raw_mem_only) { duke@435: int rawidx = Compile::AliasIdxRaw; duke@435: alloc->set_req( TypeFunc::FramePtr, frameptr() ); duke@435: add_safepoint_edges(alloc); duke@435: Node* allocx = _gvn.transform(alloc); duke@435: set_control( _gvn.transform(new (C, 1) ProjNode(allocx, TypeFunc::Control) ) ); duke@435: // create memory projection for i_o duke@435: set_memory ( _gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx ); duke@435: make_slow_call_ex(allocx, env()->OutOfMemoryError_klass(), true); duke@435: duke@435: // create a memory projection as for the normal control path duke@435: Node* malloc = _gvn.transform(new (C, 1) ProjNode(allocx, TypeFunc::Memory)); duke@435: set_memory(malloc, rawidx); duke@435: duke@435: // a normal slow-call doesn't change i_o, but an allocation does duke@435: // we create a separate i_o projection for the normal control path duke@435: set_i_o(_gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::I_O, false) ) ); duke@435: Node* rawoop = _gvn.transform( new (C, 1) ProjNode(allocx, TypeFunc::Parms) ); duke@435: duke@435: // put in an initialization barrier duke@435: InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx, duke@435: rawoop)->as_Initialize(); duke@435: assert(alloc->initialization() == init, "2-way macro link must work"); duke@435: assert(init ->allocation() == alloc, "2-way macro link must work"); duke@435: if (ReduceFieldZeroing && !raw_mem_only) { duke@435: // Extract memory strands which may participate in the new object's duke@435: // initialization, and source them from the new InitializeNode. duke@435: // This will allow us to observe initializations when they occur, duke@435: // and link them properly (as a group) to the InitializeNode. 
duke@435: assert(init->in(InitializeNode::Memory) == malloc, "");
duke@435: MergeMemNode* minit_in = MergeMemNode::make(C, malloc);
duke@435: init->set_req(InitializeNode::Memory, minit_in);
duke@435: record_for_igvn(minit_in); // fold it up later, if possible
duke@435: Node* minit_out = memory(rawidx);
duke@435: assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
duke@435: if (oop_type->isa_aryptr()) {
duke@435: const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
duke@435: int elemidx = C->get_alias_index(telemref);
duke@435: hook_memory_on_init(*this, elemidx, minit_in, minit_out);
duke@435: } else if (oop_type->isa_instptr()) {
duke@435: ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
duke@435: for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
duke@435: ciField* field = ik->nonstatic_field_at(i);
kvn@458: if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
duke@435: continue; // do not bother to track really large numbers of fields
duke@435: // Find (or create) the alias category for this field:
duke@435: int fieldidx = C->alias_type(field)->index();
duke@435: hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
duke@435: }
duke@435: }
duke@435: }
duke@435:
duke@435: // Cast raw oop to the real thing...
duke@435: Node* javaoop = new (C, 2) CheckCastPPNode(control(), rawoop, oop_type);
duke@435: javaoop = _gvn.transform(javaoop);
duke@435: C->set_recent_alloc(control(), javaoop);
duke@435: assert(just_allocated_object(control()) == javaoop, "just allocated");
duke@435:
duke@435: #ifdef ASSERT
rasbold@801: { // Verify that the AllocateNode::Ideal_allocation recognizers work:
rasbold@801: assert(AllocateNode::Ideal_allocation(rawoop, &_gvn) == alloc,
rasbold@801: "Ideal_allocation works");
rasbold@801: assert(AllocateNode::Ideal_allocation(javaoop, &_gvn) == alloc,
rasbold@801: "Ideal_allocation works");
duke@435: if (alloc->is_AllocateArray()) {
rasbold@801: assert(AllocateArrayNode::Ideal_array_allocation(rawoop, &_gvn) == alloc->as_AllocateArray(),
rasbold@801: "Ideal_allocation works");
rasbold@801: assert(AllocateArrayNode::Ideal_array_allocation(javaoop, &_gvn) == alloc->as_AllocateArray(),
rasbold@801: "Ideal_allocation works");
duke@435: } else {
rasbold@801: assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
duke@435: }
duke@435: }
duke@435: #endif //ASSERT
duke@435:
duke@435: return javaoop;
duke@435: }
duke@435:
duke@435: //---------------------------new_instance--------------------------------------
duke@435: // This routine takes a klass_node which may be constant (for a static type)
duke@435: // or may be non-constant (for reflective code). It will work equally well
duke@435: // for either, and the graph will fold nicely if the optimizer later reduces
duke@435: // the type to a constant.
duke@435: // The optional arguments are for specialized use by intrinsics:
duke@435: // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
duke@435: // - If 'raw_mem_only', do not cast the result to an oop.
duke@435: // - If 'return_size_val', report the total object size to the caller.
duke@435: Node* GraphKit::new_instance(Node* klass_node,
duke@435: Node* extra_slow_test,
duke@435: bool raw_mem_only, // affect only raw memory
duke@435: Node* *return_size_val) {
duke@435: // Compute size in doublewords
duke@435: // The size is always an integral number of doublewords, represented
duke@435: // as a positive bytewise size stored in the klass's layout_helper.
duke@435: // The layout_helper also encodes (in a low bit) the need for a slow path.
duke@435: jint layout_con = Klass::_lh_neutral_value;
duke@435: Node* layout_val = get_layout_helper(klass_node, layout_con);
duke@435: int layout_is_con = (layout_val == NULL);
duke@435:
duke@435: if (extra_slow_test == NULL) extra_slow_test = intcon(0);
duke@435: // Generate the initial go-slow test. It's either ALWAYS (return a
duke@435: // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
duke@435: // case) a computed value derived from the layout_helper.
duke@435: Node* initial_slow_test = NULL;
duke@435: if (layout_is_con) {
duke@435: assert(!StressReflectiveCode, "stress mode does not use these paths");
duke@435: bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
duke@435: initial_slow_test = must_go_slow? intcon(1): extra_slow_test;
duke@435:
duke@435: } else { // reflective case
duke@435: // This reflective path is used by Unsafe.allocateInstance.
duke@435: // (It may be stress-tested by specifying StressReflectiveCode.)
duke@435: // Basically, we want to get into the VM if there's an illegal argument.
duke@435: Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
duke@435: initial_slow_test = _gvn.transform( new (C, 3) AndINode(layout_val, bit) );
duke@435: if (extra_slow_test != intcon(0)) {
duke@435: initial_slow_test = _gvn.transform( new (C, 3) OrINode(initial_slow_test, extra_slow_test) );
duke@435: }
duke@435: // (Macro-expander will further convert this to a Bool, if necessary.)
duke@435: }
duke@435:
duke@435: // Find the size in bytes. This is easy; it's the layout_helper.
duke@435: // The size value must be valid even if the slow path is taken.
duke@435: Node* size = NULL;
duke@435: if (layout_is_con) {
duke@435: size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
duke@435: } else { // reflective case
duke@435: // This reflective path is used by clone and Unsafe.allocateInstance.
duke@435: size = ConvI2X(layout_val);
duke@435:
duke@435: // Clear the low bits to extract layout_helper_size_in_bytes:
duke@435: assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
duke@435: Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
duke@435: size = _gvn.transform( new (C, 3) AndXNode(size, mask) );
duke@435: }
duke@435: if (return_size_val != NULL) {
duke@435: (*return_size_val) = size;
duke@435: }
duke@435:
duke@435: // This is a precise notnull oop of the klass.
duke@435: // (Actually, it need not be precise if this is a reflective allocation.)
duke@435: // It's what we cast the result to.
duke@435: const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
duke@435: if (!tklass) tklass = TypeKlassPtr::OBJECT;
duke@435: const TypeOopPtr* oop_type = tklass->as_instance_type();
duke@435:
duke@435: // Now generate allocation code
kvn@509:
kvn@1000: // The entire memory state is needed for the slow path of the allocation
kvn@1000: // since GC and deoptimization can happen.
kvn@1000: Node *mem = reset_memory(); kvn@1000: set_all_memory(mem); // Create new memory state kvn@509: duke@435: AllocateNode* alloc duke@435: = new (C, AllocateNode::ParmLimit) duke@435: AllocateNode(C, AllocateNode::alloc_type(), kvn@509: control(), mem, i_o(), duke@435: size, klass_node, duke@435: initial_slow_test); duke@435: duke@435: return set_output_for_allocation(alloc, oop_type, raw_mem_only); duke@435: } duke@435: duke@435: //-------------------------------new_array------------------------------------- duke@435: // helper for both newarray and anewarray duke@435: // The 'length' parameter is (obviously) the length of the array. duke@435: // See comments on new_instance for the meaning of the other arguments. duke@435: Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) duke@435: Node* length, // number of array elements duke@435: bool raw_mem_only, // affect only raw memory duke@435: Node* *return_size_val) { duke@435: jint layout_con = Klass::_lh_neutral_value; duke@435: Node* layout_val = get_layout_helper(klass_node, layout_con); duke@435: int layout_is_con = (layout_val == NULL); duke@435: duke@435: if (!layout_is_con && !StressReflectiveCode && duke@435: !too_many_traps(Deoptimization::Reason_class_check)) { duke@435: // This is a reflective array creation site. duke@435: // Optimistically assume that it is a subtype of Object[], duke@435: // so that we can fold up all the address arithmetic. duke@435: layout_con = Klass::array_layout_helper(T_OBJECT); duke@435: Node* cmp_lh = _gvn.transform( new(C, 3) CmpINode(layout_val, intcon(layout_con)) ); duke@435: Node* bol_lh = _gvn.transform( new(C, 2) BoolNode(cmp_lh, BoolTest::eq) ); duke@435: { BuildCutout unless(this, bol_lh, PROB_MAX); duke@435: uncommon_trap(Deoptimization::Reason_class_check, duke@435: Deoptimization::Action_maybe_recompile); duke@435: } duke@435: layout_val = NULL; duke@435: layout_is_con = true; duke@435: } duke@435: duke@435: // Generate the initial go-slow test. Make sure we do not overflow duke@435: // if length is huge (near 2Gig) or negative! We do not need duke@435: // exact double-words here, just a close approximation of needed duke@435: // double-words. We can't add any offset or rounding bits, lest we duke@435: // take a size -1 of bytes and make it positive. Use an unsigned duke@435: // compare, so negative sizes look hugely positive. duke@435: int fast_size_limit = FastAllocateSizeLimit; duke@435: if (layout_is_con) { duke@435: assert(!StressReflectiveCode, "stress mode does not use these paths"); duke@435: // Increase the size limit if we have exact knowledge of array type. duke@435: int log2_esize = Klass::layout_helper_log2_element_size(layout_con); duke@435: fast_size_limit <<= (LogBytesPerLong - log2_esize); duke@435: } duke@435: duke@435: Node* initial_slow_cmp = _gvn.transform( new (C, 3) CmpUNode( length, intcon( fast_size_limit ) ) ); duke@435: Node* initial_slow_test = _gvn.transform( new (C, 2) BoolNode( initial_slow_cmp, BoolTest::gt ) ); duke@435: if (initial_slow_test->is_Bool()) { duke@435: // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick. 
duke@435: initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn); duke@435: } duke@435: duke@435: // --- Size Computation --- duke@435: // array_size = round_to_heap(array_header + (length << elem_shift)); duke@435: // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes) duke@435: // and round_to(x, y) == ((x + y-1) & ~(y-1)) duke@435: // The rounding mask is strength-reduced, if possible. duke@435: int round_mask = MinObjAlignmentInBytes - 1; duke@435: Node* header_size = NULL; duke@435: int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); duke@435: // (T_BYTE has the weakest alignment and size restrictions...) duke@435: if (layout_is_con) { duke@435: int hsize = Klass::layout_helper_header_size(layout_con); duke@435: int eshift = Klass::layout_helper_log2_element_size(layout_con); duke@435: BasicType etype = Klass::layout_helper_element_type(layout_con); duke@435: if ((round_mask & ~right_n_bits(eshift)) == 0) duke@435: round_mask = 0; // strength-reduce it if it goes away completely duke@435: assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded"); duke@435: assert(header_size_min <= hsize, "generic minimum is smallest"); duke@435: header_size_min = hsize; duke@435: header_size = intcon(hsize + round_mask); duke@435: } else { duke@435: Node* hss = intcon(Klass::_lh_header_size_shift); duke@435: Node* hsm = intcon(Klass::_lh_header_size_mask); duke@435: Node* hsize = _gvn.transform( new(C, 3) URShiftINode(layout_val, hss) ); duke@435: hsize = _gvn.transform( new(C, 3) AndINode(hsize, hsm) ); duke@435: Node* mask = intcon(round_mask); duke@435: header_size = _gvn.transform( new(C, 3) AddINode(hsize, mask) ); duke@435: } duke@435: duke@435: Node* elem_shift = NULL; duke@435: if (layout_is_con) { duke@435: int eshift = Klass::layout_helper_log2_element_size(layout_con); duke@435: if (eshift != 0) duke@435: elem_shift = intcon(eshift); duke@435: } else { duke@435: // There is no need to mask or shift this value. duke@435: // The semantics of LShiftINode include an implicit mask to 0x1F. duke@435: assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place"); duke@435: elem_shift = layout_val; duke@435: } duke@435: duke@435: // Transition to native address size for all offset calculations: duke@435: Node* lengthx = ConvI2X(length); duke@435: Node* headerx = ConvI2X(header_size); duke@435: #ifdef _LP64 duke@435: { const TypeLong* tllen = _gvn.find_long_type(lengthx); duke@435: if (tllen != NULL && tllen->_lo < 0) { duke@435: // Add a manual constraint to a positive range. Cf. array_element_address. duke@435: jlong size_max = arrayOopDesc::max_array_length(T_BYTE); duke@435: if (size_max > tllen->_hi) size_max = tllen->_hi; duke@435: const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin); duke@435: lengthx = _gvn.transform( new (C, 2) ConvI2LNode(length, tlcon)); duke@435: } duke@435: } duke@435: #endif duke@435: duke@435: // Combine header size (plus rounding) and body size. Then round down. duke@435: // This computation cannot overflow, because it is used only in two duke@435: // places, one where the length is sharply limited, and the other duke@435: // after a successful allocation. 
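// Worked example of the size computation below (illustrative numbers only, not
// taken from any particular platform): for an int[] of length 10 with a 16-byte
// header, elem_shift == 2 and MinObjAlignmentInBytes == 8 (so round_mask == 7,
// and header_size was set to 16 + 7 = 23 above):
//   abody = 10 << 2        = 40
//   size  = 23 + 40        = 63
//   size &= ~7             -> 56 bytes
// which equals round_to(16 + 40, 8), the properly aligned array size.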
duke@435: Node* abody = lengthx;
duke@435: if (elem_shift != NULL)
duke@435: abody = _gvn.transform( new(C, 3) LShiftXNode(lengthx, elem_shift) );
duke@435: Node* size = _gvn.transform( new(C, 3) AddXNode(headerx, abody) );
duke@435: if (round_mask != 0) {
duke@435: Node* mask = MakeConX(~round_mask);
duke@435: size = _gvn.transform( new(C, 3) AndXNode(size, mask) );
duke@435: }
duke@435: // else if round_mask == 0, the size computation is self-rounding
duke@435:
duke@435: if (return_size_val != NULL) {
duke@435: // This is the size
duke@435: (*return_size_val) = size;
duke@435: }
duke@435:
duke@435: // Now generate allocation code
kvn@509:
kvn@1000: // The entire memory state is needed for the slow path of the allocation
kvn@1000: // since GC and deoptimization can happen.
kvn@1000: Node *mem = reset_memory();
kvn@1000: set_all_memory(mem); // Create new memory state
kvn@509:
duke@435: // Create the AllocateArrayNode and its result projections
duke@435: AllocateArrayNode* alloc
duke@435: = new (C, AllocateArrayNode::ParmLimit)
duke@435: AllocateArrayNode(C, AllocateArrayNode::alloc_type(),
kvn@509: control(), mem, i_o(),
duke@435: size, klass_node,
duke@435: initial_slow_test,
duke@435: length);
duke@435:
duke@435: // Cast to correct type. Note that the klass_node may be constant or not,
duke@435: // and in the latter case the actual array type will be inexact also.
duke@435: // (This happens via a non-constant argument to inline_native_newArray.)
duke@435: // In any case, the value of klass_node provides the desired array type.
duke@435: const TypeInt* length_type = _gvn.find_int_type(length);
duke@435: const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
duke@435: if (ary_type->isa_aryptr() && length_type != NULL) {
duke@435: // Try to get a better type than POS for the size
duke@435: ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
duke@435: }
duke@435:
duke@435: Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
duke@435:
rasbold@801: // Cast length on remaining path to be as narrow as possible
rasbold@801: if (map()->find_edge(length) >= 0) {
rasbold@801: Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
rasbold@801: if (ccast != length) {
rasbold@801: _gvn.set_type_bottom(ccast);
rasbold@801: record_for_igvn(ccast);
duke@435: replace_in_map(length, ccast);
duke@435: }
duke@435: }
duke@435:
duke@435: return javaoop;
duke@435: }
duke@435:
duke@435: // The following "Ideal_foo" functions are placed here because they recognize
duke@435: // the graph shapes created by the functions immediately above.
duke@435:
duke@435: //---------------------------Ideal_allocation----------------------------------
duke@435: // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
duke@435: AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
duke@435: if (ptr == NULL) { // reduce dumb test in callers
duke@435: return NULL;
duke@435: }
duke@435: if (ptr->is_CheckCastPP()) { // strip a raw-to-oop cast
duke@435: ptr = ptr->in(1);
duke@435: if (ptr == NULL) return NULL;
duke@435: }
duke@435: if (ptr->is_Proj()) {
duke@435: Node* allo = ptr->in(0);
duke@435: if (allo != NULL && allo->is_Allocate()) {
duke@435: return allo->as_Allocate();
duke@435: }
duke@435: }
duke@435: // Report failure to match.
duke@435: return NULL;
duke@435: }
duke@435:
duke@435: // Fancy version which also strips off an offset (and reports it to caller).
duke@435: AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase, duke@435: intptr_t& offset) { duke@435: Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset); duke@435: if (base == NULL) return NULL; duke@435: return Ideal_allocation(base, phase); duke@435: } duke@435: duke@435: // Trace Initialize <- Proj[Parm] <- Allocate duke@435: AllocateNode* InitializeNode::allocation() { duke@435: Node* rawoop = in(InitializeNode::RawAddress); duke@435: if (rawoop->is_Proj()) { duke@435: Node* alloc = rawoop->in(0); duke@435: if (alloc->is_Allocate()) { duke@435: return alloc->as_Allocate(); duke@435: } duke@435: } duke@435: return NULL; duke@435: } duke@435: duke@435: // Trace Allocate -> Proj[Parm] -> Initialize duke@435: InitializeNode* AllocateNode::initialization() { duke@435: ProjNode* rawoop = proj_out(AllocateNode::RawAddress); duke@435: if (rawoop == NULL) return NULL; duke@435: for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) { duke@435: Node* init = rawoop->fast_out(i); duke@435: if (init->is_Initialize()) { duke@435: assert(init->as_Initialize()->allocation() == this, "2-way link"); duke@435: return init->as_Initialize(); duke@435: } duke@435: } duke@435: return NULL; duke@435: } ysr@777: ysr@777: void GraphKit::g1_write_barrier_pre(Node* obj, ysr@777: Node* adr, ysr@777: uint alias_idx, ysr@777: Node* val, ysr@777: const Type* val_type, ysr@777: BasicType bt) { ysr@777: IdealKit ideal(gvn(), control(), merged_memory(), true); ysr@777: #define __ ideal. ysr@777: __ declares_done(); ysr@777: ysr@777: Node* thread = __ thread(); ysr@777: ysr@777: Node* no_ctrl = NULL; ysr@777: Node* no_base = __ top(); ysr@777: Node* zero = __ ConI(0); ysr@777: ysr@777: float likely = PROB_LIKELY(0.999); ysr@777: float unlikely = PROB_UNLIKELY(0.999); ysr@777: ysr@777: BasicType active_type = in_bytes(PtrQueue::byte_width_of_active()) == 4 ? 
T_INT : T_BYTE; ysr@777: assert(in_bytes(PtrQueue::byte_width_of_active()) == 4 || in_bytes(PtrQueue::byte_width_of_active()) == 1, "flag width"); ysr@777: ysr@777: // Offsets into the thread ysr@777: const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 648 ysr@777: PtrQueue::byte_offset_of_active()); ysr@777: const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 656 ysr@777: PtrQueue::byte_offset_of_index()); ysr@777: const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + // 652 ysr@777: PtrQueue::byte_offset_of_buf()); ysr@777: // Now the actual pointers into the thread ysr@777: ysr@777: // set_control( ctl); ysr@777: ysr@777: Node* marking_adr = __ AddP(no_base, thread, __ ConX(marking_offset)); ysr@777: Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset)); ysr@777: Node* index_adr = __ AddP(no_base, thread, __ ConX(index_offset)); ysr@777: ysr@777: // Now some of the values ysr@777: never@979: Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw); ysr@777: ysr@777: // if (!marking) ysr@777: __ if_then(marking, BoolTest::ne, zero); { never@979: Node* index = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw); ysr@777: ysr@777: const Type* t1 = adr->bottom_type(); ysr@777: const Type* t2 = val->bottom_type(); ysr@777: ysr@777: Node* orig = __ load(no_ctrl, adr, val_type, bt, alias_idx); ysr@777: // if (orig != NULL) ysr@777: __ if_then(orig, BoolTest::ne, null()); { never@979: Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw); ysr@777: ysr@777: // load original value ysr@777: // alias_idx correct?? ysr@777: ysr@777: // is the queue for this thread full? ysr@777: __ if_then(index, BoolTest::ne, zero, likely); { ysr@777: ysr@777: // decrement the index ysr@777: Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t))); ysr@777: Node* next_indexX = next_index; ysr@777: #ifdef _LP64 ysr@777: // We could refine the type for what it's worth ysr@777: // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue); ysr@777: next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) ); ysr@777: #endif // _LP64 ysr@777: ysr@777: // Now get the buffer location we will log the original value into and store it ysr@777: ysr@777: Node *log_addr = __ AddP(no_base, buffer, next_indexX); ysr@777: // __ store(__ ctrl(), log_addr, orig, T_OBJECT, C->get_alias_index(TypeOopPtr::BOTTOM)); ysr@777: __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw); ysr@777: ysr@777: ysr@777: // update the index ysr@777: // __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw); ysr@777: // This is a hack to force this store to occur before the oop store that is coming up ysr@777: __ store(__ ctrl(), index_adr, next_index, T_INT, C->get_alias_index(TypeOopPtr::BOTTOM)); ysr@777: ysr@777: } __ else_(); { ysr@777: ysr@777: // logging buffer is full, call the runtime ysr@777: const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type(); ysr@777: // __ make_leaf_call(tf, OptoRuntime::g1_wb_pre_Java(), "g1_wb_pre", orig, thread); ysr@777: __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, thread); ysr@777: } __ end_if(); ysr@777: } __ end_if(); ysr@777: } __ end_if(); ysr@777: ysr@777: __ drain_delay_transform(); ysr@777: set_control( __ ctrl()); ysr@777: set_all_memory( __ merged_memory()); ysr@777: 
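// For reference, the IdealKit graph assembled above corresponds roughly to the
// following pseudo-code for the SATB pre-barrier (an illustrative sketch only;
// the thread-field accessors named here are shorthand for the offset-based
// loads above, not real JavaThread methods):
//
//   if (thread->satb_queue_active != 0) {            // concurrent marking in progress?
//     oop pre_val = *adr;                            // value about to be overwritten
//     if (pre_val != NULL) {
//       if (thread->satb_queue_index != 0) {         // room left in the per-thread buffer?
//         thread->satb_queue_index -= sizeof(void*);
//         *(thread->satb_queue_buf + thread->satb_queue_index) = pre_val;
//       } else {
//         SharedRuntime::g1_wb_pre(pre_val, thread); // buffer full: hand off to the runtime
//       }
//     }
//   }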
ysr@777: #undef __
ysr@777: }
ysr@777:
ysr@777: //
ysr@777: // Update the card table and add card address to the queue
ysr@777: //
ysr@777: void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store, Node* index, Node* index_adr, Node* buffer, const TypeFunc* tf) {
ysr@777: #define __ ideal->
ysr@777: Node* zero = __ ConI(0);
ysr@777: Node* no_base = __ top();
ysr@777: BasicType card_bt = T_BYTE;
ysr@777: // Smash zero into card. MUST BE ORDERED WRT THE STORE
ysr@777: __ storeCM(__ ctrl(), card_adr, zero, store, card_bt, Compile::AliasIdxRaw);
ysr@777:
ysr@777: // Now do the queue work
ysr@777: __ if_then(index, BoolTest::ne, zero); {
ysr@777:
ysr@777: Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
ysr@777: Node* next_indexX = next_index;
ysr@777: #ifdef _LP64
ysr@777: // We could refine the type for what it's worth
ysr@777: // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
ysr@777: next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
ysr@777: #endif // _LP64
ysr@777: Node* log_addr = __ AddP(no_base, buffer, next_indexX);
ysr@777:
ysr@777: __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
ysr@777: __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
ysr@777:
ysr@777: } __ else_(); {
ysr@777: __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
ysr@777: } __ end_if();
ysr@777: #undef __
ysr@777: }
ysr@777:
ysr@777: void GraphKit::g1_write_barrier_post(Node* store,
ysr@777: Node* obj,
ysr@777: Node* adr,
ysr@777: uint alias_idx,
ysr@777: Node* val,
ysr@777: BasicType bt,
ysr@777: bool use_precise) {
ysr@777: // If we are writing a NULL then we need no post barrier
ysr@777:
ysr@777: if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
ysr@777: // Must be NULL
ysr@777: const Type* t = val->bottom_type();
ysr@777: assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
ysr@777: // No post barrier if writing NULL
ysr@777: return;
ysr@777: }
ysr@777:
ysr@777: if (!use_precise) {
ysr@777: // All card marks for a (non-array) instance are in one place:
ysr@777: adr = obj;
ysr@777: }
ysr@777: // (Else it's an array (or unknown), and we want more precise card marks.)
ysr@777: assert(adr != NULL, "");
ysr@777:
ysr@777: IdealKit ideal(gvn(), control(), merged_memory(), true);
ysr@777: #define __ ideal.
ysr@777: __ declares_done();
ysr@777:
ysr@777: Node* thread = __ thread();
ysr@777:
ysr@777: Node* no_ctrl = NULL;
ysr@777: Node* no_base = __ top();
ysr@777: float likely = PROB_LIKELY(0.999);
ysr@777: float unlikely = PROB_UNLIKELY(0.999);
ysr@777: Node* zero = __ ConI(0);
ysr@777: Node* zeroX = __ ConX(0);
ysr@777:
ysr@777: // Get the alias_index for raw card-mark memory
ysr@777: const TypePtr* card_type = TypeRawPtr::BOTTOM;
ysr@777:
ysr@777: const TypeFunc *tf = OptoRuntime::g1_wb_post_Type();
ysr@777:
ysr@777: // Offsets into the thread
ysr@777: const int index_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777: PtrQueue::byte_offset_of_index());
ysr@777: const int buffer_offset = in_bytes(JavaThread::dirty_card_queue_offset() +
ysr@777: PtrQueue::byte_offset_of_buf());
ysr@777:
ysr@777: // Pointers into the thread
ysr@777:
ysr@777: Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset));
ysr@777: Node* index_adr = __ AddP(no_base, thread, __ ConX(index_offset));
ysr@777:
ysr@777: // Now some values
ysr@777:
ysr@777: Node* index = __ load(no_ctrl, index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
ysr@777: Node* buffer = __ load(no_ctrl, buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
ysr@777:
ysr@777:
ysr@777: // Convert the store obj pointer to an int prior to doing math on it
ysr@777: // Using addr rather than obj gives more accurate card marks
ysr@777:
ysr@777: // Node* cast = __ CastPX(no_ctrl, adr /* obj */);
ysr@777:
ysr@777: // Must use ctrl to prevent "integerized oop" existing across safepoint
ysr@777: Node* cast = __ CastPX(__ ctrl(), ( use_precise ? adr : obj ));
ysr@777:
ysr@777: // Divide pointer by card size
ysr@777: Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
ysr@777:
ysr@777: // Combine card table base and card offset
never@998: Node *card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
ysr@777:
ysr@777: // If we know the value being stored, does the store cross regions?
ysr@777:
ysr@777: if (val != NULL) {
ysr@777: // Does the store cause us to cross regions?
ysr@777:
ysr@777: // Should be able to do an unsigned compare of region_size instead of
ysr@777: // an extra shift. Do we have an unsigned compare??
ysr@777: // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
ysr@777: Node* xor_res = __ URShiftX ( __ XorX( cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));
ysr@777:
ysr@777: // if (xor_res == 0) same region so skip
ysr@777: __ if_then(xor_res, BoolTest::ne, zeroX); {
ysr@777:
ysr@777: // No barrier if we are storing a NULL
ysr@777: __ if_then(val, BoolTest::ne, null(), unlikely); {
ysr@777:
ysr@777: // Ok must mark the card if not already dirty
ysr@777:
ysr@777: // load the original value of the card
ysr@777: Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
ysr@777:
ysr@777: __ if_then(card_val, BoolTest::ne, zero); {
ysr@777: g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
ysr@777: } __ end_if();
ysr@777: } __ end_if();
ysr@777: } __ end_if();
ysr@777: } else {
ysr@777: g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
ysr@777: }
ysr@777:
ysr@777:
ysr@777: __ drain_delay_transform();
ysr@777: set_control( __ ctrl());
ysr@777: set_all_memory( __ merged_memory());
ysr@777: #undef __
ysr@777:
ysr@777: }
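// For reference, g1_write_barrier_post above builds a graph that behaves roughly
// like the following pseudo-code (an illustrative sketch only; the queue accessors
// are shorthand for the offset-based loads above, not real JavaThread methods):
//
//   uintptr_t p = (uintptr_t)(use_precise ? adr : obj);
//   jbyte* card_adr = byte_map_base + (p >> card_shift);
//   if (((p ^ (uintptr_t)val) >> LogOfHRGrainBytes) != 0   // does the store cross regions?
//       && val != NULL                                     // and stores a non-NULL oop?
//       && *card_adr != 0) {                               // and the card is not already dirty (0 == dirty)?
//     *card_adr = 0;                                       // dirty the card, ordered after the oop store
//     if (thread->dirty_card_queue_index != 0) {           // room left in the per-thread buffer?
//       thread->dirty_card_queue_index -= sizeof(void*);
//       *(thread->dirty_card_queue_buf + thread->dirty_card_queue_index) = (void*)card_adr;
//     } else {
//       SharedRuntime::g1_wb_post(card_adr, thread);       // buffer full: hand off to the runtime
//     }
//   }
//
// When the compiler cannot see the stored value (val == NULL in this method), the
// region and NULL checks are skipped and the card is marked unconditionally.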