Wed, 09 Dec 2009 16:40:45 -0800
6895383: JCK test throws NPE for method compiled with Escape Analysis
Summary: Add missing checks for MemBar nodes in EA.
Reviewed-by: never
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
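//
// A minimal usage sketch (hypothetical, composed from the accessors
// declared below): pop two int operands, combine them through the GVN,
// and push the result back on the modelled JVM expression stack.
//
//   GraphKit kit(jvms);            // operate on an existing JVM state
//   Node* b = kit.pop();
//   Node* a = kit.pop();
//   kit.push(kit.AddI(a, b));      // transformed AddINode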
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _sp;        // JVM Expression Stack Pointer
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  SafePointNode*    map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse* is_Parse() const { return NULL; }

  ciEnv*        env()   const { return _env; }
  PhaseGVN&     gvn()   const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null()  const { return zerocon(T_OBJECT); }
  Node*         top()   const { return C->top(); }
  RootNode*     root()  const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)        const { return _gvn.intcon(con); }
  Node* longcon(jlong con)      const { return _gvn.longcon(con); }
  Node* makecon(const Type *t)  const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)   const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)

  // Helper for byte_map_base
  Node* byte_map_base_node() {
    // Get base of card map
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed by the StartNode::enum.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.
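  //
  // Illustratively (an assumption about the usual layout, not a contract),
  // a map for a method with two locals and one value on the stack holds:
  //   [Control, I_O, Memory, FramePtr, ReturnAdr | local0, local1 | stack0 | monitors...]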

  SafePointNode* map()        const { return _map; }
  bool        has_exceptions()const { return _exceptions != NULL; }
  JVMState*   jvms()          const { return map_not_null()->_jvms; }
  int         sp()            const { return _sp; }
  int         bci()           const { return _bci; }
  Bytecodes::Code java_bc()   const;
  ciMethod*   method()        const { return _method; }

  void set_jvms(JVMState* jvms)   { set_map(jvms->map());
                                    assert(jvms == this->jvms(), "sanity");
                                    _sp = jvms->sp();
                                    _bci = jvms->bci();
                                    _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m)  { _map = m; debug_only(verify_map()); }
  void set_sp(int i)              { assert(i >= 0, "must be non-negative"); _sp = i; }
  void clean_stack(int from_sp);  // clear garbage beyond from_sp to top

  void inc_sp(int i)              { set_sp(sp() + i); }
  void set_bci(int bci)           { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Tell if there is a saved exception in the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, and return the combined states.
  JVMState* transfer_exceptions_into_jvms();

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);
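  //
  // E.g. (a sketch, not verbatim from any caller): a failing range check
  // on index 'idx' would be raised as
  //   builtin_throw(Deoptimization::Reason_range_check, idx);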

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
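  //
  // E.g. (sketch; 'obj' and 'offset_in_bytes' are illustrative): the
  // address of a field at a fixed byte offset within an object:
  //   Node* adr = basic_plus_adr(obj, offset_in_bytes);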


  // Some convenient shortcuts for common nodes
  Node* IfTrue( IfNode* iff)                  { return _gvn.transform(new (C,1) IfTrueNode(iff));  }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C,1) IfFalseNode(iff)); }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AddINode(l, r));   }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C,3) SubINode(l, r));   }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MulINode(l, r));   }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C,3) DivINode(ctl, l, r)); }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AndINode(l, r));   }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C,3) OrINode(l, r));    }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C,3) XorINode(l, r));   }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MaxINode(l, r));   }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MinINode(l, r));   }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) LShiftINode(l, r));  }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) RShiftINode(l, r));  }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C,3) URShiftINode(l, r)); }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpINode(l, r));   }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpLNode(l, r));   }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpPNode(l, r));   }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C,4) AddPNode(b, a, o)); }

  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp.)
  Node* ConvI2L(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);
  // Helper function to do a NULL pointer check or ZERO check based on type.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null, Node* *null_control);
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* do_null_check(Node* value, BasicType type) {
    return null_check_common(value, type, false, NULL);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* do_null_assert(Node* value, BasicType type) {
    return null_check_common(value, type, true, NULL);
  }
  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false);
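  //
  // E.g. (sketch): the usual calling pattern separates the two paths:
  //   Node* null_ctl = top();
  //   value = null_check_oop(value, &null_ctl);
  //   // 'value' is cast-not-null here; 'null_ctl' carries the null path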

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null(); _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(local(i+0));  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }
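  //
  // E.g. (sketch): a two-word binary op pops and pushes pairs, so a long
  // add could be built as
  //   Node* b = pop_pair();
  //   Node* a = pop_pair();
  //   push_pair(_gvn.transform(new (C,3) AddLNode(a, b)));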

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(n);       // T_INT, ...
    else if (n_size == 2)  push_pair(n);  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()             const { return map_not_null()->control(); }
  Node* i_o()                 const { return map_not_null()->i_o(); }
  Node* returnadr()           const { return map_not_null()->returnadr(); }
  Node* frameptr()            const { return map_not_null()->frameptr(); }
  Node* local(uint idx)       const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)       const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)    const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx) const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx) const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c) { map_not_null()->set_control(c); }
  void set_i_o      (Node* c) { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)    { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)    { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c) { map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)     { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note:  require_atomic_access is useful only with T_LONG.)
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                  bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     require_atomic_access);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                  bool require_atomic_access = false);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           require_atomic_access);
  }
  // This is the base version which is given an alias index.
  // Return the new StoreXNode.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        bool require_atomic_access = false);
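  //
  // E.g. (sketch; 'adr' and 'ftype' are illustrative): an int field can be
  // read, incremented, and written back through the parser's memory state:
  //   const TypePtr* ftype = adr->bottom_type()->is_ptr();
  //   Node* v = make_load(control(), adr, TypeInt::INT, T_INT, ftype);
  //   store_to_memory(control(), adr, AddI(v, intcon(1)), T_INT, ftype);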

  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise for objects.  We use precise for unknown
  // since we don't know if we have an array or an object or even
  // where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value.  QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt);
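  //
  // E.g. (sketch; all names illustrative): an aastore-style element store
  // would use the precise, array flavor:
  //   store_oop_to_array(control(), ary, elem_adr, elem_adr_type,
  //                      val, val_type, T_OBJECT);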

  // For the few cases where the barriers need special help
  void pre_barrier(Node* ctl, Node* obj, Node* adr, uint adr_idx,
                   Node* val, const TypeOopPtr* val_type, BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver, which is in argument(0).
  Node* null_check_receiver(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    int nargs = 1 + callee->signature()->size();
    // Null check on self without removing any arguments.  The argument
    // null check technically happens in the wrong place, which can lead to
    // invalid stack traces when the primitive is inlined into a method
    // which handles NullPointerExceptions.
    Node* receiver = argument(0);
    _sp += nargs;
    receiver = do_null_check(receiver, T_OBJECT);
    _sp -= nargs;
    return receiver;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  void sync_kit(IdealKit& ideal);

  // vanilla/CMS post barrier
  void write_barrier_post(Node* store, Node* obj,
                          Node* adr, uint adr_idx, Node* val, bool use_precise);

  // G1 pre/post barriers
  void g1_write_barrier_pre(Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt);

  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  // Helper function for g1
 private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,         // CallLeafNoFPNode
    RC_NO_IO = 2,         // do not hook IO edges
    RC_NO_LEAF = 4,       // CallStaticJavaNode
    RC_MUST_THROW = 8,    // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,   // input memory is same as output
    RC_UNCOMMON = 32,     // freq. expected to be like an uncommon trap
    RC_LEAF = 0           // null value:  no flags set
  };
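  //
  // E.g. (sketch; the entry point, TypeFunc, and argument names are
  // hypothetical): a leaf call that touches no FP registers and hooks
  // no IO edges could be emitted as
  //   make_runtime_call(RC_LEAF | RC_NO_FP | RC_NO_IO,
  //                     some_call_Type(), some_entry_point,
  //                     "some_leaf_call", TypePtr::BOTTOM, arg0);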

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type* result_type, Node* null_result,
                      IfNode* fast_test, Node* fast_result,
                      address slow_call, const TypeFunc* slow_call_type,
                      Node* slow_arg, klassOop ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof(Node* subobj, Node* superkls);

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode.
  Node* gen_checkcast(Node* subobj, Node* superkls,
                      Node* *failure_control = NULL);

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);
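  //
  // E.g. (sketch): a caller can fold the check at parse time and only
  // emit the dynamic test when it must:
  //   switch (static_subtype_check(superk, subk)) {
  //   case SSC_always_true:   /* no runtime test needed  */  break;
  //   case SSC_always_false:  /* path is statically dead */  break;
  //   default:                /* emit gen_subtype_check on the klass nodes */  break;
  //   }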

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type,
                                  bool raw_mem_only);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     bool raw_mem_only = false,
                     Node* *return_size_val = NULL);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  bool raw_mem_only = false, Node* *return_size_val = NULL);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));  // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C, 2) IfNode(ctrl, tst, prob, cnt);  // New IfNode's
    _gvn.transform(iff);  // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);  // Range-check and Null-check removal is later
    return iff;
  }
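  //
  // E.g. (sketch; 'x' is illustrative): branch on (x == 0) and pick up
  // the two projections:
  //   Node*   tst = Bool(CmpI(x, intcon(0)), BoolTest::eq);
  //   IfNode* iff = create_and_map_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node*   eq_path = IfTrue(iff);
  //   Node*   ne_path = IfFalse(iff);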
};

// Helper class to support building of control flow branches.  Upon
// creation the map and sp at bci are cloned, and they are restored
// upon destruction.  Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if (p).
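//
// Typical use (a sketch in the style of the PreserveJVMState example;
// 'bol' is illustrative):
//
// { BuildCutout unless(this, bol, PROB_MAX);
//   // slow path -- must not fall through, e.g. ends in uncommon_trap(...)
// }
// // here the kit continues on the "then" path of the test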
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};

// Helper class to preserve the original _reexecute bit and _sp, and to
// restore them on destruction.
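//
// Typical use (a sketch; assumes the JVMState reexecute accessors):
//
// { PreserveReexecuteState preexecs(this);
//   jvms()->set_should_reexecute(true);
//   _sp += nargs;   // keep arguments live for re-execution
//   // ... emit code that may deoptimize ...
// }
// // here _reexecute and _sp are restored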
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};