Mon, 25 May 2020 14:24:27 +0800
8244407: JVM crashes after transformation in C2 IdealLoopTree::split_fall_in
Reviewed-by: thartmann, kvn, andrew
Contributed-by: zhouyong44@huawei.com
1 /*
2 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "interpreter/linkResolver.hpp"
31 #include "memory/universe.inline.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/idealGraphPrinter.hpp"
35 #include "opto/matcher.hpp"
36 #include "opto/memnode.hpp"
37 #include "opto/mulnode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/runtime.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/sharedRuntime.hpp"
43 extern int explicit_null_checks_inserted,
44 explicit_null_checks_elided;
46 //---------------------------------array_load----------------------------------
47 void Parse::array_load(BasicType elem_type) {
48 const Type* elem = Type::TOP;
49 Node* adr = array_addressing(elem_type, 0, &elem);
50 if (stopped()) return; // guaranteed null or range check
51 dec_sp(2); // Pop array and index
52 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
53 Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
54 push(ld);
55 }
58 //--------------------------------array_store----------------------------------
59 void Parse::array_store(BasicType elem_type) {
60 const Type* elem = Type::TOP;
61 Node* adr = array_addressing(elem_type, 1, &elem);
62 if (stopped()) return; // guaranteed null or range check
63 Node* val = pop();
64 dec_sp(2); // Pop array and index
65 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
66 if (elem == TypeInt::BOOL) {
67 elem_type = T_BOOLEAN;
68 }
69 store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type));
70 }
73 //------------------------------array_addressing-------------------------------
74 // Pull array and index from the stack. Compute pointer-to-element.
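// 'vals' is the number of stack slots sitting above the index (0 for loads, 1 for
// one-word stores, 2 for two-word stores); array and index are only peeked, not
// popped, so the stack is still intact if a null or range check traps.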
75 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
76 Node *idx = peek(0+vals); // Get from stack without popping
77 Node *ary = peek(1+vals); // in case of exception
79 // Null check the array base, with correct stack contents
80 ary = null_check(ary, T_ARRAY);
81 // Compile-time detect of null-exception?
82 if (stopped()) return top();
84 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
85 const TypeInt* sizetype = arytype->size();
86 const Type* elemtype = arytype->elem();
88 if (UseUniqueSubclasses && result2 != NULL) {
89 const Type* el = elemtype->make_ptr();
90 if (el && el->isa_instptr()) {
91 const TypeInstPtr* toop = el->is_instptr();
92 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
93 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
94 const Type* subklass = Type::get_const_type(toop->klass());
95 elemtype = subklass->join_speculative(el);
96 }
97 }
98 }
100 // Check for big class initializers with all constant offsets
101 // feeding into a known-size array.
102 const TypeInt* idxtype = _gvn.type(idx)->is_int();
103 // See if the highest idx value is less than the lowest array bound,
104 // and if the idx value cannot be negative:
105 bool need_range_check = true;
106 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
107 need_range_check = false;
108 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
109 }
111 ciKlass * arytype_klass = arytype->klass();
112 if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
113 // Only fails for some -Xcomp runs
114 // The class is unloaded. We have to run this bytecode in the interpreter.
115 uncommon_trap(Deoptimization::Reason_unloaded,
116 Deoptimization::Action_reinterpret,
117 arytype->klass(), "!loaded array");
118 return top();
119 }
121 // Do the range check
122 if (GenerateRangeChecks && need_range_check) {
123 Node* tst;
124 if (sizetype->_hi <= 0) {
125 // The greatest array bound is negative, so we can conclude that we're
126 // compiling unreachable code, but the unsigned compare trick used below
127 // only works with non-negative lengths. Instead, hack "tst" to be zero so
128 // the uncommon_trap path will always be taken.
129 tst = _gvn.intcon(0);
130 } else {
131 // Range is constant in array-oop, so we can use the original state of mem
132 Node* len = load_array_length(ary);
134 // Test length vs index (standard trick using unsigned compare)
135 Node* chk = _gvn.transform( new (C) CmpUNode(idx, len) );
136 BoolTest::mask btest = BoolTest::lt;
137 tst = _gvn.transform( new (C) BoolNode(chk, btest) );
138 }
139 // Branch to failure if out of bounds
140 { BuildCutout unless(this, tst, PROB_MAX);
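// Inside this scope, control is the failing (out-of-bounds) projection of the
// test; the in-bounds projection becomes current control again when 'unless'
// goes out of scope, so the trap/throw below applies only to the failure path.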
141 if (C->allow_range_check_smearing()) {
142 // Do not use builtin_throw, since range checks are sometimes
143 // made more stringent by an optimistic transformation.
144 // This creates "tentative" range checks at this point,
145 // which are not guaranteed to throw exceptions.
146 // See IfNode::Ideal, is_range_check, adjust_check.
147 uncommon_trap(Deoptimization::Reason_range_check,
148 Deoptimization::Action_make_not_entrant,
149 NULL, "range_check");
150 } else {
151 // If we have already recompiled with the range-check-widening
152 // heroic optimization turned off, then we must really be throwing
153 // range check exceptions.
154 builtin_throw(Deoptimization::Reason_range_check, idx);
155 }
156 }
157 }
158 // Check for always knowing you are throwing a range-check exception
159 if (stopped()) return top();
161 // Make array address computation control dependent to prevent it
162 // from floating above the range check during loop optimizations.
163 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
165 if (result2 != NULL) *result2 = elemtype;
167 assert(ptr != top(), "top should go hand-in-hand with stopped");
169 return ptr;
170 }
173 // returns IfNode
174 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
175 Node *cmp = _gvn.transform( new (C) CmpINode( a, b));  // integer compare feeding the two-way fork below
176 Node *tst = _gvn.transform( new (C) BoolNode( cmp, mask));
177 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
178 return iff;
179 }
181 // return Region node
182 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
183 Node *region = new (C) RegionNode(3); // 2 results
184 record_for_igvn(region);
185 region->init_req(1, iffalse);
186 region->init_req(2, iftrue );
187 _gvn.set_type(region, Type::CONTROL);
188 region = _gvn.transform(region);
189 set_control (region);
190 return region;
191 }
194 //------------------------------helper for tableswitch-------------------------
195 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
196 // True branch, use existing map info
197 { PreserveJVMState pjvms(this);
198 Node *iftrue = _gvn.transform( new (C) IfTrueNode (iff) );
199 set_control( iftrue );
200 profile_switch_case(prof_table_index);
201 merge_new_path(dest_bci_if_true);
202 }
204 // False branch
205 Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff) );
206 set_control( iffalse );
207 }
209 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
210 // True branch, use existing map info
211 { PreserveJVMState pjvms(this);
212 Node *iffalse = _gvn.transform( new (C) IfFalseNode (iff) );
213 set_control( iffalse );
214 profile_switch_case(prof_table_index);
215 merge_new_path(dest_bci_if_true);
216 }
218 // False branch
219 Node *iftrue = _gvn.transform( new (C) IfTrueNode(iff) );
220 set_control( iftrue );
221 }
223 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
224 // False branch, use existing map and control()
225 profile_switch_case(prof_table_index);
226 merge_new_path(dest_bci);
227 }
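// Comparator for the qsort call in do_lookupswitch: orders the (match value,
// dest bci) pairs of the switch table by their jint match value.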
230 extern "C" {
231 static int jint_cmp(const void *i, const void *j) {
232 int a = *(jint *)i;
233 int b = *(jint *)j;
234 return a > b ? 1 : a < b ? -1 : 0;
235 }
236 }
239 // Default value for methodData switch indexing. Must be a negative value to avoid
240 // conflict with any legal switch index.
241 #define NullTableIndex -1
243 class SwitchRange : public StackObj {
244 // a range of integers coupled with a bci destination
245 jint _lo; // inclusive lower limit
246 jint _hi; // inclusive upper limit
247 int _dest;
248 int _table_index; // index into method data table
250 public:
251 jint lo() const { return _lo; }
252 jint hi() const { return _hi; }
253 int dest() const { return _dest; }
254 int table_index() const { return _table_index; }
255 bool is_singleton() const { return _lo == _hi; }
257 void setRange(jint lo, jint hi, int dest, int table_index) {
258 assert(lo <= hi, "must be a non-empty range");
259 _lo = lo; _hi = hi; _dest = dest; _table_index = table_index;
260 }
261 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
262 assert(lo <= hi, "must be a non-empty range");
263 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
264 _hi = hi;
265 return true;
266 }
267 return false;
268 }
270 void set (jint value, int dest, int table_index) {
271 setRange(value, value, dest, table_index);
272 }
273 bool adjoin(jint value, int dest, int table_index) {
274 return adjoinRange(value, value, dest, table_index);
275 }
277 void print() {
278 if (is_singleton())
279 tty->print(" {%d}=>%d", lo(), dest());
280 else if (lo() == min_jint)
281 tty->print(" {..%d}=>%d", hi(), dest());
282 else if (hi() == max_jint)
283 tty->print(" {%d..}=>%d", lo(), dest());
284 else
285 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
286 }
287 };
290 //-------------------------------do_tableswitch--------------------------------
291 void Parse::do_tableswitch() {
292 Node* lookup = pop();
294 // Get information about tableswitch
295 int default_dest = iter().get_dest_table(0);
296 int lo_index = iter().get_int_table(1);
297 int hi_index = iter().get_int_table(2);
298 int len = hi_index - lo_index + 1;
300 if (len < 1) {
301 // If this is a backward branch, add safepoint
302 maybe_add_safepoint(default_dest);
303 merge(default_dest);
304 return;
305 }
307 // generate decision tree, using trichotomy when possible
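// The ranges built below cover all of [min_jint, max_jint]: values outside
// [lo_index, hi_index] fall through to default_dest, and adjacent cases with
// the same destination are merged via adjoin().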
308 int rnum = len+2;
309 bool makes_backward_branch = false;
310 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
311 int rp = -1;
312 if (lo_index != min_jint) {
313 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
314 }
315 for (int j = 0; j < len; j++) {
316 jint match_int = lo_index+j;
317 int dest = iter().get_dest_table(j+3);
318 makes_backward_branch |= (dest <= bci());
319 int table_index = method_data_update() ? j : NullTableIndex;
320 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
321 ranges[++rp].set(match_int, dest, table_index);
322 }
323 }
324 jint highest = lo_index+(len-1);
325 assert(ranges[rp].hi() == highest, "");
326 if (highest != max_jint
327 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
328 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
329 }
330 assert(rp < len+2, "not too many ranges");
332 // Safepoint in case a backward branch was observed
333 if( makes_backward_branch && UseLoopSafepoints )
334 add_safepoint();
336 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
337 }
340 //------------------------------do_lookupswitch--------------------------------
341 void Parse::do_lookupswitch() {
342 Node *lookup = pop(); // lookup value
343 // Get information about lookupswitch
344 int default_dest = iter().get_dest_table(0);
345 int len = iter().get_int_table(1);
347 if (len < 1) { // If this is a backward branch, add safepoint
348 maybe_add_safepoint(default_dest);
349 merge(default_dest);
350 return;
351 }
353 // generate decision tree, using trichotomy when possible
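// table[] interleaves (match value, dest bci) pairs; sorting by match value lets
// the loop below emit ranges in ascending order and fill any gaps between cases
// with ranges that branch to default_dest.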
354 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
355 {
356 for( int j = 0; j < len; j++ ) {
357 table[j+j+0] = iter().get_int_table(2+j+j);
358 table[j+j+1] = iter().get_dest_table(2+j+j+1);
359 }
360 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
361 }
363 int rnum = len*2+1;
364 bool makes_backward_branch = false;
365 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
366 int rp = -1;
367 for( int j = 0; j < len; j++ ) {
368 jint match_int = table[j+j+0];
369 int dest = table[j+j+1];
370 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
371 int table_index = method_data_update() ? j : NullTableIndex;
372 makes_backward_branch |= (dest <= bci());
373 if( match_int != next_lo ) {
374 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
375 }
376 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
377 ranges[++rp].set(match_int, dest, table_index);
378 }
379 }
380 jint highest = table[2*(len-1)];
381 assert(ranges[rp].hi() == highest, "");
382 if( highest != max_jint
383 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
384 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
385 }
386 assert(rp < rnum, "not too many ranges");
388 // Safepoint in case a backward branch was observed
389 if( makes_backward_branch && UseLoopSafepoints )
390 add_safepoint();
392 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
393 }
395 //----------------------------create_jump_tables-------------------------------
396 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
397 // Are jumptables enabled
398 if (!UseJumpTables) return false;
400 // Are jumptables supported
401 if (!Matcher::has_match_rule(Op_Jump)) return false;
403 // Don't make jump table if profiling
404 if (method_data_update()) return false;
406 // Decide if a guard is needed to lop off big ranges at either (or
407 // both) end(s) of the input set. We'll call this the default target
408 // even though we can't be sure that it is the true "default".
410 bool needs_guard = false;
411 int default_dest;
412 int64 total_outlier_size = 0;
413 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
414 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
416 if (lo->dest() == hi->dest()) {
417 total_outlier_size = hi_size + lo_size;
418 default_dest = lo->dest();
419 } else if (lo_size > hi_size) {
420 total_outlier_size = lo_size;
421 default_dest = lo->dest();
422 } else {
423 total_outlier_size = hi_size;
424 default_dest = hi->dest();
425 }
427 // If a guard test will eliminate very sparse end ranges, then
428 // it is worth the cost of an extra jump.
429 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
430 needs_guard = true;
431 if (default_dest == lo->dest()) lo++;
432 if (default_dest == hi->dest()) hi--;
433 }
435 // Find the total number of cases and ranges
436 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
437 int num_range = hi - lo + 1;
439 // Don't create table if: too large, too small, or too sparse.
440 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
441 return false;
442 if (num_cases > (MaxJumpTableSparseness * num_range))
443 return false;
445 // Normalize table lookups to zero
446 int lowval = lo->lo();
447 key_val = _gvn.transform( new (C) SubINode(key_val, _gvn.intcon(lowval)) );
449 // Generate a guard to protect against input keyvals that aren't
450 // in the switch domain.
451 if (needs_guard) {
452 Node* size = _gvn.intcon(num_cases);
453 Node* cmp = _gvn.transform( new (C) CmpUNode(key_val, size) );
454 Node* tst = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ge) );
455 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
456 jump_if_true_fork(iff, default_dest, NullTableIndex);
457 }
459 // Create an ideal node JumpTable that has projections
460 // of all possible ranges for a switch statement
461 // The key_val input must be converted to a pointer offset and scaled.
462 // Compare Parse::array_addressing above.
463 #ifdef _LP64
464 // Clean the 32-bit int into a real 64-bit offset.
465 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
466 const TypeInt* ikeytype = TypeInt::make(0, num_cases-1, Type::WidenMin);
467 // Make I2L conversion control dependent to prevent it from
468 // floating above the range check during loop optimizations.
469 key_val = C->constrained_convI2L(&_gvn, key_val, ikeytype, control());
470 #endif
472 // Shift the value by wordsize so we have an index into the table, rather
473 // than a switch value
474 Node *shiftWord = _gvn.MakeConX(wordSize);
475 key_val = _gvn.transform( new (C) MulXNode( key_val, shiftWord));
477 // Create the JumpNode
478 Node* jtn = _gvn.transform( new (C) JumpNode(control(), key_val, num_cases) );
480 // These are the switch destinations hanging off the jumpnode
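// One JumpProjNode is created per table slot; every slot covered by a
// multi-value range branches to that range's destination.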
481 int i = 0;
482 for (SwitchRange* r = lo; r <= hi; r++) {
483 for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
484 Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
485 {
486 PreserveJVMState pjvms(this);
487 set_control(input);
488 jump_if_always_fork(r->dest(), r->table_index());
489 }
490 }
491 }
492 assert(i == num_cases, "miscount of cases");
493 stop_and_kill_map(); // no more uses for this JVMS
494 return true;
495 }
497 //----------------------------jump_switch_ranges-------------------------------
498 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
499 Block* switch_block = block();
501 if (switch_depth == 0) {
502 // Do special processing for the top-level call.
503 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
504 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
506 // Decrement pred-numbers for the unique set of nodes.
507 #ifdef ASSERT
508 // Ensure that the block's successors are a (duplicate-free) set.
509 int successors_counted = 0; // block occurrences in [hi..lo]
510 int unique_successors = switch_block->num_successors();
511 for (int i = 0; i < unique_successors; i++) {
512 Block* target = switch_block->successor_at(i);
514 // Check that the set of successors is the same in both places.
515 int successors_found = 0;
516 for (SwitchRange* p = lo; p <= hi; p++) {
517 if (p->dest() == target->start()) successors_found++;
518 }
519 assert(successors_found > 0, "successor must be known");
520 successors_counted += successors_found;
521 }
522 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
523 #endif
525 // Maybe prune the inputs, based on the type of key_val.
526 jint min_val = min_jint;
527 jint max_val = max_jint;
528 const TypeInt* ti = key_val->bottom_type()->isa_int();
529 if (ti != NULL) {
530 min_val = ti->_lo;
531 max_val = ti->_hi;
532 assert(min_val <= max_val, "invalid int type");
533 }
534 while (lo->hi() < min_val) lo++;
535 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
536 while (hi->lo() > max_val) hi--;
537 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
538 }
540 #ifndef PRODUCT
541 if (switch_depth == 0) {
542 _max_switch_depth = 0;
543 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
544 }
545 #endif
547 assert(lo <= hi, "must be a non-empty set of ranges");
548 if (lo == hi) {
549 jump_if_always_fork(lo->dest(), lo->table_index());
550 } else {
551 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
552 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
554 if (create_jump_tables(key_val, lo, hi)) return;
556 int nr = hi - lo + 1;
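// Binary search over the ranges: pick a pivot range 'mid', compare key_val
// against mid->lo(), and recurse on the lower and upper halves.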
558 SwitchRange* mid = lo + nr/2;
559 // if there is an easy choice, pivot at a singleton:
560 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
562 assert(lo < mid && mid <= hi, "good pivot choice");
563 assert(nr != 2 || mid == hi, "should pick higher of 2");
564 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
566 Node *test_val = _gvn.intcon(mid->lo());
568 if (mid->is_singleton()) {
569 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
570 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
572 // Special Case: If there are exactly three ranges, and the high
573 // and low range each go to the same place, omit the "gt" test,
574 // since it will not discriminate anything.
575 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
576 if (eq_test_only) {
577 assert(mid == hi-1, "");
578 }
580 // if there is a higher range, test for it and process it:
581 if (mid < hi && !eq_test_only) {
582 // two comparisons of same values--should enable 1 test for 2 branches
583 // Use BoolTest::le instead of BoolTest::gt
584 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
585 Node *iftrue = _gvn.transform( new (C) IfTrueNode(iff_le) );
586 Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff_le) );
587 { PreserveJVMState pjvms(this);
588 set_control(iffalse);
589 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
590 }
591 set_control(iftrue);
592 }
594 } else {
595 // mid is a range, not a singleton, so treat mid..hi as a unit
596 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
598 // if there is a higher range, test for it and process it:
599 if (mid == hi) {
600 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
601 } else {
602 Node *iftrue = _gvn.transform( new (C) IfTrueNode(iff_ge) );
603 Node *iffalse = _gvn.transform( new (C) IfFalseNode(iff_ge) );
604 { PreserveJVMState pjvms(this);
605 set_control(iftrue);
606 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
607 }
608 set_control(iffalse);
609 }
610 }
612 // in any case, process the lower range
613 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
614 }
616 // Decrease pred_count for each successor after all is done.
617 if (switch_depth == 0) {
618 int unique_successors = switch_block->num_successors();
619 for (int i = 0; i < unique_successors; i++) {
620 Block* target = switch_block->successor_at(i);
621 // Throw away the pre-allocated path for each unique successor.
622 target->next_path_num();
623 }
624 }
626 #ifndef PRODUCT
627 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
628 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
629 SwitchRange* r;
630 int nsing = 0;
631 for( r = lo; r <= hi; r++ ) {
632 if( r->is_singleton() ) nsing++;
633 }
634 tty->print(">>> ");
635 _method->print_short_name();
636 tty->print_cr(" switch decision tree");
637 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
638 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
639 if (_max_switch_depth > _est_switch_depth) {
640 tty->print_cr("******** BAD SWITCH DEPTH ********");
641 }
642 tty->print(" ");
643 for( r = lo; r <= hi; r++ ) {
644 r->print();
645 }
646 tty->cr();
647 }
648 #endif
649 }
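// modf/modd/l2f: implement frem, drem and l2f as leaf calls into SharedRuntime;
// the calls have no memory effects, only a value result.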
651 void Parse::modf() {
652 Node *f2 = pop();
653 Node *f1 = pop();
654 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
655 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
656 "frem", NULL, //no memory effects
657 f1, f2);
658 Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));
660 push(res);
661 }
663 void Parse::modd() {
664 Node *d2 = pop_pair();
665 Node *d1 = pop_pair();
666 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
667 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
668 "drem", NULL, //no memory effects
669 d1, top(), d2, top());
670 Node* res_d = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));
672 #ifdef ASSERT
673 Node* res_top = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 1));
674 assert(res_top == top(), "second value must be top");
675 #endif
677 push_pair(res_d);
678 }
680 void Parse::l2f() {
681 Node* f2 = pop();
682 Node* f1 = pop();
683 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
684 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
685 "l2f", NULL, //no memory effects
686 f1, f2);
687 Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms + 0));
689 push(res);
690 }
692 void Parse::do_irem() {
693 // Must keep both values on the expression-stack during null-check
694 zero_check_int(peek());
695 // Compile-time detect of null-exception?
696 if (stopped()) return;
698 Node* b = pop();
699 Node* a = pop();
701 const Type *t = _gvn.type(b);
702 if (t != Type::TOP) {
703 const TypeInt *ti = t->is_int();
704 if (ti->is_con()) {
705 int divisor = ti->get_con();
706 // check for positive power of 2
707 if (divisor > 0 &&
708 (divisor & ~(divisor-1)) == divisor) {
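// For a power-of-two divisor d: a % d == a & (d-1) when a >= 0, and
// -((-a) & (d-1)) when a < 0; the phi below merges the two cases.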
709 // yes !
710 Node *mask = _gvn.intcon((divisor - 1));
711 // Sigh, must handle negative dividends
712 Node *zero = _gvn.intcon(0);
713 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
714 Node *iff = _gvn.transform( new (C) IfFalseNode(ifff) );
715 Node *ift = _gvn.transform( new (C) IfTrueNode (ifff) );
716 Node *reg = jump_if_join(ift, iff);
717 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
718 // Negative path; negate/and/negate
719 Node *neg = _gvn.transform( new (C) SubINode(zero, a) );
720 Node *andn= _gvn.transform( new (C) AndINode(neg, mask) );
721 Node *negn= _gvn.transform( new (C) SubINode(zero, andn) );
722 phi->init_req(1, negn);
723 // Fast positive case
724 Node *andx = _gvn.transform( new (C) AndINode(a, mask) );
725 phi->init_req(2, andx);
726 // Push the merge
727 push( _gvn.transform(phi) );
728 return;
729 }
730 }
731 }
732 // Default case
733 push( _gvn.transform( new (C) ModINode(control(),a,b) ) );
734 }
736 // Handle jsr and jsr_w bytecode
737 void Parse::do_jsr() {
738 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
740 // Store information about current state, tagged with new _jsr_bci
741 int return_bci = iter().next_bci();
742 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
744 // Update method data
745 profile_taken_branch(jsr_bci);
747 // The way we do things now, there is only one successor block
748 // for the jsr, because the target code is cloned by ciTypeFlow.
749 Block* target = successor_for_bci(jsr_bci);
751 // What got pushed?
752 const Type* ret_addr = target->peek();
753 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
755 // Effect of jsr on the stack
756 push(_gvn.makecon(ret_addr));
758 // Flow to the jsr.
759 merge(jsr_bci);
760 }
762 // Handle ret bytecode
763 void Parse::do_ret() {
764 // Find to whom we return.
765 assert(block()->num_successors() == 1, "a ret can only go one place now");
766 Block* target = block()->successor_at(0);
767 assert(!target->is_ready(), "our arrival must be expected");
768 profile_ret(target->flow()->start());
769 int pnum = target->next_path_num();
770 merge_common(target, pnum);
771 }
773 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
774 if (btest != BoolTest::eq && btest != BoolTest::ne) {
775 // Only ::eq and ::ne are supported for profile injection.
776 return false;
777 }
778 if (test->is_Cmp() &&
779 test->in(1)->Opcode() == Op_ProfileBoolean) {
780 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
781 int false_cnt = profile->false_count();
782 int true_cnt = profile->true_count();
784 // Which count corresponds to taken vs. not_taken depends on the actual test operation (::eq or ::ne).
785 // No need to scale the counts because profile injection was designed
786 // to feed exact counts into VM.
787 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt;
788 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;
790 profile->consume();
791 return true;
792 }
793 return false;
794 }
795 //--------------------------dynamic_branch_prediction--------------------------
796 // Try to gather dynamic branch prediction behavior. Return a probability
797 // of the branch being taken and set the "cnt" field. Returns -1.0
798 // if we need to use static prediction for some reason.
799 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
800 ResourceMark rm;
802 cnt = COUNT_UNKNOWN;
804 int taken = 0;
805 int not_taken = 0;
807 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
809 if (use_mdo) {
810 // Use MethodData information if it is available
811 // FIXME: free the ProfileData structure
812 ciMethodData* methodData = method()->method_data();
813 if (!methodData->is_mature()) return PROB_UNKNOWN;
814 ciProfileData* data = methodData->bci_to_data(bci());
815 if (data == NULL) {
816 return PROB_UNKNOWN;
817 }
818 if (!data->is_JumpData()) return PROB_UNKNOWN;
820 // get taken and not taken values
821 taken = data->as_JumpData()->taken();
822 not_taken = 0;
823 if (data->is_BranchData()) {
824 not_taken = data->as_BranchData()->not_taken();
825 }
827 // scale the counts to be commensurate with invocation counts:
828 taken = method()->scale_count(taken);
829 not_taken = method()->scale_count(not_taken);
830 }
832 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
833 // We also check that the individual counters are non-negative first, since otherwise an overflowed (negative) counter could still produce a positive sum and slip past the check.
834 if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
835 if (C->log() != NULL) {
836 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
837 }
838 return PROB_UNKNOWN;
839 }
841 // Compute frequency that we arrive here
842 float sum = taken + not_taken;
843 // Adjust, if this block is a cloned private block but the
844 // Jump counts are shared. Take the private counts for
845 // just this path instead of the shared counts.
846 if( block()->count() > 0 )
847 sum = block()->count();
848 cnt = sum / FreqCountInvocations;
850 // Pin probability to sane limits
851 float prob;
852 if( !taken )
853 prob = (0+PROB_MIN) / 2;
854 else if( !not_taken )
855 prob = (1+PROB_MAX) / 2;
856 else { // Compute probability of true path
857 prob = (float)taken / (float)(taken + not_taken);
858 if (prob > PROB_MAX) prob = PROB_MAX;
859 if (prob < PROB_MIN) prob = PROB_MIN;
860 }
862 assert((cnt > 0.0f) && (prob > 0.0f),
863 "Bad frequency assignment in if");
865 if (C->log() != NULL) {
866 const char* prob_str = NULL;
867 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
868 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
869 char prob_str_buf[30];
870 if (prob_str == NULL) {
871 sprintf(prob_str_buf, "%g", prob);
872 prob_str = prob_str_buf;
873 }
874 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
875 iter().get_dest(), taken, not_taken, cnt, prob_str);
876 }
877 return prob;
878 }
880 //-----------------------------branch_prediction-------------------------------
881 float Parse::branch_prediction(float& cnt,
882 BoolTest::mask btest,
883 int target_bci,
884 Node* test) {
885 float prob = dynamic_branch_prediction(cnt, btest, test);
886 // If prob is unknown, switch to static prediction
887 if (prob != PROB_UNKNOWN) return prob;
889 prob = PROB_FAIR; // Set default value
890 if (btest == BoolTest::eq) // Exactly equal test?
891 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
892 else if (btest == BoolTest::ne)
893 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
895 // If this is a conditional test guarding a backwards branch,
896 // assume it's a loop-back edge. Make it a likely taken branch.
897 if (target_bci < bci()) {
898 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
899 // Since it's an OSR, we probably have profile data, but since
900 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
901 // Let's make a special check here for completely zero counts.
902 ciMethodData* methodData = method()->method_data();
903 if (!methodData->is_empty()) {
904 ciProfileData* data = methodData->bci_to_data(bci());
905 // Only stop for truly zero counts, which mean an unknown part
906 // of the OSR-ed method, and we want to deopt to gather more stats.
907 // If you have ANY counts, then this loop is simply 'cold' relative
908 // to the OSR loop.
909 if (data == NULL ||
910 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) {
911 // This is the only way to return PROB_UNKNOWN:
912 return PROB_UNKNOWN;
913 }
914 }
915 }
916 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
917 }
919 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
920 return prob;
921 }
923 // The magic constants are chosen so as to match the output of
924 // branch_prediction() when the profile reports a zero taken count.
925 // It is important to distinguish zero counts unambiguously, because
926 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
927 // very small but nonzero probabilities, which if confused with zero
928 // counts would keep the program recompiling indefinitely.
929 bool Parse::seems_never_taken(float prob) const {
930 return prob < PROB_MIN;
931 }
933 // True if the comparison seems to be the kind that will not change its
934 // statistics from true to false. See comments in adjust_map_after_if.
935 // This question is only asked along paths which are already
936 // classifed as untaken (by seems_never_taken), so really,
937 // if a path is never taken, its controlling comparison is
938 // already acting in a stable fashion. If the comparison
939 // seems stable, we will put an expensive uncommon trap
940 // on the untaken path.
941 bool Parse::seems_stable_comparison() const {
942 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) {
943 return false;
944 }
945 return true;
946 }
948 //-------------------------------repush_if_args--------------------------------
949 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
950 inline int Parse::repush_if_args() {
951 #ifndef PRODUCT
952 if (PrintOpto && WizardMode) {
953 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
954 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
955 method()->print_name(); tty->cr();
956 }
957 #endif
958 int bc_depth = - Bytecodes::depth(iter().cur_bc());
959 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
960 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
961 assert(argument(0) != NULL, "must exist");
962 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
963 inc_sp(bc_depth);
964 return bc_depth;
965 }
967 //----------------------------------do_ifnull----------------------------------
968 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
969 int target_bci = iter().get_dest();
971 Block* branch_block = successor_for_bci(target_bci);
972 Block* next_block = successor_for_bci(iter().next_bci());
974 float cnt;
975 float prob = branch_prediction(cnt, btest, target_bci, c);
976 if (prob == PROB_UNKNOWN) {
977 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
978 #ifndef PRODUCT
979 if (PrintOpto && Verbose)
980 tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
981 #endif
982 repush_if_args(); // to gather stats on loop
983 // We need to mark this branch as taken so that if we recompile we will
984 // see that it is possible. In the tiered system the interpreter doesn't
985 // do profiling and by the time we get to the lower tier from the interpreter
986 // the path may be cold again. Make sure it doesn't look untaken
987 profile_taken_branch(target_bci, !ProfileInterpreter);
988 uncommon_trap(Deoptimization::Reason_unreached,
989 Deoptimization::Action_reinterpret,
990 NULL, "cold");
991 if (C->eliminate_boxing()) {
992 // Mark the successor blocks as parsed
993 branch_block->next_path_num();
994 next_block->next_path_num();
995 }
996 return;
997 }
999 explicit_null_checks_inserted++;
1001 // Generate real control flow
1002 Node *tst = _gvn.transform( new (C) BoolNode( c, btest ) );
1004 // Sanity check the probability value
1005 assert(prob > 0.0f,"Bad probability in Parser");
1006 // Need xform to put node in hash table
1007 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1008 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1009 // True branch
1010 { PreserveJVMState pjvms(this);
1011 Node* iftrue = _gvn.transform( new (C) IfTrueNode (iff) );
1012 set_control(iftrue);
1014 if (stopped()) { // Path is dead?
1015 explicit_null_checks_elided++;
1016 if (C->eliminate_boxing()) {
1017 // Mark the successor block as parsed
1018 branch_block->next_path_num();
1019 }
1020 } else { // Path is live.
1021 // Update method data
1022 profile_taken_branch(target_bci);
1023 adjust_map_after_if(btest, c, prob, branch_block, next_block);
1024 if (!stopped()) {
1025 merge(target_bci);
1026 }
1027 }
1028 }
1030 // False branch
1031 Node* iffalse = _gvn.transform( new (C) IfFalseNode(iff) );
1032 set_control(iffalse);
1034 if (stopped()) { // Path is dead?
1035 explicit_null_checks_elided++;
1036 if (C->eliminate_boxing()) {
1037 // Mark the successor block as parsed
1038 next_block->next_path_num();
1039 }
1040 } else { // Path is live.
1041 // Update method data
1042 profile_not_taken_branch();
1043 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
1044 next_block, branch_block);
1045 }
1046 }
1048 //------------------------------------do_if------------------------------------
1049 void Parse::do_if(BoolTest::mask btest, Node* c) {
1050 int target_bci = iter().get_dest();
1052 Block* branch_block = successor_for_bci(target_bci);
1053 Block* next_block = successor_for_bci(iter().next_bci());
1055 float cnt;
1056 float prob = branch_prediction(cnt, btest, target_bci, c);
1057 float untaken_prob = 1.0 - prob;
1059 if (prob == PROB_UNKNOWN) {
1060 #ifndef PRODUCT
1061 if (PrintOpto && Verbose)
1062 tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
1063 #endif
1064 repush_if_args(); // to gather stats on loop
1065 // We need to mark this branch as taken so that if we recompile we will
1066 // see that it is possible. In the tiered system the interpreter doesn't
1067 // do profiling and by the time we get to the lower tier from the interpreter
1068 // the path may be cold again. Make sure it doesn't look untaken
1069 profile_taken_branch(target_bci, !ProfileInterpreter);
1070 uncommon_trap(Deoptimization::Reason_unreached,
1071 Deoptimization::Action_reinterpret,
1072 NULL, "cold");
1073 if (C->eliminate_boxing()) {
1074 // Mark the successor blocks as parsed
1075 branch_block->next_path_num();
1076 next_block->next_path_num();
1077 }
1078 return;
1079 }
1081 // Sanity check the probability value
1082 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1084 bool taken_if_true = true;
1085 // Convert BoolTest to canonical form:
1086 if (!BoolTest(btest).is_canonical()) {
1087 btest = BoolTest(btest).negate();
1088 taken_if_true = false;
1089 // prob is NOT updated here; it remains the probability of the taken
1090 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1091 }
1092 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1094 Node* tst0 = new (C) BoolNode(c, btest);
1095 Node* tst = _gvn.transform(tst0);
1096 BoolTest::mask taken_btest = BoolTest::illegal;
1097 BoolTest::mask untaken_btest = BoolTest::illegal;
1099 if (tst->is_Bool()) {
1100 // Refresh c from the transformed bool node, since it may be
1101 // simpler than the original c. Also re-canonicalize btest.
1102 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1103 // That can arise from statements like: if (x instanceof C) ...
1104 if (tst != tst0) {
1105 // Canonicalize one more time since transform can change it.
1106 btest = tst->as_Bool()->_test._test;
1107 if (!BoolTest(btest).is_canonical()) {
1108 // Reverse edges one more time...
1109 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1110 btest = tst->as_Bool()->_test._test;
1111 assert(BoolTest(btest).is_canonical(), "sanity");
1112 taken_if_true = !taken_if_true;
1113 }
1114 c = tst->in(1);
1115 }
1116 BoolTest::mask neg_btest = BoolTest(btest).negate();
1117 taken_btest = taken_if_true ? btest : neg_btest;
1118 untaken_btest = taken_if_true ? neg_btest : btest;
1119 }
1121 // Generate real control flow
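// 'prob' is the probability of the bytecode branch being taken; the IfNode wants
// the probability of its IfTrue projection, which corresponds to the untaken
// bytecode path when the test was negated for canonicalization above.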
1122 float true_prob = (taken_if_true ? prob : untaken_prob);
1123 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1124 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1125 Node* taken_branch = new (C) IfTrueNode(iff);
1126 Node* untaken_branch = new (C) IfFalseNode(iff);
1127 if (!taken_if_true) { // Finish conversion to canonical form
1128 Node* tmp = taken_branch;
1129 taken_branch = untaken_branch;
1130 untaken_branch = tmp;
1131 }
1133 // Branch is taken:
1134 { PreserveJVMState pjvms(this);
1135 taken_branch = _gvn.transform(taken_branch);
1136 set_control(taken_branch);
1138 if (stopped()) {
1139 if (C->eliminate_boxing()) {
1140 // Mark the successor block as parsed
1141 branch_block->next_path_num();
1142 }
1143 } else {
1144 // Update method data
1145 profile_taken_branch(target_bci);
1146 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1147 if (!stopped()) {
1148 merge(target_bci);
1149 }
1150 }
1151 }
1153 untaken_branch = _gvn.transform(untaken_branch);
1154 set_control(untaken_branch);
1156 // Branch not taken.
1157 if (stopped()) {
1158 if (C->eliminate_boxing()) {
1159 // Mark the successor block as parsed
1160 next_block->next_path_num();
1161 }
1162 } else {
1163 // Update method data
1164 profile_not_taken_branch();
1165 adjust_map_after_if(untaken_btest, c, untaken_prob,
1166 next_block, branch_block);
1167 }
1168 }
1170 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
1171 // Don't want to speculate on uncommon traps when running with -Xcomp
1172 if (!UseInterpreter) {
1173 return false;
1174 }
1175 return (seems_never_taken(prob) && seems_stable_comparison());
1176 }
1178 //----------------------------adjust_map_after_if------------------------------
1179 // Adjust the JVM state to reflect the result of taking this path.
1180 // Basically, it means inspecting the CmpNode controlling this
1181 // branch, seeing how it constrains a tested value, and then
1182 // deciding if it's worth our while to encode this constraint
1183 // as graph nodes in the current abstract interpretation map.
1184 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1185 Block* path, Block* other_path) {
1186 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1187 return; // nothing to do
1189 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1191 if (path_is_suitable_for_uncommon_trap(prob)) {
1192 repush_if_args();
1193 uncommon_trap(Deoptimization::Reason_unstable_if,
1194 Deoptimization::Action_reinterpret,
1195 NULL,
1196 (is_fallthrough ? "taken always" : "taken never"));
1197 return;
1198 }
1200 Node* val = c->in(1);
1201 Node* con = c->in(2);
1202 const Type* tcon = _gvn.type(con);
1203 const Type* tval = _gvn.type(val);
1204 bool have_con = tcon->singleton();
1205 if (tval->singleton()) {
1206 if (!have_con) {
1207 // Swap, so constant is in con.
1208 con = val;
1209 tcon = tval;
1210 val = c->in(2);
1211 tval = _gvn.type(val);
1212 btest = BoolTest(btest).commute();
1213 have_con = true;
1214 } else {
1215 // Do we have two constants? Then leave well enough alone.
1216 have_con = false;
1217 }
1218 }
1219 if (!have_con) // remaining adjustments need a con
1220 return;
1222 sharpen_type_after_if(btest, con, tcon, val, tval);
1223 }
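// Match the shape LoadKlass(obj._klass) or DecodeNKlass(LoadNKlass(obj._klass))
// and return the underlying Java object node, or NULL if the node does not have
// that shape or the load is not from the klass offset.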
1226 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
1227 Node* ldk;
1228 if (n->is_DecodeNKlass()) {
1229 if (n->in(1)->Opcode() != Op_LoadNKlass) {
1230 return NULL;
1231 } else {
1232 ldk = n->in(1);
1233 }
1234 } else if (n->Opcode() != Op_LoadKlass) {
1235 return NULL;
1236 } else {
1237 ldk = n;
1238 }
1239 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
1241 Node* adr = ldk->in(MemNode::Address);
1242 intptr_t off = 0;
1243 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
1244 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
1245 return NULL;
1246 const TypePtr* tp = gvn->type(obj)->is_ptr();
1247 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
1248 return NULL;
1250 return obj;
1251 }
1253 void Parse::sharpen_type_after_if(BoolTest::mask btest,
1254 Node* con, const Type* tcon,
1255 Node* val, const Type* tval) {
1256 // Look for opportunities to sharpen the type of a node
1257 // whose klass is compared with a constant klass.
1258 if (btest == BoolTest::eq && tcon->isa_klassptr()) {
1259 Node* obj = extract_obj_from_klass_load(&_gvn, val);
1260 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
1261 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
1262 // Found:
1263 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
1264 // or the narrowOop equivalent.
1265 const Type* obj_type = _gvn.type(obj);
1266 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
1267 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
1268 tboth->higher_equal(obj_type)) {
1269 // obj has to be of the exact type Foo if the CmpP succeeds.
1270 int obj_in_map = map()->find_edge(obj);
1271 JVMState* jvms = this->jvms();
1272 if (obj_in_map >= 0 &&
1273 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1274 TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
1275 const Type* tcc = ccast->as_Type()->type();
1276 assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
1277 // Delay transform() call to allow recovery of pre-cast value
1278 // at the control merge.
1279 _gvn.set_type_bottom(ccast);
1280 record_for_igvn(ccast);
1281 // Here's the payoff.
1282 replace_in_map(obj, ccast);
1283 }
1284 }
1285 }
1286 }
1288 int val_in_map = map()->find_edge(val);
1289 if (val_in_map < 0) return; // replace_in_map would be useless
1290 {
1291 JVMState* jvms = this->jvms();
1292 if (!(jvms->is_loc(val_in_map) ||
1293 jvms->is_stk(val_in_map)))
1294 return; // again, it would be useless
1295 }
1297 // Check for a comparison to a constant, and "know" that the compared
1298 // value is constrained on this path.
1299 assert(tcon->singleton(), "");
1300 ConstraintCastNode* ccast = NULL;
1301 Node* cast = NULL;
1303 switch (btest) {
1304 case BoolTest::eq: // Constant test?
1305 {
1306 const Type* tboth = tcon->join_speculative(tval);
1307 if (tboth == tval) break; // Nothing to gain.
1308 if (tcon->isa_int()) {
1309 ccast = new (C) CastIINode(val, tboth);
1310 } else if (tcon == TypePtr::NULL_PTR) {
1311 // Cast to null, but keep the pointer identity temporarily live.
1312 ccast = new (C) CastPPNode(val, tboth);
1313 } else {
1314 const TypeF* tf = tcon->isa_float_constant();
1315 const TypeD* td = tcon->isa_double_constant();
1316 // Exclude tests vs float/double 0 as these could be
1317 // either +0 or -0. Just because you are equal to +0
1318 // doesn't mean you ARE +0!
1319 // Note, following code also replaces Long and Oop values.
1320 if ((!tf || tf->_f != 0.0) &&
1321 (!td || td->_d != 0.0))
1322 cast = con; // Replace non-constant val by con.
1323 }
1324 }
1325 break;
1327 case BoolTest::ne:
1328 if (tcon == TypePtr::NULL_PTR) {
1329 cast = cast_not_null(val, false);
1330 }
1331 break;
1333 default:
1334 // (At this point we could record int range types with CastII.)
1335 break;
1336 }
1338 if (ccast != NULL) {
1339 const Type* tcc = ccast->as_Type()->type();
1340 assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
1341 // Delay transform() call to allow recovery of pre-cast value
1342 // at the control merge.
1343 ccast->set_req(0, control());
1344 _gvn.set_type_bottom(ccast);
1345 record_for_igvn(ccast);
1346 cast = ccast;
1347 }
1349 if (cast != NULL) { // Here's the payoff.
1350 replace_in_map(val, cast);
1351 }
1352 }
1354 /**
1355 * Use speculative type to optimize CmpP node: if comparison is
1356 * against the low level class, cast the object to the speculative
1357 * type if any. CmpP should then go away.
1358 *
1359 * @param c expected CmpP node
1360 * @return result of CmpP on object cast to the speculative type
1361 *
1362 */
1363 Node* Parse::optimize_cmp_with_klass(Node* c) {
1364 // If this is transformed by the _gvn to a comparison with the low
1365 // level klass then we may be able to use speculation
1366 if (c->Opcode() == Op_CmpP &&
1367 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1368 c->in(2)->is_Con()) {
1369 Node* load_klass = NULL;
1370 Node* decode = NULL;
1371 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1372 decode = c->in(1);
1373 load_klass = c->in(1)->in(1);
1374 } else {
1375 load_klass = c->in(1);
1376 }
1377 if (load_klass->in(2)->is_AddP()) {
1378 Node* addp = load_klass->in(2);
1379 Node* obj = addp->in(AddPNode::Address);
1380 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1381 if (obj_type->speculative_type() != NULL) {
1382 ciKlass* k = obj_type->speculative_type();
1383 inc_sp(2);
1384 obj = maybe_cast_profiled_obj(obj, k);
1385 dec_sp(2);
1386 // Make the CmpP use the casted obj
1387 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1388 load_klass = load_klass->clone();
1389 load_klass->set_req(2, addp);
1390 load_klass = _gvn.transform(load_klass);
1391 if (decode != NULL) {
1392 decode = decode->clone();
1393 decode->set_req(1, load_klass);
1394 load_klass = _gvn.transform(decode);
1395 }
1396 c = c->clone();
1397 c->set_req(1, load_klass);
1398 c = _gvn.transform(c);
1399 }
1400 }
1401 }
1402 return c;
1403 }
1405 //------------------------------do_one_bytecode--------------------------------
1406 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1407 void Parse::do_one_bytecode() {
1408 Node *a, *b, *c, *d; // Handy temps
1409 BoolTest::mask btest;
1410 int i;
1412 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1414 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1415 "out of nodes parsing method")) {
1416 return;
1417 }
1419 #ifdef ASSERT
1420 // for setting breakpoints
1421 if (TraceOptoParse) {
1422 tty->print(" @");
1423 dump_bci(bci());
1424 tty->cr();
1425 }
1426 #endif
1428 switch (bc()) {
1429 case Bytecodes::_nop:
1430 // do nothing
1431 break;
1432 case Bytecodes::_lconst_0:
1433 push_pair(longcon(0));
1434 break;
1436 case Bytecodes::_lconst_1:
1437 push_pair(longcon(1));
1438 break;
1440 case Bytecodes::_fconst_0:
1441 push(zerocon(T_FLOAT));
1442 break;
1444 case Bytecodes::_fconst_1:
1445 push(makecon(TypeF::ONE));
1446 break;
1448 case Bytecodes::_fconst_2:
1449 push(makecon(TypeF::make(2.0f)));
1450 break;
1452 case Bytecodes::_dconst_0:
1453 push_pair(zerocon(T_DOUBLE));
1454 break;
1456 case Bytecodes::_dconst_1:
1457 push_pair(makecon(TypeD::ONE));
1458 break;
1460 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1461 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1462 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1463 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1464 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1465 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1466 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1467 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
1468 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
1469 case Bytecodes::_aconst_null: push(null()); break;
1470 case Bytecodes::_ldc:
1471 case Bytecodes::_ldc_w:
1472 case Bytecodes::_ldc2_w:
1473 // If the constant is unresolved, run this BC once in the interpreter.
1474 {
1475 ciConstant constant = iter().get_constant();
1476 if (constant.basic_type() == T_OBJECT &&
1477 !constant.as_object()->is_loaded()) {
1478 int index = iter().get_constant_pool_index();
1479 constantTag tag = iter().get_constant_pool_tag(index);
1480 uncommon_trap(Deoptimization::make_trap_request
1481 (Deoptimization::Reason_unloaded,
1482 Deoptimization::Action_reinterpret,
1483 index),
1484 NULL, tag.internal_name());
1485 break;
1486 }
1487 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(),
1488 "must be java_mirror of klass");
1489 bool pushed = push_constant(constant, true);
1490 guarantee(pushed, "must be possible to push this constant");
1491 }
1493 break;

  case Bytecodes::_aload_0:
    push( local(0) );
    break;
  case Bytecodes::_aload_1:
    push( local(1) );
    break;
  case Bytecodes::_aload_2:
    push( local(2) );
    break;
  case Bytecodes::_aload_3:
    push( local(3) );
    break;
  case Bytecodes::_aload:
    push( local(iter().get_index()) );
    break;

  case Bytecodes::_fload_0:
  case Bytecodes::_iload_0:
    push( local(0) );
    break;
  case Bytecodes::_fload_1:
  case Bytecodes::_iload_1:
    push( local(1) );
    break;
  case Bytecodes::_fload_2:
  case Bytecodes::_iload_2:
    push( local(2) );
    break;
  case Bytecodes::_fload_3:
  case Bytecodes::_iload_3:
    push( local(3) );
    break;
  case Bytecodes::_fload:
  case Bytecodes::_iload:
    push( local(iter().get_index()) );
    break;
  case Bytecodes::_lload_0:
    push_pair_local( 0 );
    break;
  case Bytecodes::_lload_1:
    push_pair_local( 1 );
    break;
  case Bytecodes::_lload_2:
    push_pair_local( 2 );
    break;
  case Bytecodes::_lload_3:
    push_pair_local( 3 );
    break;
  case Bytecodes::_lload:
    push_pair_local( iter().get_index() );
    break;

  case Bytecodes::_dload_0:
    push_pair_local(0);
    break;
  case Bytecodes::_dload_1:
    push_pair_local(1);
    break;
  case Bytecodes::_dload_2:
    push_pair_local(2);
    break;
  case Bytecodes::_dload_3:
    push_pair_local(3);
    break;
  case Bytecodes::_dload:
    push_pair_local(iter().get_index());
    break;
  case Bytecodes::_fstore_0:
  case Bytecodes::_istore_0:
  case Bytecodes::_astore_0:
    set_local( 0, pop() );
    break;
  case Bytecodes::_fstore_1:
  case Bytecodes::_istore_1:
  case Bytecodes::_astore_1:
    set_local( 1, pop() );
    break;
  case Bytecodes::_fstore_2:
  case Bytecodes::_istore_2:
  case Bytecodes::_astore_2:
    set_local( 2, pop() );
    break;
  case Bytecodes::_fstore_3:
  case Bytecodes::_istore_3:
  case Bytecodes::_astore_3:
    set_local( 3, pop() );
    break;
  case Bytecodes::_fstore:
  case Bytecodes::_istore:
  case Bytecodes::_astore:
    set_local( iter().get_index(), pop() );
    break;
  // long stores
  case Bytecodes::_lstore_0:
    set_pair_local( 0, pop_pair() );
    break;
  case Bytecodes::_lstore_1:
    set_pair_local( 1, pop_pair() );
    break;
  case Bytecodes::_lstore_2:
    set_pair_local( 2, pop_pair() );
    break;
  case Bytecodes::_lstore_3:
    set_pair_local( 3, pop_pair() );
    break;
  case Bytecodes::_lstore:
    set_pair_local( iter().get_index(), pop_pair() );
    break;

  // double stores
  case Bytecodes::_dstore_0:
    set_pair_local( 0, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_1:
    set_pair_local( 1, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_2:
    set_pair_local( 2, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore_3:
    set_pair_local( 3, dstore_rounding(pop_pair()) );
    break;
  case Bytecodes::_dstore:
    set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
    break;

  case Bytecodes::_pop:  dec_sp(1);   break;
  case Bytecodes::_pop2: dec_sp(2);   break;
  case Bytecodes::_swap:
    a = pop();
    b = pop();
    push(a);
    push(b);
    break;
  case Bytecodes::_dup:
    a = pop();
    push(a);
    push(a);
    break;
  case Bytecodes::_dup_x1:
    a = pop();
    b = pop();
    push( a );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup_x2:
    a = pop();
    b = pop();
    c = pop();
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2:
    a = pop();
    b = pop();
    push( b );
    push( a );
    push( b );
    push( a );
    break;

  case Bytecodes::_dup2_x1:
    // before: .. c, b, a
    // after:  .. b, a, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    push( b );
    push( a );
    push( c );
    push( b );
    push( a );
    break;
  case Bytecodes::_dup2_x2:
    // before: .. d, c, b, a
    // after:  .. b, a, d, c, b, a
    // not tested
    a = pop();
    b = pop();
    c = pop();
    d = pop();
    push( b );
    push( a );
    push( d );
    push( c );
    push( b );
    push( a );
    break;

  case Bytecodes::_arraylength: {
    // Must do null-check with value on expression stack
    Node *ary = null_check(peek(), T_ARRAY);
    // Compile-time detect of null-exception?
    if (stopped())  return;
    a = pop();
    push(load_array_length(a));
    break;
  }

  case Bytecodes::_baload: array_load(T_BYTE);   break;
  case Bytecodes::_caload: array_load(T_CHAR);   break;
  case Bytecodes::_iaload: array_load(T_INT);    break;
  case Bytecodes::_saload: array_load(T_SHORT);  break;
  case Bytecodes::_faload: array_load(T_FLOAT);  break;
  case Bytecodes::_aaload: array_load(T_OBJECT); break;
  case Bytecodes::_laload: {
    a = array_addressing(T_LONG, 0);
    if (stopped())  return;     // guaranteed null or range check
    dec_sp(2);                  // Pop array and index
    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
    break;
  }
  case Bytecodes::_daload: {
    a = array_addressing(T_DOUBLE, 0);
    if (stopped())  return;     // guaranteed null or range check
    dec_sp(2);                  // Pop array and index
    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
    break;
  }
  case Bytecodes::_bastore: array_store(T_BYTE);  break;
  case Bytecodes::_castore: array_store(T_CHAR);  break;
  case Bytecodes::_iastore: array_store(T_INT);   break;
  case Bytecodes::_sastore: array_store(T_SHORT); break;
  case Bytecodes::_fastore: array_store(T_FLOAT); break;
  case Bytecodes::_aastore: {
    d = array_addressing(T_OBJECT, 1);
    if (stopped())  return;     // guaranteed null or range check
    array_store_check();
    c = pop();                  // Oop to store
    b = pop();                  // index (already used)
    a = pop();                  // the array itself
    const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
    const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT, MemNode::release);
    break;
  }
  case Bytecodes::_lastore: {
    a = array_addressing(T_LONG, 2);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                  // Pop array and index
    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
    break;
  }
  case Bytecodes::_dastore: {
    a = array_addressing(T_DOUBLE, 2);
    if (stopped())  return;     // guaranteed null or range check
    c = pop_pair();
    dec_sp(2);                  // Pop array and index
    c = dstore_rounding(c);
    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
    break;
  }
  case Bytecodes::_getfield:
    do_getfield();
    break;

  case Bytecodes::_getstatic:
    do_getstatic();
    break;

  case Bytecodes::_putfield:
    do_putfield();
    break;

  case Bytecodes::_putstatic:
    do_putstatic();
    break;

  case Bytecodes::_irem:
    do_irem();
    break;
  case Bytecodes::_idiv:
    // Must keep both values on the expression-stack during zero-check
    zero_check_int(peek());
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop();
    a = pop();
    push( _gvn.transform( new (C) DivINode(control(),a,b) ) );
    break;
  case Bytecodes::_imul:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) MulINode(a,b) ) );
    break;
  case Bytecodes::_iadd:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) AddINode(a,b) ) );
    break;
  case Bytecodes::_ineg:
    a = pop();
    push( _gvn.transform( new (C) SubINode(_gvn.intcon(0),a)) );
    break;
  case Bytecodes::_isub:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) SubINode(a,b) ) );
    break;
  case Bytecodes::_iand:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) AndINode(a,b) ) );
    break;
  case Bytecodes::_ior:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) OrINode(a,b) ) );
    break;
  case Bytecodes::_ixor:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) XorINode(a,b) ) );
    break;
  case Bytecodes::_ishl:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) LShiftINode(a,b) ) );
    break;
  case Bytecodes::_ishr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) RShiftINode(a,b) ) );
    break;
  case Bytecodes::_iushr:
    b = pop(); a = pop();
    push( _gvn.transform( new (C) URShiftINode(a,b) ) );
    break;

  case Bytecodes::_fneg:
    a = pop();
    b = _gvn.transform(new (C) NegFNode (a));
    push(b);
    break;

  case Bytecodes::_fsub:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) SubFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fadd:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) AddFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fmul:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) MulFNode(a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_fdiv:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) DivFNode(0,a,b) );
    d = precision_rounding(c);
    push( d );
    break;

  case Bytecodes::_frem:
    if (Matcher::has_match_rule(Op_ModF)) {
      // Generate a ModF node.
      b = pop();
      a = pop();
      c = _gvn.transform( new (C) ModFNode(0,a,b) );
      d = precision_rounding(c);
      push( d );
    }
    else {
      // Generate a call.
      modf();
    }
    break;

  case Bytecodes::_fcmpl:
    b = pop();
    a = pop();
    c = _gvn.transform( new (C) CmpF3Node( a, b));
    push(c);
    break;
  case Bytecodes::_fcmpg:
    b = pop();
    a = pop();

    // Same as fcmpl but need to flip the unordered case.  Swap the inputs,
    // which negates the result sign except for unordered.  Flip the unordered
    // as well by using CmpF3 which implements unordered-lesser instead of
    // unordered-greater semantics.  Finally, negate the result bits.  Result
    // is same as using a CmpF3Greater except we did it with CmpF3 alone.
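    // Example: a = 1.0f, b = NaN.  The operands are unordered, so
    // CmpF3(b, a) yields -1 and 0 - (-1) = +1, the result fcmpg requires.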
    c = _gvn.transform( new (C) CmpF3Node( b, a));
    c = _gvn.transform( new (C) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;

  case Bytecodes::_f2i:
    a = pop();
    push(_gvn.transform(new (C) ConvF2INode(a)));
    break;

  case Bytecodes::_d2i:
    a = pop_pair();
    b = _gvn.transform(new (C) ConvD2INode(a));
    push( b );
    break;

  case Bytecodes::_f2d:
    a = pop();
    b = _gvn.transform( new (C) ConvF2DNode(a));
    push_pair( b );
    break;

  case Bytecodes::_d2f:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvD2FNode(a));
    // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
    //b = _gvn.transform(new (C) RoundFloatNode(0, b) );
    push( b );
    break;

  case Bytecodes::_l2f:
    if (Matcher::convL2FSupported()) {
      a = pop_pair();
      b = _gvn.transform( new (C) ConvL2FNode(a));
      // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
      // Rather than storing the result into an FP register then pushing
      // out to memory to round, the machine instruction that implements
      // ConvL2D is responsible for rounding.
      // c = precision_rounding(b);
      c = _gvn.transform(b);
      push(c);
    } else {
      l2f();
    }
    break;

  case Bytecodes::_l2d:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvL2DNode(a));
    // For i486.ad, rounding is always necessary (see _l2f above).
    // c = dprecision_rounding(b);
    c = _gvn.transform(b);
    push_pair(c);
    break;

  case Bytecodes::_f2l:
    a = pop();
    b = _gvn.transform( new (C) ConvF2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_d2l:
    a = pop_pair();
    b = _gvn.transform( new (C) ConvD2LNode(a));
    push_pair(b);
    break;

  case Bytecodes::_dsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) SubDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dadd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AddDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) MulDNode(a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_ddiv:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) DivDNode(0,a,b) );
    d = dprecision_rounding(c);
    push_pair( d );
    break;

  case Bytecodes::_dneg:
    a = pop_pair();
    b = _gvn.transform(new (C) NegDNode (a));
    push_pair(b);
    break;

  case Bytecodes::_drem:
    if (Matcher::has_match_rule(Op_ModD)) {
      // Generate a ModD node.
      b = pop_pair();
      a = pop_pair();
      // a % b

      c = _gvn.transform( new (C) ModDNode(0,a,b) );
      d = dprecision_rounding(c);
      push_pair( d );
    }
    else {
      // Generate a call.
      modd();
    }
    break;

  case Bytecodes::_dcmpl:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) CmpD3Node( a, b));
    push(c);
    break;

  case Bytecodes::_dcmpg:
    b = pop_pair();
    a = pop_pair();
    // Same as dcmpl but need to flip the unordered case.
    // Commute the inputs, which negates the result sign except for unordered.
    // Flip the unordered as well by using CmpD3 which implements
    // unordered-lesser instead of unordered-greater semantics.
    // Finally, negate the result bits.  Result is same as using a
    // CmpD3Greater except we did it with CmpD3 alone.
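    // Example: a = 1.0, b = NaN.  Unordered, so CmpD3(b, a) yields -1 and
    // 0 - (-1) = +1, the result dcmpg requires.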
    c = _gvn.transform( new (C) CmpD3Node( b, a));
    c = _gvn.transform( new (C) SubINode(_gvn.intcon(0),c) );
    push(c);
    break;


    // Note for longs -> lo word is on TOS, hi word is on TOS - 1
  case Bytecodes::_land:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AndLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) OrLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lxor:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) XorLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lshl:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) LShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lshr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) RShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lushr:
    b = pop();                  // the shift count
    a = pop_pair();             // value to be shifted
    c = _gvn.transform( new (C) URShiftLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lmul:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) MulLNode(a,b) );
    push_pair(c);
    break;

  case Bytecodes::_lrem:
    // Must keep both values on the expression-stack during zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) ModLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ldiv:
    // Must keep both values on the expression-stack during zero-check
    assert(peek(0) == top(), "long word order");
    zero_check_long(peek(1));
    // Compile-time detect of division by zero?
    if (stopped())  return;
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) DivLNode(control(),a,b) );
    push_pair(c);
    break;

  case Bytecodes::_ladd:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) AddLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lsub:
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) SubLNode(a,b) );
    push_pair(c);
    break;
  case Bytecodes::_lcmp:
    // Safepoints are now inserted _before_ branches.  The long-compare
    // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
    // slew of control flow.  These are usually followed by a CmpI vs zero and
    // a branch; this pattern then optimizes to the obvious long-compare and
    // branch.  However, if the branch is backwards there's a Safepoint
    // inserted.  The inserted Safepoint captures the JVM state at the
    // pre-branch point, i.e. it captures the 3-way value.  Thus if a
    // long-compare is used to control a loop the debug info will force
    // computation of the 3-way value, even though the generated code uses a
    // long-compare and branch.  We try to rectify the situation by inserting
    // a SafePoint here and have it dominate and kill the safepoint added at a
    // following backwards branch.  At this point the JVM state merely holds 2
    // longs but not the 3-way value.
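    // Typical shape: "lcmp; iflt <loop_head>" controlling a long-indexed
    // loop; with the SafePoint added here, the captured JVM state holds
    // only the two long operands, never the materialized -1/0/+1 value.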
    if( UseLoopSafepoints ) {
      switch( iter().next_bc() ) {
      case Bytecodes::_ifgt:
      case Bytecodes::_iflt:
      case Bytecodes::_ifge:
      case Bytecodes::_ifle:
      case Bytecodes::_ifne:
      case Bytecodes::_ifeq:
        // If this is a backwards branch in the bytecodes, add Safepoint
        maybe_add_safepoint(iter().next_get_dest());
        break;
      default:
        break;
      }
    }
    b = pop_pair();
    a = pop_pair();
    c = _gvn.transform( new (C) CmpL3Node( a, b ));
    push(c);
    break;

  case Bytecodes::_lneg:
    a = pop_pair();
    b = _gvn.transform( new (C) SubLNode(longcon(0),a));
    push_pair(b);
    break;
  case Bytecodes::_l2i:
    a = pop_pair();
    push( _gvn.transform( new (C) ConvL2INode(a)));
    break;
  case Bytecodes::_i2l:
    a = pop();
    b = _gvn.transform( new (C) ConvI2LNode(a));
    push_pair(b);
    break;
  case Bytecodes::_i2b:
    // Sign extend
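    // e.g. a = 0x000000FF: << 24 gives 0xFF000000, then the arithmetic
    // >> 24 gives 0xFFFFFFFF, i.e. (byte)-1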
    a = pop();
    a = _gvn.transform( new (C) LShiftINode(a,_gvn.intcon(24)) );
    a = _gvn.transform( new (C) RShiftINode(a,_gvn.intcon(24)) );
    push( a );
    break;
  case Bytecodes::_i2s:
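    // Sign extend, same as _i2b but from 16 bits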
    a = pop();
    a = _gvn.transform( new (C) LShiftINode(a,_gvn.intcon(16)) );
    a = _gvn.transform( new (C) RShiftINode(a,_gvn.intcon(16)) );
    push( a );
    break;
  case Bytecodes::_i2c:
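    // Zero extend: e.g. 0xFFFF8041 & 0xFFFF = 0x00008041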
    a = pop();
    push( _gvn.transform( new (C) AndINode(a,_gvn.intcon(0xFFFF)) ) );
    break;

  case Bytecodes::_i2f:
    a = pop();
    b = _gvn.transform( new (C) ConvI2FNode(a) );
    c = precision_rounding(b);
    push(c);
    break;

  case Bytecodes::_i2d:
    a = pop();
    b = _gvn.transform( new (C) ConvI2DNode(a));
    push_pair(b);
    break;

  case Bytecodes::_iinc:        // Increment local
    i = iter().get_index();     // Get local index
    set_local( i, _gvn.transform( new (C) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
    break;

  // Exit points of synchronized methods must have an unlock node
  case Bytecodes::_return:
    return_current(NULL);
    break;

  case Bytecodes::_ireturn:
  case Bytecodes::_areturn:
  case Bytecodes::_freturn:
    return_current(pop());
    break;
  case Bytecodes::_lreturn:
    return_current(pop_pair());
    break;
  case Bytecodes::_dreturn:
    return_current(pop_pair());
    break;

  case Bytecodes::_athrow:
    // A null exception oop throws a NullPointerException
    null_check(peek());
    if (stopped())  return;
    // Hook the thrown exception directly to subsequent handlers.
    if (BailoutToInterpreterForThrows) {
      // Keep method interpreted from now on.
      uncommon_trap(Deoptimization::Reason_unhandled,
                    Deoptimization::Action_make_not_compilable);
      return;
    }
    if (env()->jvmti_can_post_on_exceptions()) {
      // check if we must post exception events, take uncommon trap if so (with must_throw = false)
      uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
    }
    // Here if either can_post_on_exceptions or should_post_on_exceptions is false
    add_exception_state(make_exception_state(peek()));
    break;

  case Bytecodes::_goto:   // fall through
  case Bytecodes::_goto_w: {
    int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();

    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(target_bci);

    // Update method data
    profile_taken_branch(target_bci);

    // Merge the current control into the target basic block
    merge(target_bci);

    // See if we can get some profile data and hand it off to the next block
    Block *target_block = block()->successor_for_bci(target_bci);
    if (target_block->pred_count() != 1)  break;
    ciMethodData* methodData = method()->method_data();
    if (!methodData->is_mature())  break;
    ciProfileData* data = methodData->bci_to_data(bci());
    assert( data->is_JumpData(), "need JumpData for taken branch" );
    int taken = ((ciJumpData*)data)->taken();
    taken = method()->scale_count(taken);
    target_block->set_count(taken);
    break;
  }

  case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
  case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
  handle_if_null:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = null();
    b = pop();
    c = _gvn.transform( new (C) CmpPNode(b, a) );
    do_ifnull(btest, c);
    break;

  case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
  case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
  handle_if_acmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C) CmpPNode(b, a) );
    c = optimize_cmp_with_klass(c);
    do_if(btest, c);
    break;

  case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
  case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
  case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
  case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
  case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
  case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
  handle_ifxx:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = _gvn.intcon(0);
    b = pop();
    c = _gvn.transform( new (C) CmpINode(b, a) );
    do_if(btest, c);
    break;

  case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
  case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
  case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
  case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
  case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
  case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
  handle_if_icmp:
    // If this is a backwards branch in the bytecodes, add Safepoint
    maybe_add_safepoint(iter().get_dest());
    a = pop();
    b = pop();
    c = _gvn.transform( new (C) CmpINode( b, a ) );
    do_if(btest, c);
    break;

  case Bytecodes::_tableswitch:
    do_tableswitch();
    break;

  case Bytecodes::_lookupswitch:
    do_lookupswitch();
    break;

  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    do_call();
    break;
  case Bytecodes::_checkcast:
    do_checkcast();
    break;
  case Bytecodes::_instanceof:
    do_instanceof();
    break;
  case Bytecodes::_anewarray:
    do_anewarray();
    break;
  case Bytecodes::_newarray:
    do_newarray((BasicType)iter().get_index());
    break;
  case Bytecodes::_multianewarray:
    do_multianewarray();
    break;
  case Bytecodes::_new:
    do_new();
    break;

  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    do_jsr();
    break;

  case Bytecodes::_ret:
    do_ret();
    break;

  case Bytecodes::_monitorenter:
    do_monitor_enter();
    break;

  case Bytecodes::_monitorexit:
    do_monitor_exit();
    break;

  case Bytecodes::_breakpoint:
    // Breakpoint set concurrently to compile
    // %%% use an uncommon trap?
    C->record_failure("breakpoint in method");
    return;

  default:
#ifndef PRODUCT
    map()->dump(99);
#endif
    tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
    ShouldNotReachHere();
  }

#ifndef PRODUCT
  IdealGraphPrinter *printer = IdealGraphPrinter::printer();
  if (printer) {
    char buffer[256];
    sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
    bool old = printer->traverse_outs();
    printer->set_traverse_outs(true);
    printer->print_method(C, buffer, 4);
    printer->set_traverse_outs(old);
  }
#endif
}