Thu, 24 Apr 2008 14:02:13 -0700
6646019: array subscript expressions become top() with -d64
Summary: stop compilation after negative array allocation
Reviewed-by: never, jrose
1 /*
2 * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_parse2.cpp.incl"
28 extern int explicit_null_checks_inserted,
29 explicit_null_checks_elided;
31 //---------------------------------array_load----------------------------------
32 void Parse::array_load(BasicType elem_type) {
33 const Type* elem = Type::TOP;
34 Node* adr = array_addressing(elem_type, 0, &elem);
35 if (stopped()) return; // guaranteed null or range check
36 _sp -= 2; // Pop array and index
37 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
38 Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
39 push(ld);
40 }
43 //--------------------------------array_store----------------------------------
44 void Parse::array_store(BasicType elem_type) {
45 Node* adr = array_addressing(elem_type, 1);
46 if (stopped()) return; // guaranteed null or range check
47 Node* val = pop();
48 _sp -= 2; // Pop array and index
49 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
50 store_to_memory(control(), adr, val, elem_type, adr_type);
51 }
54 //------------------------------array_addressing-------------------------------
55 // Pull array and index from the stack. Compute pointer-to-element.
56 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
57 Node *idx = peek(0+vals); // Get from stack without popping
58 Node *ary = peek(1+vals); // in case of exception
60 // Null check the array base, with correct stack contents
61 ary = do_null_check(ary, T_ARRAY);
62 // Compile-time detection of a null exception?
63 if (stopped()) return top();
65 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
66 const TypeInt* sizetype = arytype->size();
67 const Type* elemtype = arytype->elem();
69 if (UseUniqueSubclasses && result2 != NULL) {
70 const Type* el = elemtype;
71 if (elemtype->isa_narrowoop()) {
72 el = elemtype->is_narrowoop()->make_oopptr();
73 }
74 const TypeInstPtr* toop = el->isa_instptr();
75 if (toop) {
76 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
77 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
78 const Type* subklass = Type::get_const_type(toop->klass());
79 elemtype = subklass->join(el);
80 }
81 }
82 }
84 // Check for big class initializers with all constant offsets
85 // feeding into a known-size array.
86 const TypeInt* idxtype = _gvn.type(idx)->is_int();
87 // See if the highest idx value is less than the lowest array bound,
88 // and if the idx value cannot be negative:
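// (For example, a constant index with type [3..3] against an array whose
//  size type is [8..8] satisfies both conditions, so no check is emitted.)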
89 bool need_range_check = true;
90 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
91 need_range_check = false;
92 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
93 }
95 if (!arytype->klass()->is_loaded()) {
96 // Only fails for some -Xcomp runs
97 // The class is unloaded. We have to run this bytecode in the interpreter.
98 uncommon_trap(Deoptimization::Reason_unloaded,
99 Deoptimization::Action_reinterpret,
100 arytype->klass(), "!loaded array");
101 return top();
102 }
104 // Do the range check
105 if (GenerateRangeChecks && need_range_check) {
106 // Range is constant in array-oop, so we can use the original state of mem
107 Node* len = load_array_length(ary);
108 Node* tst;
109 if (sizetype->_hi <= 0) {
110 // If the greatest array bound is negative, we can conclude that we're
111 // compiling unreachable code, but the unsigned compare trick used below
112 // only works with non-negative lengths. Instead, hack "tst" to be zero so
113 // the uncommon_trap path will always be taken.
114 tst = _gvn.intcon(0);
115 } else {
116 // Test length vs index (standard trick using unsigned compare)
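// (A negative index, reinterpreted as unsigned, is larger than any valid
//  array length, so the single CmpU/BoolTest::lt below covers both
//  idx < 0 and idx >= len; e.g. idx == -1 compares as 0xFFFFFFFF and
//  always fails the test.)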
117 Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
118 BoolTest::mask btest = BoolTest::lt;
119 tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
120 }
121 // Branch to failure if out of bounds
122 { BuildCutout unless(this, tst, PROB_MAX);
123 if (C->allow_range_check_smearing()) {
124 // Do not use builtin_throw, since range checks are sometimes
125 // made more stringent by an optimistic transformation.
126 // This creates "tentative" range checks at this point,
127 // which are not guaranteed to throw exceptions.
128 // See IfNode::Ideal, is_range_check, adjust_check.
129 uncommon_trap(Deoptimization::Reason_range_check,
130 Deoptimization::Action_make_not_entrant,
131 NULL, "range_check");
132 } else {
133 // If we have already recompiled with the range-check-widening
134 // heroic optimization turned off, then we must really be throwing
135 // range check exceptions.
136 builtin_throw(Deoptimization::Reason_range_check, idx);
137 }
138 }
139 }
140 // Check whether we statically know that a range-check exception is always thrown
141 if (stopped()) return top();
143 Node* ptr = array_element_address( ary, idx, type, sizetype);
145 if (result2 != NULL) *result2 = elemtype;
146 return ptr;
147 }
150 // returns IfNode
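// Emits CmpI(a, b), a Bool with the given mask, and an If node; the caller
// attaches the IfTrue/IfFalse projections (see jump_if_true_fork and
// jump_if_false_fork below).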
151 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
152 Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
153 Node *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
154 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
155 return iff;
156 }
158 // return Region node
159 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
160 Node *region = new (C, 3) RegionNode(3); // 2 results
161 record_for_igvn(region);
162 region->init_req(1, iffalse);
163 region->init_req(2, iftrue );
164 _gvn.set_type(region, Type::CONTROL);
165 region = _gvn.transform(region);
166 set_control (region);
167 return region;
168 }
171 //------------------------------helper for tableswitch-------------------------
172 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
173 // True branch, use existing map info
174 { PreserveJVMState pjvms(this);
175 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
176 set_control( iftrue );
177 profile_switch_case(prof_table_index);
178 merge_new_path(dest_bci_if_true);
179 }
181 // False branch
182 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
183 set_control( iffalse );
184 }
186 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
187 // False branch, use existing map info
188 { PreserveJVMState pjvms(this);
189 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
190 set_control( iffalse );
191 profile_switch_case(prof_table_index);
192 merge_new_path(dest_bci_if_true);
193 }
195 // True branch
196 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
197 set_control( iftrue );
198 }
200 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
201 // False branch, use existing map and control()
202 profile_switch_case(prof_table_index);
203 merge_new_path(dest_bci);
204 }
207 extern "C" {
208 static int jint_cmp(const void *i, const void *j) {
209 int a = *(jint *)i;
210 int b = *(jint *)j;
211 return a > b ? 1 : a < b ? -1 : 0;
212 }
213 }
216 // Default value for methodData switch indexing. Must be a negative value to avoid
217 // conflict with any legal switch index.
218 #define NullTableIndex -1
220 class SwitchRange : public StackObj {
221 // a range of integers coupled with a bci destination
222 jint _lo; // inclusive lower limit
223 jint _hi; // inclusive upper limit
224 int _dest;
225 int _table_index; // index into method data table
227 public:
228 jint lo() const { return _lo; }
229 jint hi() const { return _hi; }
230 int dest() const { return _dest; }
231 int table_index() const { return _table_index; }
232 bool is_singleton() const { return _lo == _hi; }
234 void setRange(jint lo, jint hi, int dest, int table_index) {
235 assert(lo <= hi, "must be a non-empty range");
236 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
237 }
238 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
239 assert(lo <= hi, "must be a non-empty range");
240 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
241 _hi = hi;
242 return true;
243 }
244 return false;
245 }
247 void set (jint value, int dest, int table_index) {
248 setRange(value, value, dest, table_index);
249 }
250 bool adjoin(jint value, int dest, int table_index) {
251 return adjoinRange(value, value, dest, table_index);
252 }
254 void print(ciEnv* env) {
255 if (is_singleton())
256 tty->print(" {%d}=>%d", lo(), dest());
257 else if (lo() == min_jint)
258 tty->print(" {..%d}=>%d", hi(), dest());
259 else if (hi() == max_jint)
260 tty->print(" {%d..}=>%d", lo(), dest());
261 else
262 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
263 }
264 };
267 //-------------------------------do_tableswitch--------------------------------
268 void Parse::do_tableswitch() {
269 Node* lookup = pop();
271 // Get information about tableswitch
272 int default_dest = iter().get_dest_table(0);
273 int lo_index = iter().get_int_table(1);
274 int hi_index = iter().get_int_table(2);
275 int len = hi_index - lo_index + 1;
277 if (len < 1) {
278 // If this is a backward branch, add safepoint
279 maybe_add_safepoint(default_dest);
280 merge(default_dest);
281 return;
282 }
284 // generate decision tree, using trichotomy when possible
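// (For example, a tableswitch with keys 3..5 produces the ranges
//  {min..2}=>default, {3}=>d3, {4}=>d4, {5}=>d5, {6..max}=>default;
//  adjacent cases with the same destination and table index are merged
//  by adjoin/adjoinRange.)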
285 int rnum = len+2;
286 bool makes_backward_branch = false;
287 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
288 int rp = -1;
289 if (lo_index != min_jint) {
290 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
291 }
292 for (int j = 0; j < len; j++) {
293 jint match_int = lo_index+j;
294 int dest = iter().get_dest_table(j+3);
295 makes_backward_branch |= (dest <= bci());
296 int table_index = method_data_update() ? j : NullTableIndex;
297 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
298 ranges[++rp].set(match_int, dest, table_index);
299 }
300 }
301 jint highest = lo_index+(len-1);
302 assert(ranges[rp].hi() == highest, "");
303 if (highest != max_jint
304 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
305 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
306 }
307 assert(rp < len+2, "not too many ranges");
309 // Safepoint in case a backward branch is observed
310 if( makes_backward_branch && UseLoopSafepoints )
311 add_safepoint();
313 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
314 }
317 //------------------------------do_lookupswitch--------------------------------
318 void Parse::do_lookupswitch() {
319 Node *lookup = pop(); // lookup value
320 // Get information about lookupswitch
321 int default_dest = iter().get_dest_table(0);
322 int len = iter().get_int_table(1);
324 if (len < 1) { // If this is a backward branch, add safepoint
325 maybe_add_safepoint(default_dest);
326 merge(default_dest);
327 return;
328 }
330 // generate decision tree, using trichotomy when possible
331 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
332 {
333 for( int j = 0; j < len; j++ ) {
334 table[j+j+0] = iter().get_int_table(2+j+j);
335 table[j+j+1] = iter().get_dest_table(2+j+j+1);
336 }
337 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
338 }
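// The sorted match values may leave gaps; each gap is filled below with a
// range branching to default_dest, so the final list of ranges still
// covers all of [min_jint..max_jint].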
340 int rnum = len*2+1;
341 bool makes_backward_branch = false;
342 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
343 int rp = -1;
344 for( int j = 0; j < len; j++ ) {
345 jint match_int = table[j+j+0];
346 int dest = table[j+j+1];
347 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
348 int table_index = method_data_update() ? j : NullTableIndex;
349 makes_backward_branch |= (dest <= bci());
350 if( match_int != next_lo ) {
351 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
352 }
353 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
354 ranges[++rp].set(match_int, dest, table_index);
355 }
356 }
357 jint highest = table[2*(len-1)];
358 assert(ranges[rp].hi() == highest, "");
359 if( highest != max_jint
360 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
361 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
362 }
363 assert(rp < rnum, "not too many ranges");
365 // Safepoint in case a backward branch is observed
366 if( makes_backward_branch && UseLoopSafepoints )
367 add_safepoint();
369 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
370 }
372 //----------------------------create_jump_tables-------------------------------
373 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
374 // Are jumptables enabled
375 if (!UseJumpTables) return false;
377 // Are jumptables supported
378 if (!Matcher::has_match_rule(Op_Jump)) return false;
380 // Don't make jump table if profiling
381 if (method_data_update()) return false;
383 // Decide if a guard is needed to lop off big ranges at either (or
384 // both) end(s) of the input set. We'll call this the default target
385 // even though we can't be sure that it is the true "default".
387 bool needs_guard = false;
388 int default_dest;
389 int64 total_outlier_size = 0;
390 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
391 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
393 if (lo->dest() == hi->dest()) {
394 total_outlier_size = hi_size + lo_size;
395 default_dest = lo->dest();
396 } else if (lo_size > hi_size) {
397 total_outlier_size = lo_size;
398 default_dest = lo->dest();
399 } else {
400 total_outlier_size = hi_size;
401 default_dest = hi->dest();
402 }
404 // If a guard test will eliminate very sparse end ranges, then
405 // it is worth the cost of an extra jump.
406 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
407 needs_guard = true;
408 if (default_dest == lo->dest()) lo++;
409 if (default_dest == hi->dest()) hi--;
410 }
412 // Find the total number of cases and ranges
413 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
414 int num_range = hi - lo + 1;
416 // Don't create table if: too large, too small, or too sparse.
417 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
418 return false;
419 if (num_cases > (MaxJumpTableSparseness * num_range))
420 return false;
422 // Normalize table lookups to zero
423 int lowval = lo->lo();
424 key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
426 // Generate a guard to protect against input keyvals that aren't
427 // in the switch domain.
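// (After subtracting lowval the valid keys are 0..num_cases-1, so a single
//  unsigned compare against num_cases sends every remaining out-of-range
//  key, including formerly negative ones, to default_dest.)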
428 if (needs_guard) {
429 Node* size = _gvn.intcon(num_cases);
430 Node* cmp = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
431 Node* tst = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
432 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
433 jump_if_true_fork(iff, default_dest, NullTableIndex);
434 }
436 // Create an ideal node JumpTable that has projections
437 // of all possible ranges for a switch statement
438 // The key_val input must be converted to a pointer offset and scaled.
439 // Compare Parse::array_addressing above.
440 #ifdef _LP64
441 // Clean the 32-bit int into a real 64-bit offset.
442 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
443 const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
444 key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
445 #endif
446 // Shift the value by wordsize so we have an index into the table, rather
447 // than a switch value
448 Node *shiftWord = _gvn.MakeConX(wordSize);
449 key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
451 // Create the JumpNode
452 Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
454 // These are the switch destinations hanging off the jumpnode
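// (One JumpProjNode is created per case value, so a range such as {3..5}
//  expands into three projections that all merge into the same destination
//  block via jump_if_always_fork.)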
455 int i = 0;
456 for (SwitchRange* r = lo; r <= hi; r++) {
457 for (int j = r->lo(); j <= r->hi(); j++, i++) {
458 Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
459 {
460 PreserveJVMState pjvms(this);
461 set_control(input);
462 jump_if_always_fork(r->dest(), r->table_index());
463 }
464 }
465 }
466 assert(i == num_cases, "miscount of cases");
467 stop_and_kill_map(); // no more uses for this JVMS
468 return true;
469 }
471 //----------------------------jump_switch_ranges-------------------------------
472 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
473 Block* switch_block = block();
475 if (switch_depth == 0) {
476 // Do special processing for the top-level call.
477 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
478 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
480 // Decrement pred-numbers for the unique set of nodes.
481 #ifdef ASSERT
482 // Ensure that the block's successors are a (duplicate-free) set.
483 int successors_counted = 0; // block occurrences in [lo..hi]
484 int unique_successors = switch_block->num_successors();
485 for (int i = 0; i < unique_successors; i++) {
486 Block* target = switch_block->successor_at(i);
488 // Check that the set of successors is the same in both places.
489 int successors_found = 0;
490 for (SwitchRange* p = lo; p <= hi; p++) {
491 if (p->dest() == target->start()) successors_found++;
492 }
493 assert(successors_found > 0, "successor must be known");
494 successors_counted += successors_found;
495 }
496 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
497 #endif
499 // Maybe prune the inputs, based on the type of key_val.
500 jint min_val = min_jint;
501 jint max_val = max_jint;
502 const TypeInt* ti = key_val->bottom_type()->isa_int();
503 if (ti != NULL) {
504 min_val = ti->_lo;
505 max_val = ti->_hi;
506 assert(min_val <= max_val, "invalid int type");
507 }
508 while (lo->hi() < min_val) lo++;
509 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
510 while (hi->lo() > max_val) hi--;
511 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
512 }
514 #ifndef PRODUCT
515 if (switch_depth == 0) {
516 _max_switch_depth = 0;
517 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
518 }
519 #endif
521 assert(lo <= hi, "must be a non-empty set of ranges");
522 if (lo == hi) {
523 jump_if_always_fork(lo->dest(), lo->table_index());
524 } else {
525 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
526 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
528 if (create_jump_tables(key_val, lo, hi)) return;
530 int nr = hi - lo + 1;
532 SwitchRange* mid = lo + nr/2;
533 // if there is an easy choice, pivot at a singleton:
534 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
536 assert(lo < mid && mid <= hi, "good pivot choice");
537 assert(nr != 2 || mid == hi, "should pick higher of 2");
538 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
540 Node *test_val = _gvn.intcon(mid->lo());
542 if (mid->is_singleton()) {
543 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
544 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
546 // Special Case: If there are exactly three ranges, and the high
547 // and low range each go to the same place, omit the "gt" test,
548 // since it will not discriminate anything.
549 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
550 if (eq_test_only) {
551 assert(mid == hi-1, "");
552 }
554 // if there is a higher range, test for it and process it:
555 if (mid < hi && !eq_test_only) {
556 // two comparisons of same values--should enable 1 test for 2 branches
557 // Use BoolTest::le instead of BoolTest::gt
558 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
559 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
560 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
561 { PreserveJVMState pjvms(this);
562 set_control(iffalse);
563 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
564 }
565 set_control(iftrue);
566 }
568 } else {
569 // mid is a range, not a singleton, so treat mid..hi as a unit
570 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
572 // if there is a higher range, test for it and process it:
573 if (mid == hi) {
574 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
575 } else {
576 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
577 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
578 { PreserveJVMState pjvms(this);
579 set_control(iftrue);
580 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
581 }
582 set_control(iffalse);
583 }
584 }
586 // in any case, process the lower range
587 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
588 }
590 // Decrease pred_count for each successor after all is done.
591 if (switch_depth == 0) {
592 int unique_successors = switch_block->num_successors();
593 for (int i = 0; i < unique_successors; i++) {
594 Block* target = switch_block->successor_at(i);
595 // Throw away the pre-allocated path for each unique successor.
596 target->next_path_num();
597 }
598 }
600 #ifndef PRODUCT
601 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
602 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
603 SwitchRange* r;
604 int nsing = 0;
605 for( r = lo; r <= hi; r++ ) {
606 if( r->is_singleton() ) nsing++;
607 }
608 tty->print(">>> ");
609 _method->print_short_name();
610 tty->print_cr(" switch decision tree");
611 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
612 hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
613 if (_max_switch_depth > _est_switch_depth) {
614 tty->print_cr("******** BAD SWITCH DEPTH ********");
615 }
616 tty->print(" ");
617 for( r = lo; r <= hi; r++ ) {
618 r->print(env());
619 }
620 tty->print_cr("");
621 }
622 #endif
623 }
625 void Parse::modf() {
626 Node *f2 = pop();
627 Node *f1 = pop();
628 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
629 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
630 "frem", NULL, //no memory effects
631 f1, f2);
632 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
634 push(res);
635 }
637 void Parse::modd() {
638 Node *d2 = pop_pair();
639 Node *d1 = pop_pair();
640 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
641 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
642 "drem", NULL, //no memory effects
643 d1, top(), d2, top());
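// (Each double occupies two argument slots here; top() stands in for the
//  unused second half of each pair.)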
644 Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
646 #ifdef ASSERT
647 Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
648 assert(res_top == top(), "second value must be top");
649 #endif
651 push_pair(res_d);
652 }
654 void Parse::l2f() {
655 Node* f2 = pop();
656 Node* f1 = pop();
657 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
658 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
659 "l2f", NULL, //no memory effects
660 f1, f2);
661 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
663 push(res);
664 }
666 void Parse::do_irem() {
667 // Must keep both values on the expression-stack during null-check
668 do_null_check(peek(), T_INT);
669 // Compile-time detection of a null exception?
670 if (stopped()) return;
672 Node* b = pop();
673 Node* a = pop();
675 const Type *t = _gvn.type(b);
676 if (t != Type::TOP) {
677 const TypeInt *ti = t->is_int();
678 if (ti->is_con()) {
679 int divisor = ti->get_con();
680 // check for positive power of 2
681 if (divisor > 0 &&
682 (divisor & ~(divisor-1)) == divisor) {
683 // yes !
684 Node *mask = _gvn.intcon((divisor - 1));
685 // Sigh, must handle negative dividends
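// (For example, with divisor 8 the mask is 7: a = 21 gives 21 & 7 = 5,
//  while a = -13 takes the negative path: 13 & 7 = 5, negated to -5,
//  matching irem's sign-of-dividend semantics.)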
686 Node *zero = _gvn.intcon(0);
687 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
688 Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
689 Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
690 Node *reg = jump_if_join(ift, iff);
691 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
692 // Negative path; negate/and/negate
693 Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
694 Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
695 Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
696 phi->init_req(1, negn);
697 // Fast positive case
698 Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
699 phi->init_req(2, andx);
700 // Push the merge
701 push( _gvn.transform(phi) );
702 return;
703 }
704 }
705 }
706 // Default case
707 push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
708 }
710 // Handle jsr and jsr_w bytecode
711 void Parse::do_jsr() {
712 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
714 // Store information about current state, tagged with new _jsr_bci
715 int return_bci = iter().next_bci();
716 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
718 // Update method data
719 profile_taken_branch(jsr_bci);
721 // The way we do things now, there is only one successor block
722 // for the jsr, because the target code is cloned by ciTypeFlow.
723 Block* target = successor_for_bci(jsr_bci);
725 // What got pushed?
726 const Type* ret_addr = target->peek();
727 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
729 // Effect of jsr on stack
730 push(_gvn.makecon(ret_addr));
732 // Flow to the jsr.
733 merge(jsr_bci);
734 }
736 // Handle ret bytecode
737 void Parse::do_ret() {
738 // Find to whom we return.
739 #if 0 // %%%% MAKE THIS WORK
740 Node* con = local();
741 const TypePtr* tp = con->bottom_type()->isa_ptr();
742 assert(tp && tp->singleton(), "");
743 int return_bci = (int) tp->get_con();
744 merge(return_bci);
745 #else
746 assert(block()->num_successors() == 1, "a ret can only go one place now");
747 Block* target = block()->successor_at(0);
748 assert(!target->is_ready(), "our arrival must be expected");
749 profile_ret(target->flow()->start());
750 int pnum = target->next_path_num();
751 merge_common(target, pnum);
752 #endif
753 }
755 //--------------------------dynamic_branch_prediction--------------------------
756 // Try to gather dynamic branch prediction behavior. Return a probability
757 // of the branch being taken and set the "cnt" field. Returns -1.0
758 // if we need to use static prediction for some reason.
759 float Parse::dynamic_branch_prediction(float &cnt) {
760 ResourceMark rm;
762 cnt = COUNT_UNKNOWN;
764 // Use MethodData information if it is available
765 // FIXME: free the ProfileData structure
766 ciMethodData* methodData = method()->method_data();
767 if (!methodData->is_mature()) return PROB_UNKNOWN;
768 ciProfileData* data = methodData->bci_to_data(bci());
769 if (!data->is_JumpData()) return PROB_UNKNOWN;
771 // get taken and not taken values
772 int taken = data->as_JumpData()->taken();
773 int not_taken = 0;
774 if (data->is_BranchData()) {
775 not_taken = data->as_BranchData()->not_taken();
776 }
778 // scale the counts to be commensurate with invocation counts:
779 taken = method()->scale_count(taken);
780 not_taken = method()->scale_count(not_taken);
782 // Give up if too few counts to be meaningful
783 if (taken + not_taken < 40) {
784 if (C->log() != NULL) {
785 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
786 }
787 return PROB_UNKNOWN;
788 }
790 // Compute frequency that we arrive here
791 int sum = taken + not_taken;
792 // Adjust, if this block is a cloned private block but the
793 // Jump counts are shared. Take the private counts for
794 // just this path instead of the shared counts.
795 if( block()->count() > 0 )
796 sum = block()->count();
797 cnt = (float)sum / (float)FreqCountInvocations;
799 // Pin probability to sane limits
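// (For example, taken = 30, not_taken = 10 gives prob = 0.75.  A branch
//  never observed taken gets PROB_MIN/2 rather than 0.0, which is what
//  seems_never_taken() below relies on, since it checks prob < PROB_MIN.)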
800 float prob;
801 if( !taken )
802 prob = (0+PROB_MIN) / 2;
803 else if( !not_taken )
804 prob = (1+PROB_MAX) / 2;
805 else { // Compute probability of true path
806 prob = (float)taken / (float)(taken + not_taken);
807 if (prob > PROB_MAX) prob = PROB_MAX;
808 if (prob < PROB_MIN) prob = PROB_MIN;
809 }
811 assert((cnt > 0.0f) && (prob > 0.0f),
812 "Bad frequency assignment in if");
814 if (C->log() != NULL) {
815 const char* prob_str = NULL;
816 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
817 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
818 char prob_str_buf[30];
819 if (prob_str == NULL) {
820 sprintf(prob_str_buf, "%g", prob);
821 prob_str = prob_str_buf;
822 }
823 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
824 iter().get_dest(), taken, not_taken, cnt, prob_str);
825 }
826 return prob;
827 }
829 //-----------------------------branch_prediction-------------------------------
830 float Parse::branch_prediction(float& cnt,
831 BoolTest::mask btest,
832 int target_bci) {
833 float prob = dynamic_branch_prediction(cnt);
834 // If prob is unknown, switch to static prediction
835 if (prob != PROB_UNKNOWN) return prob;
837 prob = PROB_FAIR; // Set default value
838 if (btest == BoolTest::eq) // Exactly equal test?
839 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
840 else if (btest == BoolTest::ne)
841 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
843 // If this is a conditional test guarding a backwards branch,
844 // assume it's a loop-back edge. Make it a likely taken branch.
845 if (target_bci < bci()) {
846 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
847 // Since it's an OSR, we probably have profile data, but since
848 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
849 // Let's make a special check here for completely zero counts.
850 ciMethodData* methodData = method()->method_data();
851 if (!methodData->is_empty()) {
852 ciProfileData* data = methodData->bci_to_data(bci());
853 // Only stop for truly zero counts, which mean an unknown part
854 // of the OSR-ed method, and we want to deopt to gather more stats.
855 // If you have ANY counts, then this loop is simply 'cold' relative
856 // to the OSR loop.
857 if (data->as_BranchData()->taken() +
858 data->as_BranchData()->not_taken() == 0 ) {
859 // This is the only way to return PROB_UNKNOWN:
860 return PROB_UNKNOWN;
861 }
862 }
863 }
864 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
865 }
867 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
868 return prob;
869 }
871 // The magic constants are chosen so as to match the output of
872 // branch_prediction() when the profile reports a zero taken count.
873 // It is important to distinguish zero counts unambiguously, because
874 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
875 // very small but nonzero probabilities, which if confused with zero
876 // counts would keep the program recompiling indefinitely.
877 bool Parse::seems_never_taken(float prob) {
878 return prob < PROB_MIN;
879 }
881 inline void Parse::repush_if_args() {
882 #ifndef PRODUCT
883 if (PrintOpto && WizardMode) {
884 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
885 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
886 method()->print_name(); tty->cr();
887 }
888 #endif
889 int bc_depth = - Bytecodes::depth(iter().cur_bc());
890 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
891 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
892 assert(argument(0) != NULL, "must exist");
893 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
894 _sp += bc_depth;
895 }
897 //----------------------------------do_ifnull----------------------------------
898 void Parse::do_ifnull(BoolTest::mask btest) {
899 int target_bci = iter().get_dest();
901 Block* branch_block = successor_for_bci(target_bci);
902 Block* next_block = successor_for_bci(iter().next_bci());
904 float cnt;
905 float prob = branch_prediction(cnt, btest, target_bci);
906 if (prob == PROB_UNKNOWN) {
907 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
908 #ifndef PRODUCT
909 if (PrintOpto && Verbose)
910 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
911 #endif
912 repush_if_args(); // to gather stats on loop
913 // We need to mark this branch as taken so that if we recompile we will
914 // see that it is possible. In the tiered system the interpreter doesn't
915 // do profiling and by the time we get to the lower tier from the interpreter
916 // the path may be cold again. Make sure it doesn't look untaken
917 profile_taken_branch(target_bci, !ProfileInterpreter);
918 uncommon_trap(Deoptimization::Reason_unreached,
919 Deoptimization::Action_reinterpret,
920 NULL, "cold");
921 if (EliminateAutoBox) {
922 // Mark the successor blocks as parsed
923 branch_block->next_path_num();
924 next_block->next_path_num();
925 }
926 return;
927 }
929 // If this is a backwards branch in the bytecodes, add Safepoint
930 maybe_add_safepoint(target_bci);
932 explicit_null_checks_inserted++;
933 Node* a = null();
934 Node* b = pop();
935 Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
937 // Make a cast-away-nullness that is control dependent on the test
938 const Type *t = _gvn.type(b);
939 const Type *t_not_null = t->join(TypePtr::NOTNULL);
940 Node *cast = new (C, 2) CastPPNode(b,t_not_null);
942 // Generate real control flow
943 Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );
945 // Sanity check the probability value
946 assert(prob > 0.0f,"Bad probability in Parser");
947 // Need xform to put node in hash table
948 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
949 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
950 // True branch
951 { PreserveJVMState pjvms(this);
952 Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
953 set_control(iftrue);
955 if (stopped()) { // Path is dead?
956 explicit_null_checks_elided++;
957 if (EliminateAutoBox) {
958 // Mark the successor block as parsed
959 branch_block->next_path_num();
960 }
961 } else { // Path is live.
962 // Update method data
963 profile_taken_branch(target_bci);
964 adjust_map_after_if(btest, c, prob, branch_block, next_block);
965 if (!stopped())
966 merge(target_bci);
967 }
968 }
970 // False branch
971 Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
972 set_control(iffalse);
974 if (stopped()) { // Path is dead?
975 explicit_null_checks_elided++;
976 if (EliminateAutoBox) {
977 // Mark the successor block as parsed
978 next_block->next_path_num();
979 }
980 } else { // Path is live.
981 // Update method data
982 profile_not_taken_branch();
983 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
984 next_block, branch_block);
985 }
986 }
988 //------------------------------------do_if------------------------------------
989 void Parse::do_if(BoolTest::mask btest, Node* c) {
990 int target_bci = iter().get_dest();
992 Block* branch_block = successor_for_bci(target_bci);
993 Block* next_block = successor_for_bci(iter().next_bci());
995 float cnt;
996 float prob = branch_prediction(cnt, btest, target_bci);
997 float untaken_prob = 1.0 - prob;
999 if (prob == PROB_UNKNOWN) {
1000 #ifndef PRODUCT
1001 if (PrintOpto && Verbose)
1002 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
1003 #endif
1004 repush_if_args(); // to gather stats on loop
1005 // We need to mark this branch as taken so that if we recompile we will
1006 // see that it is possible. In the tiered system the interpreter doesn't
1007 // do profiling and by the time we get to the lower tier from the interpreter
1008 // the path may be cold again. Make sure it doesn't look untaken
1009 profile_taken_branch(target_bci, !ProfileInterpreter);
1010 uncommon_trap(Deoptimization::Reason_unreached,
1011 Deoptimization::Action_reinterpret,
1012 NULL, "cold");
1013 if (EliminateAutoBox) {
1014 // Mark the successor blocks as parsed
1015 branch_block->next_path_num();
1016 next_block->next_path_num();
1017 }
1018 return;
1019 }
1021 // Sanity check the probability value
1022 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1024 bool taken_if_true = true;
1025 // Convert BoolTest to canonical form:
1026 if (!BoolTest(btest).is_canonical()) {
1027 btest = BoolTest(btest).negate();
1028 taken_if_true = false;
1029 // prob is NOT updated here; it remains the probability of the taken
1030 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1031 }
1032 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1034 Node* tst0 = new (C, 2) BoolNode(c, btest);
1035 Node* tst = _gvn.transform(tst0);
1036 BoolTest::mask taken_btest = BoolTest::illegal;
1037 BoolTest::mask untaken_btest = BoolTest::illegal;
1039 if (tst->is_Bool()) {
1040 // Refresh c from the transformed bool node, since it may be
1041 // simpler than the original c. Also re-canonicalize btest.
1042 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1043 // That can arise from statements like: if (x instanceof C) ...
1044 if (tst != tst0) {
1045 // Canonicalize one more time since transform can change it.
1046 btest = tst->as_Bool()->_test._test;
1047 if (!BoolTest(btest).is_canonical()) {
1048 // Reverse edges one more time...
1049 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1050 btest = tst->as_Bool()->_test._test;
1051 assert(BoolTest(btest).is_canonical(), "sanity");
1052 taken_if_true = !taken_if_true;
1053 }
1054 c = tst->in(1);
1055 }
1056 BoolTest::mask neg_btest = BoolTest(btest).negate();
1057 taken_btest = taken_if_true ? btest : neg_btest;
1058 untaken_btest = taken_if_true ? neg_btest : btest;
1059 }
1061 // Generate real control flow
1062 float true_prob = (taken_if_true ? prob : untaken_prob);
1063 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1064 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1065 Node* taken_branch = new (C, 1) IfTrueNode(iff);
1066 Node* untaken_branch = new (C, 1) IfFalseNode(iff);
1067 if (!taken_if_true) { // Finish conversion to canonical form
1068 Node* tmp = taken_branch;
1069 taken_branch = untaken_branch;
1070 untaken_branch = tmp;
1071 }
1073 // Branch is taken:
1074 { PreserveJVMState pjvms(this);
1075 taken_branch = _gvn.transform(taken_branch);
1076 set_control(taken_branch);
1078 if (stopped()) {
1079 if (EliminateAutoBox) {
1080 // Mark the successor block as parsed
1081 branch_block->next_path_num();
1082 }
1083 } else {
1084 // Update method data
1085 profile_taken_branch(target_bci);
1086 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1087 if (!stopped())
1088 merge(target_bci);
1089 }
1090 }
1092 untaken_branch = _gvn.transform(untaken_branch);
1093 set_control(untaken_branch);
1095 // Branch not taken.
1096 if (stopped()) {
1097 if (EliminateAutoBox) {
1098 // Mark the successor block as parsed
1099 next_block->next_path_num();
1100 }
1101 } else {
1102 // Update method data
1103 profile_not_taken_branch();
1104 adjust_map_after_if(untaken_btest, c, untaken_prob,
1105 next_block, branch_block);
1106 }
1107 }
1109 //----------------------------adjust_map_after_if------------------------------
1110 // Adjust the JVM state to reflect the result of taking this path.
1111 // Basically, it means inspecting the CmpNode controlling this
1112 // branch, seeing how it constrains a tested value, and then
1113 // deciding if it's worth our while to encode this constraint
1114 // as graph nodes in the current abstract interpretation map.
1115 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1116 Block* path, Block* other_path) {
1117 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1118 return; // nothing to do
1120 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1122 int cop = c->Opcode();
1123 if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) {
1124 // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.)
1125 //
1126 // If this might possibly turn into an implicit null check,
1127 // and the null has never yet been seen, we need to generate
1128 // an uncommon trap, so as to recompile instead of suffering
1129 // with very slow branches. (We'll get the slow branches if
1130 // the program ever changes phase and starts seeing nulls here.)
1131 //
1132 // The tests we worry about are of the form (p == null).
1133 // We do not simply inspect for a null constant, since a node may
1134 // optimize to 'null' later on.
1135 repush_if_args();
1136 // We need to mark this branch as taken so that if we recompile we will
1137 // see that it is possible. In the tiered system the interpreter doesn't
1138 // do profiling and by the time we get to the lower tier from the interpreter
1139 // the path may be cold again. Make sure it doesn't look untaken
1140 if (is_fallthrough) {
1141 profile_not_taken_branch(!ProfileInterpreter);
1142 } else {
1143 profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
1144 }
1145 uncommon_trap(Deoptimization::Reason_unreached,
1146 Deoptimization::Action_reinterpret,
1147 NULL,
1148 (is_fallthrough ? "taken always" : "taken never"));
1149 return;
1150 }
1152 Node* val = c->in(1);
1153 Node* con = c->in(2);
1154 const Type* tcon = _gvn.type(con);
1155 const Type* tval = _gvn.type(val);
1156 bool have_con = tcon->singleton();
1157 if (tval->singleton()) {
1158 if (!have_con) {
1159 // Swap, so constant is in con.
1160 con = val;
1161 tcon = tval;
1162 val = c->in(2);
1163 tval = _gvn.type(val);
1164 btest = BoolTest(btest).commute();
1165 have_con = true;
1166 } else {
1167 // Do we have two constants? Then leave well enough alone.
1168 have_con = false;
1169 }
1170 }
1171 if (!have_con) // remaining adjustments need a con
1172 return;
1175 int val_in_map = map()->find_edge(val);
1176 if (val_in_map < 0) return; // replace_in_map would be useless
1177 {
1178 JVMState* jvms = this->jvms();
1179 if (!(jvms->is_loc(val_in_map) ||
1180 jvms->is_stk(val_in_map)))
1181 return; // again, it would be useless
1182 }
1184 // Check for a comparison to a constant, and "know" that the compared
1185 // value is constrained on this path.
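// (For example, on the taken arm of "if (x == 7)" an int x is replaced in
//  the map by a CastII narrowed to the constant's type, and a pointer
//  known to be non-null is replaced by a cast_not_null value.)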
1186 assert(tcon->singleton(), "");
1187 ConstraintCastNode* ccast = NULL;
1188 Node* cast = NULL;
1190 switch (btest) {
1191 case BoolTest::eq: // Constant test?
1192 {
1193 const Type* tboth = tcon->join(tval);
1194 if (tboth == tval) break; // Nothing to gain.
1195 if (tcon->isa_int()) {
1196 ccast = new (C, 2) CastIINode(val, tboth);
1197 } else if (tcon == TypePtr::NULL_PTR) {
1198 // Cast to null, but keep the pointer identity temporarily live.
1199 ccast = new (C, 2) CastPPNode(val, tboth);
1200 } else {
1201 const TypeF* tf = tcon->isa_float_constant();
1202 const TypeD* td = tcon->isa_double_constant();
1203 // Exclude tests vs float/double 0 as these could be
1204 // either +0 or -0. Just because you are equal to +0
1205 // doesn't mean you ARE +0!
1206 if ((!tf || tf->_f != 0.0) &&
1207 (!td || td->_d != 0.0))
1208 cast = con; // Replace non-constant val by con.
1209 }
1210 }
1211 break;
1213 case BoolTest::ne:
1214 if (tcon == TypePtr::NULL_PTR) {
1215 cast = cast_not_null(val, false);
1216 }
1217 break;
1219 default:
1220 // (At this point we could record int range types with CastII.)
1221 break;
1222 }
1224 if (ccast != NULL) {
1225 const Type* tcc = ccast->as_Type()->type();
1226 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1227 // Delay transform() call to allow recovery of pre-cast value
1228 // at the control merge.
1229 ccast->set_req(0, control());
1230 _gvn.set_type_bottom(ccast);
1231 record_for_igvn(ccast);
1232 cast = ccast;
1233 }
1235 if (cast != NULL) { // Here's the payoff.
1236 replace_in_map(val, cast);
1237 }
1238 }
1241 //------------------------------do_one_bytecode--------------------------------
1242 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1243 void Parse::do_one_bytecode() {
1244 Node *a, *b, *c, *d; // Handy temps
1245 BoolTest::mask btest;
1246 int i;
1248 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1250 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1251 "out of nodes parsing method")) {
1252 return;
1253 }
1255 #ifdef ASSERT
1256 // for setting breakpoints
1257 if (TraceOptoParse) {
1258 tty->print(" @");
1259 dump_bci(bci());
1260 }
1261 #endif
1263 switch (bc()) {
1264 case Bytecodes::_nop:
1265 // do nothing
1266 break;
1267 case Bytecodes::_lconst_0:
1268 push_pair(longcon(0));
1269 break;
1271 case Bytecodes::_lconst_1:
1272 push_pair(longcon(1));
1273 break;
1275 case Bytecodes::_fconst_0:
1276 push(zerocon(T_FLOAT));
1277 break;
1279 case Bytecodes::_fconst_1:
1280 push(makecon(TypeF::ONE));
1281 break;
1283 case Bytecodes::_fconst_2:
1284 push(makecon(TypeF::make(2.0f)));
1285 break;
1287 case Bytecodes::_dconst_0:
1288 push_pair(zerocon(T_DOUBLE));
1289 break;
1291 case Bytecodes::_dconst_1:
1292 push_pair(makecon(TypeD::ONE));
1293 break;
1295 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1296 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1297 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1298 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1299 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1300 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1301 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1302 case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
1303 case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
1304 case Bytecodes::_aconst_null: push(null()); break;
1305 case Bytecodes::_ldc:
1306 case Bytecodes::_ldc_w:
1307 case Bytecodes::_ldc2_w:
1308 // If the constant is unresolved, run this BC once in the interpreter.
1309 if (iter().is_unresolved_string()) {
1310 uncommon_trap(Deoptimization::make_trap_request
1311 (Deoptimization::Reason_unloaded,
1312 Deoptimization::Action_reinterpret,
1313 iter().get_constant_index()),
1314 NULL, "unresolved_string");
1315 break;
1316 } else {
1317 ciConstant constant = iter().get_constant();
1318 if (constant.basic_type() == T_OBJECT) {
1319 ciObject* c = constant.as_object();
1320 if (c->is_klass()) {
1321 // The constant returned for a klass is the ciKlass for the
1322 // entry. We want the java_mirror so get it.
1323 ciKlass* klass = c->as_klass();
1324 if (klass->is_loaded()) {
1325 constant = ciConstant(T_OBJECT, klass->java_mirror());
1326 } else {
1327 uncommon_trap(Deoptimization::make_trap_request
1328 (Deoptimization::Reason_unloaded,
1329 Deoptimization::Action_reinterpret,
1330 iter().get_constant_index()),
1331 NULL, "unresolved_klass");
1332 break;
1333 }
1334 }
1335 }
1336 push_constant(constant);
1337 }
1339 break;
1341 case Bytecodes::_aload_0:
1342 push( local(0) );
1343 break;
1344 case Bytecodes::_aload_1:
1345 push( local(1) );
1346 break;
1347 case Bytecodes::_aload_2:
1348 push( local(2) );
1349 break;
1350 case Bytecodes::_aload_3:
1351 push( local(3) );
1352 break;
1353 case Bytecodes::_aload:
1354 push( local(iter().get_index()) );
1355 break;
1357 case Bytecodes::_fload_0:
1358 case Bytecodes::_iload_0:
1359 push( local(0) );
1360 break;
1361 case Bytecodes::_fload_1:
1362 case Bytecodes::_iload_1:
1363 push( local(1) );
1364 break;
1365 case Bytecodes::_fload_2:
1366 case Bytecodes::_iload_2:
1367 push( local(2) );
1368 break;
1369 case Bytecodes::_fload_3:
1370 case Bytecodes::_iload_3:
1371 push( local(3) );
1372 break;
1373 case Bytecodes::_fload:
1374 case Bytecodes::_iload:
1375 push( local(iter().get_index()) );
1376 break;
1377 case Bytecodes::_lload_0:
1378 push_pair_local( 0 );
1379 break;
1380 case Bytecodes::_lload_1:
1381 push_pair_local( 1 );
1382 break;
1383 case Bytecodes::_lload_2:
1384 push_pair_local( 2 );
1385 break;
1386 case Bytecodes::_lload_3:
1387 push_pair_local( 3 );
1388 break;
1389 case Bytecodes::_lload:
1390 push_pair_local( iter().get_index() );
1391 break;
1393 case Bytecodes::_dload_0:
1394 push_pair_local(0);
1395 break;
1396 case Bytecodes::_dload_1:
1397 push_pair_local(1);
1398 break;
1399 case Bytecodes::_dload_2:
1400 push_pair_local(2);
1401 break;
1402 case Bytecodes::_dload_3:
1403 push_pair_local(3);
1404 break;
1405 case Bytecodes::_dload:
1406 push_pair_local(iter().get_index());
1407 break;
1408 case Bytecodes::_fstore_0:
1409 case Bytecodes::_istore_0:
1410 case Bytecodes::_astore_0:
1411 set_local( 0, pop() );
1412 break;
1413 case Bytecodes::_fstore_1:
1414 case Bytecodes::_istore_1:
1415 case Bytecodes::_astore_1:
1416 set_local( 1, pop() );
1417 break;
1418 case Bytecodes::_fstore_2:
1419 case Bytecodes::_istore_2:
1420 case Bytecodes::_astore_2:
1421 set_local( 2, pop() );
1422 break;
1423 case Bytecodes::_fstore_3:
1424 case Bytecodes::_istore_3:
1425 case Bytecodes::_astore_3:
1426 set_local( 3, pop() );
1427 break;
1428 case Bytecodes::_fstore:
1429 case Bytecodes::_istore:
1430 case Bytecodes::_astore:
1431 set_local( iter().get_index(), pop() );
1432 break;
1433 // long stores
1434 case Bytecodes::_lstore_0:
1435 set_pair_local( 0, pop_pair() );
1436 break;
1437 case Bytecodes::_lstore_1:
1438 set_pair_local( 1, pop_pair() );
1439 break;
1440 case Bytecodes::_lstore_2:
1441 set_pair_local( 2, pop_pair() );
1442 break;
1443 case Bytecodes::_lstore_3:
1444 set_pair_local( 3, pop_pair() );
1445 break;
1446 case Bytecodes::_lstore:
1447 set_pair_local( iter().get_index(), pop_pair() );
1448 break;
1450 // double stores
1451 case Bytecodes::_dstore_0:
1452 set_pair_local( 0, dstore_rounding(pop_pair()) );
1453 break;
1454 case Bytecodes::_dstore_1:
1455 set_pair_local( 1, dstore_rounding(pop_pair()) );
1456 break;
1457 case Bytecodes::_dstore_2:
1458 set_pair_local( 2, dstore_rounding(pop_pair()) );
1459 break;
1460 case Bytecodes::_dstore_3:
1461 set_pair_local( 3, dstore_rounding(pop_pair()) );
1462 break;
1463 case Bytecodes::_dstore:
1464 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1465 break;
1467 case Bytecodes::_pop: _sp -= 1; break;
1468 case Bytecodes::_pop2: _sp -= 2; break;
1469 case Bytecodes::_swap:
1470 a = pop();
1471 b = pop();
1472 push(a);
1473 push(b);
1474 break;
1475 case Bytecodes::_dup:
1476 a = pop();
1477 push(a);
1478 push(a);
1479 break;
1480 case Bytecodes::_dup_x1:
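// before: .. b, a
// after: .. a, b, a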
1481 a = pop();
1482 b = pop();
1483 push( a );
1484 push( b );
1485 push( a );
1486 break;
1487 case Bytecodes::_dup_x2:
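// before: .. c, b, a
// after: .. a, c, b, a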
1488 a = pop();
1489 b = pop();
1490 c = pop();
1491 push( a );
1492 push( c );
1493 push( b );
1494 push( a );
1495 break;
1496 case Bytecodes::_dup2:
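// before: .. b, a
// after: .. b, a, b, a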
1497 a = pop();
1498 b = pop();
1499 push( b );
1500 push( a );
1501 push( b );
1502 push( a );
1503 break;
1505 case Bytecodes::_dup2_x1:
1506 // before: .. c, b, a
1507 // after: .. b, a, c, b, a
1508 // not tested
1509 a = pop();
1510 b = pop();
1511 c = pop();
1512 push( b );
1513 push( a );
1514 push( c );
1515 push( b );
1516 push( a );
1517 break;
1518 case Bytecodes::_dup2_x2:
1519 // before: .. d, c, b, a
1520 // after: .. b, a, d, c, b, a
1521 // not tested
1522 a = pop();
1523 b = pop();
1524 c = pop();
1525 d = pop();
1526 push( b );
1527 push( a );
1528 push( d );
1529 push( c );
1530 push( b );
1531 push( a );
1532 break;
1534 case Bytecodes::_arraylength: {
1535 // Must do null-check with value on expression stack
1536 Node *ary = do_null_check(peek(), T_ARRAY);
1537 // Compile-time detection of a null exception?
1538 if (stopped()) return;
1539 a = pop();
1540 push(load_array_length(a));
1541 break;
1542 }
1544 case Bytecodes::_baload: array_load(T_BYTE); break;
1545 case Bytecodes::_caload: array_load(T_CHAR); break;
1546 case Bytecodes::_iaload: array_load(T_INT); break;
1547 case Bytecodes::_saload: array_load(T_SHORT); break;
1548 case Bytecodes::_faload: array_load(T_FLOAT); break;
1549 case Bytecodes::_aaload: array_load(T_OBJECT); break;
1550 case Bytecodes::_laload: {
1551 a = array_addressing(T_LONG, 0);
1552 if (stopped()) return; // guaranteed null or range check
1553 _sp -= 2; // Pop array and index
1554 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
1555 break;
1556 }
1557 case Bytecodes::_daload: {
1558 a = array_addressing(T_DOUBLE, 0);
1559 if (stopped()) return; // guaranteed null or range check
1560 _sp -= 2; // Pop array and index
1561 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
1562 break;
1563 }
1564 case Bytecodes::_bastore: array_store(T_BYTE); break;
1565 case Bytecodes::_castore: array_store(T_CHAR); break;
1566 case Bytecodes::_iastore: array_store(T_INT); break;
1567 case Bytecodes::_sastore: array_store(T_SHORT); break;
1568 case Bytecodes::_fastore: array_store(T_FLOAT); break;
1569 case Bytecodes::_aastore: {
1570 d = array_addressing(T_OBJECT, 1);
1571 if (stopped()) return; // guaranteed null or range check
1572 array_store_check();
1573 c = pop(); // Oop to store
1574 b = pop(); // index (already used)
1575 a = pop(); // the array itself
1576 const Type* elemtype = _gvn.type(a)->is_aryptr()->elem();
1577 const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1578 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
1579 break;
1580 }
1581 case Bytecodes::_lastore: {
1582 a = array_addressing(T_LONG, 2);
1583 if (stopped()) return; // guaranteed null or range check
1584 c = pop_pair();
1585 _sp -= 2; // Pop array and index
1586 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
1587 break;
1588 }
1589 case Bytecodes::_dastore: {
1590 a = array_addressing(T_DOUBLE, 2);
1591 if (stopped()) return; // guaranteed null or range check
1592 c = pop_pair();
1593 _sp -= 2; // Pop array and index
1594 c = dstore_rounding(c);
1595 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
1596 break;
1597 }
1598 case Bytecodes::_getfield:
1599 do_getfield();
1600 break;
1602 case Bytecodes::_getstatic:
1603 do_getstatic();
1604 break;
1606 case Bytecodes::_putfield:
1607 do_putfield();
1608 break;
1610 case Bytecodes::_putstatic:
1611 do_putstatic();
1612 break;
1614 case Bytecodes::_irem:
1615 do_irem();
1616 break;
1617 case Bytecodes::_idiv:
1618 // Must keep both values on the expression-stack during null-check
1619 do_null_check(peek(), T_INT);
1620 // Compile-time detection of a null exception?
1621 if (stopped()) return;
1622 b = pop();
1623 a = pop();
1624 push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
1625 break;
1626 case Bytecodes::_imul:
1627 b = pop(); a = pop();
1628 push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
1629 break;
1630 case Bytecodes::_iadd:
1631 b = pop(); a = pop();
1632 push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
1633 break;
1634 case Bytecodes::_ineg:
1635 a = pop();
1636 push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
1637 break;
1638 case Bytecodes::_isub:
1639 b = pop(); a = pop();
1640 push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
1641 break;
1642 case Bytecodes::_iand:
1643 b = pop(); a = pop();
1644 push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
1645 break;
1646 case Bytecodes::_ior:
1647 b = pop(); a = pop();
1648 push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
1649 break;
1650 case Bytecodes::_ixor:
1651 b = pop(); a = pop();
1652 push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
1653 break;
1654 case Bytecodes::_ishl:
1655 b = pop(); a = pop();
1656 push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
1657 break;
1658 case Bytecodes::_ishr:
1659 b = pop(); a = pop();
1660 push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
1661 break;
1662 case Bytecodes::_iushr:
1663 b = pop(); a = pop();
1664 push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
1665 break;
1667 case Bytecodes::_fneg:
1668 a = pop();
1669 b = _gvn.transform(new (C, 2) NegFNode (a));
1670 push(b);
1671 break;
1673 case Bytecodes::_fsub:
1674 b = pop();
1675 a = pop();
1676 c = _gvn.transform( new (C, 3) SubFNode(a,b) );
1677 d = precision_rounding(c);
1678 push( d );
1679 break;
1681 case Bytecodes::_fadd:
1682 b = pop();
1683 a = pop();
1684 c = _gvn.transform( new (C, 3) AddFNode(a,b) );
1685 d = precision_rounding(c);
1686 push( d );
1687 break;
1689 case Bytecodes::_fmul:
1690 b = pop();
1691 a = pop();
1692 c = _gvn.transform( new (C, 3) MulFNode(a,b) );
1693 d = precision_rounding(c);
1694 push( d );
1695 break;
1697 case Bytecodes::_fdiv:
1698 b = pop();
1699 a = pop();
1700 c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
1701 d = precision_rounding(c);
1702 push( d );
1703 break;
1705 case Bytecodes::_frem:
1706 if (Matcher::has_match_rule(Op_ModF)) {
1707 // Generate a ModF node.
1708 b = pop();
1709 a = pop();
1710 c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
1711 d = precision_rounding(c);
1712 push( d );
1713 }
1714 else {
1715 // Generate a call.
1716 modf();
1717 }
1718 break;
1720 case Bytecodes::_fcmpl:
1721 b = pop();
1722 a = pop();
1723 c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
1724 push(c);
1725 break;
1726 case Bytecodes::_fcmpg:
1727 b = pop();
1728 a = pop();
1730 // Same as fcmpl, but we need to flip the unordered case.  Swap the inputs,
1731 // which negates the result sign except for unordered.  Flip the unordered
1732 // as well by using CmpF3, which implements unordered-lesser instead of
1733 // unordered-greater semantics.  Finally, negate the result bits.  The result
1734 // is the same as using a CmpF3Greater, except we did it with CmpF3 alone.
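     // For example, with a NaN on either input: CmpF3(b,a) yields -1
     // (unordered-lesser), and negating gives +1, which is the value fcmpg
     // must produce for an unordered comparison.  For ordered inputs the
     // swap-then-negate simply restores the original compare(a,b) sign.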
1735 c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
1736 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1737 push(c);
1738 break;
1740 case Bytecodes::_f2i:
1741 a = pop();
1742 push(_gvn.transform(new (C, 2) ConvF2INode(a)));
1743 break;
1745 case Bytecodes::_d2i:
1746 a = pop_pair();
1747 b = _gvn.transform(new (C, 2) ConvD2INode(a));
1748 push( b );
1749 break;
1751 case Bytecodes::_f2d:
1752 a = pop();
1753 b = _gvn.transform( new (C, 2) ConvF2DNode(a));
1754 push_pair( b );
1755 break;
1757 case Bytecodes::_d2f:
1758 a = pop_pair();
1759 b = _gvn.transform( new (C, 2) ConvD2FNode(a));
1760 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1761 //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
1762 push( b );
1763 break;
1765 case Bytecodes::_l2f:
1766 if (Matcher::convL2FSupported()) {
1767 a = pop_pair();
1768 b = _gvn.transform( new (C, 2) ConvL2FNode(a));
1769 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1770 // Rather than storing the result into an FP register then pushing
1771 // out to memory to round, the machine instruction that implements
1772 // ConvL2F is responsible for rounding.
1773 // c = precision_rounding(b);
1774 c = _gvn.transform(b);
1775 push(c);
1776 } else {
1777 l2f();
1778 }
1779 break;
1781 case Bytecodes::_l2d:
1782 a = pop_pair();
1783 b = _gvn.transform( new (C, 2) ConvL2DNode(a));
1784 // For i486.ad, rounding is always necessary (see _l2f above).
1785 // c = dprecision_rounding(b);
1786 c = _gvn.transform(b);
1787 push_pair(c);
1788 break;
1790 case Bytecodes::_f2l:
1791 a = pop();
1792 b = _gvn.transform( new (C, 2) ConvF2LNode(a));
1793 push_pair(b);
1794 break;
1796 case Bytecodes::_d2l:
1797 a = pop_pair();
1798 b = _gvn.transform( new (C, 2) ConvD2LNode(a));
1799 push_pair(b);
1800 break;
1802 case Bytecodes::_dsub:
1803 b = pop_pair();
1804 a = pop_pair();
1805 c = _gvn.transform( new (C, 3) SubDNode(a,b) );
1806 d = dprecision_rounding(c);
1807 push_pair( d );
1808 break;
1810 case Bytecodes::_dadd:
1811 b = pop_pair();
1812 a = pop_pair();
1813 c = _gvn.transform( new (C, 3) AddDNode(a,b) );
1814 d = dprecision_rounding(c);
1815 push_pair( d );
1816 break;
1818 case Bytecodes::_dmul:
1819 b = pop_pair();
1820 a = pop_pair();
1821 c = _gvn.transform( new (C, 3) MulDNode(a,b) );
1822 d = dprecision_rounding(c);
1823 push_pair( d );
1824 break;
1826 case Bytecodes::_ddiv:
1827 b = pop_pair();
1828 a = pop_pair();
1829 c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
1830 d = dprecision_rounding(c);
1831 push_pair( d );
1832 break;
1834 case Bytecodes::_dneg:
1835 a = pop_pair();
1836 b = _gvn.transform(new (C, 2) NegDNode (a));
1837 push_pair(b);
1838 break;
1840 case Bytecodes::_drem:
1841 if (Matcher::has_match_rule(Op_ModD)) {
1842 // Generate a ModD node.
1843 b = pop_pair();
1844 a = pop_pair();
1845 // a % b
1847 c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
1848 d = dprecision_rounding(c);
1849 push_pair( d );
1850 }
1851 else {
1852 // Generate a call.
1853 modd();
1854 }
1855 break;
1857 case Bytecodes::_dcmpl:
1858 b = pop_pair();
1859 a = pop_pair();
1860 c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
1861 push(c);
1862 break;
1864 case Bytecodes::_dcmpg:
1865 b = pop_pair();
1866 a = pop_pair();
1867 // Same as dcmpl, but we need to flip the unordered case.
1868 // Swap the inputs, which negates the result sign except for unordered.
1869 // Flip the unordered as well by using CmpD3, which implements
1870 // unordered-lesser instead of unordered-greater semantics.
1871 // Finally, negate the result bits.  The result is the same as using a
1872 // CmpD3Greater, except we did it with CmpD3 alone.
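     // (Same NaN/ordered reasoning as the fcmpg case above, applied to doubles.)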
1873 c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
1874 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1875 push(c);
1876 break;
1879 // Note for longs -> lo word is on TOS, hi word is on TOS - 1
1880 case Bytecodes::_land:
1881 b = pop_pair();
1882 a = pop_pair();
1883 c = _gvn.transform( new (C, 3) AndLNode(a,b) );
1884 push_pair(c);
1885 break;
1886 case Bytecodes::_lor:
1887 b = pop_pair();
1888 a = pop_pair();
1889 c = _gvn.transform( new (C, 3) OrLNode(a,b) );
1890 push_pair(c);
1891 break;
1892 case Bytecodes::_lxor:
1893 b = pop_pair();
1894 a = pop_pair();
1895 c = _gvn.transform( new (C, 3) XorLNode(a,b) );
1896 push_pair(c);
1897 break;
1899 case Bytecodes::_lshl:
1900 b = pop(); // the shift count
1901 a = pop_pair(); // value to be shifted
1902 c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
1903 push_pair(c);
1904 break;
1905 case Bytecodes::_lshr:
1906 b = pop(); // the shift count
1907 a = pop_pair(); // value to be shifted
1908 c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
1909 push_pair(c);
1910 break;
1911 case Bytecodes::_lushr:
1912 b = pop(); // the shift count
1913 a = pop_pair(); // value to be shifted
1914 c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
1915 push_pair(c);
1916 break;
1917 case Bytecodes::_lmul:
1918 b = pop_pair();
1919 a = pop_pair();
1920 c = _gvn.transform( new (C, 3) MulLNode(a,b) );
1921 push_pair(c);
1922 break;
1924 case Bytecodes::_lrem:
1925 // Must keep both values on the expression-stack during null-check
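     // peek(0) is the dummy second half of the long divisor (pushed by
     // push_pair); the actual divisor node is at peek(1), which is what the
     // zero check below examines.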
1926 assert(peek(0) == top(), "long word order");
1927 do_null_check(peek(1), T_LONG);
1928 // Compile-time detection of a null exception?
1929 if (stopped()) return;
1930 b = pop_pair();
1931 a = pop_pair();
1932 c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
1933 push_pair(c);
1934 break;
1936 case Bytecodes::_ldiv:
1937 // Must keep both values on the expression-stack during null-check
1938 assert(peek(0) == top(), "long word order");
1939 do_null_check(peek(1), T_LONG);
1940 // Compile-time detection of a null exception?
1941 if (stopped()) return;
1942 b = pop_pair();
1943 a = pop_pair();
1944 c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
1945 push_pair(c);
1946 break;
1948 case Bytecodes::_ladd:
1949 b = pop_pair();
1950 a = pop_pair();
1951 c = _gvn.transform( new (C, 3) AddLNode(a,b) );
1952 push_pair(c);
1953 break;
1954 case Bytecodes::_lsub:
1955 b = pop_pair();
1956 a = pop_pair();
1957 c = _gvn.transform( new (C, 3) SubLNode(a,b) );
1958 push_pair(c);
1959 break;
1960 case Bytecodes::_lcmp:
1961 // Safepoints are now inserted _before_ branches. The long-compare
1962 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
1963 // slew of control flow. These are usually followed by a CmpI vs zero and
1964 // a branch; this pattern then optimizes to the obvious long-compare and
1965 // branch. However, if the branch is backwards there's a Safepoint
1966 // inserted. The inserted Safepoint captures the JVM state at the
1967 // pre-branch point, i.e. it captures the 3-way value. Thus if a
1968 // long-compare is used to control a loop the debug info will force
1969 // computation of the 3-way value, even though the generated code uses a
1970 // long-compare and branch.  We try to rectify the situation by inserting
1971 // a SafePoint here and having it dominate and kill the safepoint added at a
1972 // following backwards branch.  At this point the JVM state merely holds the
1973 // two longs, not the 3-way value.
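     // Illustrative case: a loop controlled by "lcmp; iflt <backward branch>".
     // Without the SafePoint added here, the safepoint at the backward branch
     // would capture a JVM state in which the -1/0/+1 value is live, forcing
     // it to be computed for the debug info even though the compiled code only
     // needs a long-compare and branch.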
1974 if( UseLoopSafepoints ) {
1975 switch( iter().next_bc() ) {
1976 case Bytecodes::_ifgt:
1977 case Bytecodes::_iflt:
1978 case Bytecodes::_ifge:
1979 case Bytecodes::_ifle:
1980 case Bytecodes::_ifne:
1981 case Bytecodes::_ifeq:
1982 // If this is a backwards branch in the bytecodes, add Safepoint
1983 maybe_add_safepoint(iter().next_get_dest());
1984 }
1985 }
1986 b = pop_pair();
1987 a = pop_pair();
1988 c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
1989 push(c);
1990 break;
1992 case Bytecodes::_lneg:
1993 a = pop_pair();
1994 b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
1995 push_pair(b);
1996 break;
1997 case Bytecodes::_l2i:
1998 a = pop_pair();
1999 push( _gvn.transform( new (C, 2) ConvL2INode(a)));
2000 break;
2001 case Bytecodes::_i2l:
2002 a = pop();
2003 b = _gvn.transform( new (C, 2) ConvI2LNode(a));
2004 push_pair(b);
2005 break;
2006 case Bytecodes::_i2b:
2007 // Sign extend
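     // e.g. 0x000000FF: << 24 gives 0xFF000000, then the arithmetic >> 24
     // gives 0xFFFFFFFF (-1), the correct byte-sized sign extension.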
2008 a = pop();
2009 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
2010 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
2011 push( a );
2012 break;
2013 case Bytecodes::_i2s:
2014 a = pop();
2015 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
2016 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
2017 push( a );
2018 break;
2019 case Bytecodes::_i2c:
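     // Zero-extend: masking with 0xFFFF keeps the low 16 bits, matching the
     // unsigned 16-bit (char) semantics of i2c.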
2020 a = pop();
2021 push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
2022 break;
2024 case Bytecodes::_i2f:
2025 a = pop();
2026 b = _gvn.transform( new (C, 2) ConvI2FNode(a) ) ;
2027 c = precision_rounding(b);
2028 push(c);
2029 break;
2031 case Bytecodes::_i2d:
2032 a = pop();
2033 b = _gvn.transform( new (C, 2) ConvI2DNode(a));
2034 push_pair(b);
2035 break;
2037 case Bytecodes::_iinc: // Increment local
2038 i = iter().get_index(); // Get local index
2039 set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2040 break;
2042 // Exit points of synchronized methods must have an unlock node
2043 case Bytecodes::_return:
2044 return_current(NULL);
2045 break;
2047 case Bytecodes::_ireturn:
2048 case Bytecodes::_areturn:
2049 case Bytecodes::_freturn:
2050 return_current(pop());
2051 break;
2052 case Bytecodes::_lreturn:
2053 return_current(pop_pair());
2054 break;
2055 case Bytecodes::_dreturn:
2056 return_current(pop_pair());
2057 break;
2059 case Bytecodes::_athrow:
2060 // A null exception oop throws a NullPointerException
2061 do_null_check(peek(), T_OBJECT);
2062 if (stopped()) return;
2063 if (JvmtiExport::can_post_exceptions()) {
2064 // "Full-speed throwing" is not necessary here,
2065 // since we're notifying the VM on every throw.
2066 uncommon_trap(Deoptimization::Reason_unhandled,
2067 Deoptimization::Action_none);
2068 return;
2069 }
2070 // Hook the thrown exception directly to subsequent handlers.
2071 if (BailoutToInterpreterForThrows) {
2072 // Keep method interpreted from now on.
2073 uncommon_trap(Deoptimization::Reason_unhandled,
2074 Deoptimization::Action_make_not_compilable);
2075 return;
2076 }
2077 add_exception_state(make_exception_state(peek()));
2078 break;
2080 case Bytecodes::_goto: // fall through
2081 case Bytecodes::_goto_w: {
2082 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2084 // If this is a backwards branch in the bytecodes, add Safepoint
2085 maybe_add_safepoint(target_bci);
2087 // Update method data
2088 profile_taken_branch(target_bci);
2090 // Merge the current control into the target basic block
2091 merge(target_bci);
2093 // See if we can get some profile data and hand it off to the next block
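     // This only makes sense when the target block has a single predecessor;
     // with more than one, the taken count of this branch alone does not
     // determine the target block's execution count.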
2094 Block *target_block = block()->successor_for_bci(target_bci);
2095 if (target_block->pred_count() != 1) break;
2096 ciMethodData* methodData = method()->method_data();
2097 if (!methodData->is_mature()) break;
2098 ciProfileData* data = methodData->bci_to_data(bci());
2099 assert( data->is_JumpData(), "need JumpData for taken branch" );
2100 int taken = ((ciJumpData*)data)->taken();
2101 taken = method()->scale_count(taken);
2102 target_block->set_count(taken);
2103 break;
2104 }
2106 case Bytecodes::_ifnull:
2107 do_ifnull(BoolTest::eq);
2108 break;
2109 case Bytecodes::_ifnonnull:
2110 do_ifnull(BoolTest::ne);
2111 break;
2113 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2114 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2115 handle_if_acmp:
2116 // If this is a backwards branch in the bytecodes, add Safepoint
2117 maybe_add_safepoint(iter().get_dest());
2118 a = pop();
2119 b = pop();
2120 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2121 do_if(btest, c);
2122 break;
2124 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2125 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2126 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2127 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2128 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2129 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2130 handle_ifxx:
2131 // If this is a backwards branch in the bytecodes, add Safepoint
2132 maybe_add_safepoint(iter().get_dest());
2133 a = _gvn.intcon(0);
2134 b = pop();
2135 c = _gvn.transform( new (C, 3) CmpINode(b, a) );
2136 do_if(btest, c);
2137 break;
2139 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2140 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2141 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2142 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2143 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2144 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2145 handle_if_icmp:
2146 // If this is a backwards branch in the bytecodes, add Safepoint
2147 maybe_add_safepoint(iter().get_dest());
2148 a = pop();
2149 b = pop();
2150 c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
2151 do_if(btest, c);
2152 break;
2154 case Bytecodes::_tableswitch:
2155 do_tableswitch();
2156 break;
2158 case Bytecodes::_lookupswitch:
2159 do_lookupswitch();
2160 break;
2162 case Bytecodes::_invokestatic:
2163 case Bytecodes::_invokespecial:
2164 case Bytecodes::_invokevirtual:
2165 case Bytecodes::_invokeinterface:
2166 do_call();
2167 break;
2168 case Bytecodes::_checkcast:
2169 do_checkcast();
2170 break;
2171 case Bytecodes::_instanceof:
2172 do_instanceof();
2173 break;
2174 case Bytecodes::_anewarray:
2175 do_anewarray();
2176 break;
2177 case Bytecodes::_newarray:
2178 do_newarray((BasicType)iter().get_index());
2179 break;
2180 case Bytecodes::_multianewarray:
2181 do_multianewarray();
2182 break;
2183 case Bytecodes::_new:
2184 do_new();
2185 break;
2187 case Bytecodes::_jsr:
2188 case Bytecodes::_jsr_w:
2189 do_jsr();
2190 break;
2192 case Bytecodes::_ret:
2193 do_ret();
2194 break;
2197 case Bytecodes::_monitorenter:
2198 do_monitor_enter();
2199 break;
2201 case Bytecodes::_monitorexit:
2202 do_monitor_exit();
2203 break;
2205 case Bytecodes::_breakpoint:
2206 // A breakpoint was set concurrently with compilation
2207 // %%% use an uncommon trap?
2208 C->record_failure("breakpoint in method");
2209 return;
2211 default:
2212 #ifndef PRODUCT
2213 map()->dump(99);
2214 #endif
2215 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2216 ShouldNotReachHere();
2217 }
2219 #ifndef PRODUCT
2220 IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2221 if (printer) {
2222 char buffer[256];
2223 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2224 bool old = printer->traverse_outs();
2225 printer->set_traverse_outs(true);
2226 printer->print_method(C, buffer, 3);
2227 printer->set_traverse_outs(old);
2228 }
2229 #endif
2230 }