Wed, 05 Dec 2007 09:01:00 -0800
6395208: Elide autoboxing for calls to HashMap.get(int) and HashMap.get(long)
Reviewed-by: kvn, rasbold
1 /*
2 * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_parse2.cpp.incl"
28 extern int explicit_null_checks_inserted,
29 explicit_null_checks_elided;
31 //---------------------------------array_load----------------------------------
32 void Parse::array_load(BasicType elem_type) {
33 const Type* elem = Type::TOP;
34 Node* adr = array_addressing(elem_type, 0, &elem);
35 if (stopped()) return; // guaranteed null or range check
36 _sp -= 2; // Pop array and index
37 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
38 Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
39 push(ld);
40 }
43 //--------------------------------array_store----------------------------------
44 void Parse::array_store(BasicType elem_type) {
45 Node* adr = array_addressing(elem_type, 1);
46 if (stopped()) return; // guaranteed null or range check
47 Node* val = pop();
48 _sp -= 2; // Pop array and index
49 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
50 store_to_memory(control(), adr, val, elem_type, adr_type);
51 }
54 //------------------------------array_addressing-------------------------------
55 // Pull array and index from the stack. Compute pointer-to-element.
56 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
57 Node *idx = peek(0+vals); // Get from stack without popping
58 Node *ary = peek(1+vals); // in case of exception
60 // Null check the array base, with correct stack contents
61 ary = do_null_check(ary, T_ARRAY);
62 // Compile-time detect of null-exception?
63 if (stopped()) return top();
65 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
66 const TypeInt* sizetype = arytype->size();
67 const Type* elemtype = arytype->elem();
69 if (UseUniqueSubclasses && result2 != NULL) {
70 const TypeInstPtr* toop = elemtype->isa_instptr();
71 if (toop) {
72 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
73 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
74 const Type* subklass = Type::get_const_type(toop->klass());
75 elemtype = subklass->join(elemtype);
76 }
77 }
78 }
80 // Check for big class initializers with all constant offsets
81 // feeding into a known-size array.
82 const TypeInt* idxtype = _gvn.type(idx)->is_int();
83 // See if the highest idx value is less than the lowest array bound,
84 // and if the idx value cannot be negative:
85 bool need_range_check = true;
86 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
87 need_range_check = false;
88 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
89 }
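// For example, a constant index 3 into an array whose length is known to
// be exactly 8 gives idxtype [3,3] and sizetype [8,8]; both tests pass and
// no range check is emitted.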
91 if (!arytype->klass()->is_loaded()) {
92 // Only fails for some -Xcomp runs
93 // The class is unloaded. We have to run this bytecode in the interpreter.
94 uncommon_trap(Deoptimization::Reason_unloaded,
95 Deoptimization::Action_reinterpret,
96 arytype->klass(), "!loaded array");
97 return top();
98 }
100 // Do the range check
101 if (GenerateRangeChecks && need_range_check) {
102 // Range is constant in array-oop, so we can use the original state of mem
103 Node* len = load_array_length(ary);
104 // Test length vs index (standard trick using unsigned compare)
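// A negative index reinterpreted as unsigned is larger than any possible
// array length, so this one unsigned "idx <u len" test covers both the
// idx >= 0 and idx < length conditions.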
105 Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
106 BoolTest::mask btest = BoolTest::lt;
107 Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
108 // Branch to failure if out of bounds
109 { BuildCutout unless(this, tst, PROB_MAX);
110 if (C->allow_range_check_smearing()) {
111 // Do not use builtin_throw, since range checks are sometimes
112 // made more stringent by an optimistic transformation.
113 // This creates "tentative" range checks at this point,
114 // which are not guaranteed to throw exceptions.
115 // See IfNode::Ideal, is_range_check, adjust_check.
116 uncommon_trap(Deoptimization::Reason_range_check,
117 Deoptimization::Action_make_not_entrant,
118 NULL, "range_check");
119 } else {
120 // If we have already recompiled with the range-check-widening
121 // heroic optimization turned off, then we must really be throwing
122 // range check exceptions.
123 builtin_throw(Deoptimization::Reason_range_check, idx);
124 }
125 }
126 }
127 // Check for always knowing you are throwing a range-check exception
128 if (stopped()) return top();
130 Node* ptr = array_element_address( ary, idx, type, sizetype);
132 if (result2 != NULL) *result2 = elemtype;
133 return ptr;
134 }
137 // returns IfNode
138 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
139 Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
140 Node *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
141 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
142 return iff;
143 }
145 // return Region node
146 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
147 Node *region = new (C, 3) RegionNode(3); // 2 results
148 record_for_igvn(region);
149 region->init_req(1, iffalse);
150 region->init_req(2, iftrue );
151 _gvn.set_type(region, Type::CONTROL);
152 region = _gvn.transform(region);
153 set_control (region);
154 return region;
155 }
158 //------------------------------helper for tableswitch-------------------------
159 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
160 // True branch, use existing map info
161 { PreserveJVMState pjvms(this);
162 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
163 set_control( iftrue );
164 profile_switch_case(prof_table_index);
165 merge_new_path(dest_bci_if_true);
166 }
168 // False branch
169 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
170 set_control( iffalse );
171 }
173 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
174 // True branch, use existing map info
175 { PreserveJVMState pjvms(this);
176 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
177 set_control( iffalse );
178 profile_switch_case(prof_table_index);
179 merge_new_path(dest_bci_if_true);
180 }
182 // False branch
183 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
184 set_control( iftrue );
185 }
187 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
188 // False branch, use existing map and control()
189 profile_switch_case(prof_table_index);
190 merge_new_path(dest_bci);
191 }
194 extern "C" {
195 static int jint_cmp(const void *i, const void *j) {
196 int a = *(jint *)i;
197 int b = *(jint *)j;
198 return a > b ? 1 : a < b ? -1 : 0;
199 }
200 }
203 // Default value for methodData switch indexing. Must be a negative value to avoid
204 // conflict with any legal switch index.
205 #define NullTableIndex -1
207 class SwitchRange : public StackObj {
208 // a range of integers coupled with a bci destination
209 jint _lo; // inclusive lower limit
210 jint _hi; // inclusive upper limit
211 int _dest;
212 int _table_index; // index into method data table
214 public:
215 jint lo() const { return _lo; }
216 jint hi() const { return _hi; }
217 int dest() const { return _dest; }
218 int table_index() const { return _table_index; }
219 bool is_singleton() const { return _lo == _hi; }
221 void setRange(jint lo, jint hi, int dest, int table_index) {
222 assert(lo <= hi, "must be a non-empty range");
223 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
224 }
225 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
226 assert(lo <= hi, "must be a non-empty range");
227 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
228 _hi = hi;
229 return true;
230 }
231 return false;
232 }
234 void set (jint value, int dest, int table_index) {
235 setRange(value, value, dest, table_index);
236 }
237 bool adjoin(jint value, int dest, int table_index) {
238 return adjoinRange(value, value, dest, table_index);
239 }
241 void print(ciEnv* env) {
242 if (is_singleton())
243 tty->print(" {%d}=>%d", lo(), dest());
244 else if (lo() == min_jint)
245 tty->print(" {..%d}=>%d", hi(), dest());
246 else if (hi() == max_jint)
247 tty->print(" {%d..}=>%d", lo(), dest());
248 else
249 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
250 }
251 };
254 //-------------------------------do_tableswitch--------------------------------
255 void Parse::do_tableswitch() {
256 Node* lookup = pop();
258 // Get information about tableswitch
259 int default_dest = iter().get_dest_table(0);
260 int lo_index = iter().get_int_table(1);
261 int hi_index = iter().get_int_table(2);
262 int len = hi_index - lo_index + 1;
264 if (len < 1) {
265 // If this is a backward branch, add safepoint
266 maybe_add_safepoint(default_dest);
267 merge(default_dest);
268 return;
269 }
271 // generate decision tree, using trichotomy when possible
272 int rnum = len+2;
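// len+2 leaves room for the (at most) two extra default ranges added
// below: one covering [min_jint, lo_index-1] and one covering
// [highest+1, max_jint].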
273 bool makes_backward_branch = false;
274 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
275 int rp = -1;
276 if (lo_index != min_jint) {
277 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
278 }
279 for (int j = 0; j < len; j++) {
280 jint match_int = lo_index+j;
281 int dest = iter().get_dest_table(j+3);
282 makes_backward_branch |= (dest <= bci());
283 int table_index = method_data_update() ? j : NullTableIndex;
284 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
285 ranges[++rp].set(match_int, dest, table_index);
286 }
287 }
288 jint highest = lo_index+(len-1);
289 assert(ranges[rp].hi() == highest, "");
290 if (highest != max_jint
291 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
292 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
293 }
294 assert(rp < len+2, "not too many ranges");
296 // Safepoint in case a backward branch is observed
297 if( makes_backward_branch && UseLoopSafepoints )
298 add_safepoint();
300 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
301 }
304 //------------------------------do_lookupswitch--------------------------------
305 void Parse::do_lookupswitch() {
306 Node *lookup = pop(); // lookup value
307 // Get information about lookupswitch
308 int default_dest = iter().get_dest_table(0);
309 int len = iter().get_int_table(1);
311 if (len < 1) { // If this is a backward branch, add safepoint
312 maybe_add_safepoint(default_dest);
313 merge(default_dest);
314 return;
315 }
317 // generate decision tree, using trichotomy when possible
318 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
319 {
320 for( int j = 0; j < len; j++ ) {
321 table[j+j+0] = iter().get_int_table(2+j+j);
322 table[j+j+1] = iter().get_dest_table(2+j+j+1);
323 }
324 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
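// jint_cmp looks only at the leading jint of each two-jint record, so the
// (match, dest) pairs end up sorted by ascending match value.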
325 }
327 int rnum = len*2+1;
328 bool makes_backward_branch = false;
329 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
330 int rp = -1;
331 for( int j = 0; j < len; j++ ) {
332 jint match_int = table[j+j+0];
333 int dest = table[j+j+1];
334 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
335 int table_index = method_data_update() ? j : NullTableIndex;
336 makes_backward_branch |= (dest <= bci());
337 if( match_int != next_lo ) {
338 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
339 }
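// Keys falling in the gap between the previous match and this one get a
// default-destination range of their own, keeping the range list
// contiguous from min_jint upward.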
340 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
341 ranges[++rp].set(match_int, dest, table_index);
342 }
343 }
344 jint highest = table[2*(len-1)];
345 assert(ranges[rp].hi() == highest, "");
346 if( highest != max_jint
347 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
348 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
349 }
350 assert(rp < rnum, "not too many ranges");
352 // Safepoint in case a backward branch is observed
353 if( makes_backward_branch && UseLoopSafepoints )
354 add_safepoint();
356 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
357 }
359 //----------------------------create_jump_tables-------------------------------
360 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
361 // Are jumptables enabled
362 if (!UseJumpTables) return false;
364 // Are jumptables supported
365 if (!Matcher::has_match_rule(Op_Jump)) return false;
367 // Don't make jump table if profiling
368 if (method_data_update()) return false;
370 // Decide if a guard is needed to lop off big ranges at either (or
371 // both) end(s) of the input set. We'll call this the default target
372 // even though we can't be sure that it is the true "default".
374 bool needs_guard = false;
375 int default_dest;
376 int64 total_outlier_size = 0;
377 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
378 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
380 if (lo->dest() == hi->dest()) {
381 total_outlier_size = hi_size + lo_size;
382 default_dest = lo->dest();
383 } else if (lo_size > hi_size) {
384 total_outlier_size = lo_size;
385 default_dest = lo->dest();
386 } else {
387 total_outlier_size = hi_size;
388 default_dest = hi->dest();
389 }
391 // If a guard test will eliminate very sparse end ranges, then
392 // it is worth the cost of an extra jump.
393 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
394 needs_guard = true;
395 if (default_dest == lo->dest()) lo++;
396 if (default_dest == hi->dest()) hi--;
397 }
399 // Find the total number of cases and ranges
400 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
401 int num_range = hi - lo + 1;
403 // Don't create table if: too large, too small, or too sparse.
404 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
405 return false;
406 if (num_cases > (MaxJumpTableSparseness * num_range))
407 return false;
409 // Normalize table lookups to zero
410 int lowval = lo->lo();
411 key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
413 // Generate a guard to protect against input keyvals that aren't
414 // in the switch domain.
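// key_val has already been rebased to zero above, so any key outside the
// domain appears as an unsigned value >= num_cases; a single unsigned "ge"
// test below routes all such keys to the default target.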
415 if (needs_guard) {
416 Node* size = _gvn.intcon(num_cases);
417 Node* cmp = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
418 Node* tst = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
419 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
420 jump_if_true_fork(iff, default_dest, NullTableIndex);
421 }
423 // Create an ideal node JumpTable that has projections
424 // of all possible ranges for a switch statement
425 // The key_val input must be converted to a pointer offset and scaled.
426 // Compare Parse::array_addressing above.
427 #ifdef _LP64
428 // Clean the 32-bit int into a real 64-bit offset.
429 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
430 const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
431 key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
432 #endif
433 // Scale the value by the word size so we have an offset into the table,
434 // rather than a switch value
435 Node *shiftWord = _gvn.MakeConX(wordSize);
436 key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
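// Multiplying the zero-based case number by the word size turns it into a
// byte offset into a table of word-sized entries, which is the form the
// JumpNode created below takes as input.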
438 // Create the JumpNode
439 Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
441 // These are the switch destinations hanging off the jumpnode
442 int i = 0;
443 for (SwitchRange* r = lo; r <= hi; r++) {
444 for (int j = r->lo(); j <= r->hi(); j++, i++) {
445 Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
446 {
447 PreserveJVMState pjvms(this);
448 set_control(input);
449 jump_if_always_fork(r->dest(), r->table_index());
450 }
451 }
452 }
453 assert(i == num_cases, "miscount of cases");
454 stop_and_kill_map(); // no more uses for this JVMS
455 return true;
456 }
458 //----------------------------jump_switch_ranges-------------------------------
459 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
460 Block* switch_block = block();
462 if (switch_depth == 0) {
463 // Do special processing for the top-level call.
464 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
465 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
467 // Decrement pred-numbers for the unique set of nodes.
468 #ifdef ASSERT
469 // Ensure that the block's successors are a (duplicate-free) set.
470 int successors_counted = 0; // block occurrences in [lo..hi]
471 int unique_successors = switch_block->num_successors();
472 for (int i = 0; i < unique_successors; i++) {
473 Block* target = switch_block->successor_at(i);
475 // Check that the set of successors is the same in both places.
476 int successors_found = 0;
477 for (SwitchRange* p = lo; p <= hi; p++) {
478 if (p->dest() == target->start()) successors_found++;
479 }
480 assert(successors_found > 0, "successor must be known");
481 successors_counted += successors_found;
482 }
483 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
484 #endif
486 // Maybe prune the inputs, based on the type of key_val.
487 jint min_val = min_jint;
488 jint max_val = max_jint;
489 const TypeInt* ti = key_val->bottom_type()->isa_int();
490 if (ti != NULL) {
491 min_val = ti->_lo;
492 max_val = ti->_hi;
493 assert(min_val <= max_val, "invalid int type");
494 }
495 while (lo->hi() < min_val) lo++;
496 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
497 while (hi->lo() > max_val) hi--;
498 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
499 }
501 #ifndef PRODUCT
502 if (switch_depth == 0) {
503 _max_switch_depth = 0;
504 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
505 }
506 #endif
508 assert(lo <= hi, "must be a non-empty set of ranges");
509 if (lo == hi) {
510 jump_if_always_fork(lo->dest(), lo->table_index());
511 } else {
512 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
513 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
515 if (create_jump_tables(key_val, lo, hi)) return;
517 int nr = hi - lo + 1;
519 SwitchRange* mid = lo + nr/2;
520 // if there is an easy choice, pivot at a singleton:
521 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
523 assert(lo < mid && mid <= hi, "good pivot choice");
524 assert(nr != 2 || mid == hi, "should pick higher of 2");
525 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
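// Classic binary search over the sorted, contiguous ranges: compare
// key_val against mid->lo(). A singleton mid is peeled off with an exact
// test; otherwise a ">=" test splits [lo..mid-1] from [mid..hi], and each
// half is handled by a recursive call at switch_depth+1.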
527 Node *test_val = _gvn.intcon(mid->lo());
529 if (mid->is_singleton()) {
530 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
531 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
533 // Special Case: If there are exactly three ranges, and the high
534 // and low range each go to the same place, omit the "gt" test,
535 // since it will not discriminate anything.
536 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
537 if (eq_test_only) {
538 assert(mid == hi-1, "");
539 }
541 // if there is a higher range, test for it and process it:
542 if (mid < hi && !eq_test_only) {
543 // two comparisons of same values--should enable 1 test for 2 branches
544 // Use BoolTest::le instead of BoolTest::gt
545 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
546 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
547 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
548 { PreserveJVMState pjvms(this);
549 set_control(iffalse);
550 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
551 }
552 set_control(iftrue);
553 }
555 } else {
556 // mid is a range, not a singleton, so treat mid..hi as a unit
557 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
559 // if there is a higher range, test for it and process it:
560 if (mid == hi) {
561 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
562 } else {
563 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
564 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
565 { PreserveJVMState pjvms(this);
566 set_control(iftrue);
567 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
568 }
569 set_control(iffalse);
570 }
571 }
573 // in any case, process the lower range
574 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
575 }
577 // Decrease pred_count for each successor after all is done.
578 if (switch_depth == 0) {
579 int unique_successors = switch_block->num_successors();
580 for (int i = 0; i < unique_successors; i++) {
581 Block* target = switch_block->successor_at(i);
582 // Throw away the pre-allocated path for each unique successor.
583 target->next_path_num();
584 }
585 }
587 #ifndef PRODUCT
588 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
589 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
590 SwitchRange* r;
591 int nsing = 0;
592 for( r = lo; r <= hi; r++ ) {
593 if( r->is_singleton() ) nsing++;
594 }
595 tty->print(">>> ");
596 _method->print_short_name();
597 tty->print_cr(" switch decision tree");
598 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
599 hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
600 if (_max_switch_depth > _est_switch_depth) {
601 tty->print_cr("******** BAD SWITCH DEPTH ********");
602 }
603 tty->print(" ");
604 for( r = lo; r <= hi; r++ ) {
605 r->print(env());
606 }
607 tty->print_cr("");
608 }
609 #endif
610 }
612 void Parse::modf() {
613 Node *f2 = pop();
614 Node *f1 = pop();
615 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
616 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
617 "frem", NULL, //no memory effects
618 f1, f2);
619 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
621 push(res);
622 }
624 void Parse::modd() {
625 Node *d2 = pop_pair();
626 Node *d1 = pop_pair();
627 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
628 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
629 "drem", NULL, //no memory effects
630 d1, top(), d2, top());
631 Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
633 #ifdef ASSERT
634 Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
635 assert(res_top == top(), "second value must be top");
636 #endif
638 push_pair(res_d);
639 }
641 void Parse::l2f() {
642 Node* f2 = pop();
643 Node* f1 = pop();
644 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
645 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
646 "l2f", NULL, //no memory effects
647 f1, f2);
648 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
650 push(res);
651 }
653 void Parse::do_irem() {
654 // Must keep both values on the expression-stack during null-check
655 do_null_check(peek(), T_INT);
656 // Compile-time detect of null-exception?
657 if (stopped()) return;
659 Node* b = pop();
660 Node* a = pop();
662 const Type *t = _gvn.type(b);
663 if (t != Type::TOP) {
664 const TypeInt *ti = t->is_int();
665 if (ti->is_con()) {
666 int divisor = ti->get_con();
667 // check for positive power of 2
668 if (divisor > 0 &&
669 (divisor & ~(divisor-1)) == divisor) {
670 // yes !
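// For a power-of-two divisor d, a % d == a & (d-1) when a >= 0. Java's
// irem takes the sign of the dividend, so negative dividends are handled
// below by negating, masking, and negating back.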
671 Node *mask = _gvn.intcon((divisor - 1));
672 // Sigh, must handle negative dividends
673 Node *zero = _gvn.intcon(0);
674 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
675 Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
676 Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
677 Node *reg = jump_if_join(ift, iff);
678 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
679 // Negative path; negate/and/negate
680 Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
681 Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
682 Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
683 phi->init_req(1, negn);
684 // Fast positive case
685 Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
686 phi->init_req(2, andx);
687 // Push the merge
688 push( _gvn.transform(phi) );
689 return;
690 }
691 }
692 }
693 // Default case
694 push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
695 }
697 // Handle jsr and jsr_w bytecode
698 void Parse::do_jsr() {
699 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
701 // Store information about current state, tagged with new _jsr_bci
702 int return_bci = iter().next_bci();
703 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
705 // Update method data
706 profile_taken_branch(jsr_bci);
708 // The way we do things now, there is only one successor block
709 // for the jsr, because the target code is cloned by ciTypeFlow.
710 Block* target = successor_for_bci(jsr_bci);
712 // What got pushed?
713 const Type* ret_addr = target->peek();
714 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
716 // Effect of the jsr on the stack
717 push(_gvn.makecon(ret_addr));
719 // Flow to the jsr.
720 merge(jsr_bci);
721 }
723 // Handle ret bytecode
724 void Parse::do_ret() {
725 // Find to whom we return.
726 #if 0 // %%%% MAKE THIS WORK
727 Node* con = local();
728 const TypePtr* tp = con->bottom_type()->isa_ptr();
729 assert(tp && tp->singleton(), "");
730 int return_bci = (int) tp->get_con();
731 merge(return_bci);
732 #else
733 assert(block()->num_successors() == 1, "a ret can only go one place now");
734 Block* target = block()->successor_at(0);
735 assert(!target->is_ready(), "our arrival must be expected");
736 profile_ret(target->flow()->start());
737 int pnum = target->next_path_num();
738 merge_common(target, pnum);
739 #endif
740 }
742 //--------------------------dynamic_branch_prediction--------------------------
743 // Try to gather dynamic branch prediction behavior. Return a probability
744 // of the branch being taken and set the "cnt" field. Returns -1.0
745 // if we need to use static prediction for some reason.
746 float Parse::dynamic_branch_prediction(float &cnt) {
747 ResourceMark rm;
749 cnt = COUNT_UNKNOWN;
751 // Use MethodData information if it is available
752 // FIXME: free the ProfileData structure
753 ciMethodData* methodData = method()->method_data();
754 if (!methodData->is_mature()) return PROB_UNKNOWN;
755 ciProfileData* data = methodData->bci_to_data(bci());
756 if (!data->is_JumpData()) return PROB_UNKNOWN;
758 // get taken and not taken values
759 int taken = data->as_JumpData()->taken();
760 int not_taken = 0;
761 if (data->is_BranchData()) {
762 not_taken = data->as_BranchData()->not_taken();
763 }
765 // scale the counts to be commensurate with invocation counts:
766 taken = method()->scale_count(taken);
767 not_taken = method()->scale_count(not_taken);
769 // Give up if too few counts to be meaningful
770 if (taken + not_taken < 40) {
771 if (C->log() != NULL) {
772 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
773 }
774 return PROB_UNKNOWN;
775 }
777 // Compute frequency that we arrive here
778 int sum = taken + not_taken;
779 // Adjust, if this block is a cloned private block but the
780 // Jump counts are shared. Take the private counts for
781 // just this path instead of the shared counts.
782 if( block()->count() > 0 )
783 sum = block()->count();
784 cnt = (float)sum / (float)FreqCountInvocations;
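// cnt is the raw profile count scaled by FreqCountInvocations; it becomes
// the execution-count estimate attached to the If node built by the caller.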
786 // Pin probability to sane limits
787 float prob;
788 if( !taken )
789 prob = (0+PROB_MIN) / 2;
790 else if( !not_taken )
791 prob = (1+PROB_MAX) / 2;
792 else { // Compute probability of true path
793 prob = (float)taken / (float)(taken + not_taken);
794 if (prob > PROB_MAX) prob = PROB_MAX;
795 if (prob < PROB_MIN) prob = PROB_MIN;
796 }
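// A zero taken (or not-taken) count is deliberately pinned outside the
// [PROB_MIN, PROB_MAX] clamp used for nonzero counts; seems_never_taken()
// relies on this to tell "never observed" apart from "very rare".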
798 assert((cnt > 0.0f) && (prob > 0.0f),
799 "Bad frequency assignment in if");
801 if (C->log() != NULL) {
802 const char* prob_str = NULL;
803 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
804 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
805 char prob_str_buf[30];
806 if (prob_str == NULL) {
807 sprintf(prob_str_buf, "%g", prob);
808 prob_str = prob_str_buf;
809 }
810 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
811 iter().get_dest(), taken, not_taken, cnt, prob_str);
812 }
813 return prob;
814 }
816 //-----------------------------branch_prediction-------------------------------
817 float Parse::branch_prediction(float& cnt,
818 BoolTest::mask btest,
819 int target_bci) {
820 float prob = dynamic_branch_prediction(cnt);
821 // If prob is unknown, switch to static prediction
822 if (prob != PROB_UNKNOWN) return prob;
824 prob = PROB_FAIR; // Set default value
825 if (btest == BoolTest::eq) // Exactly equal test?
826 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
827 else if (btest == BoolTest::ne)
828 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
830 // If this is a conditional test guarding a backwards branch,
831 // assume it's a loop-back edge. Make it a likely taken branch.
832 if (target_bci < bci()) {
833 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
834 // Since it's an OSR, we probably have profile data, but since
835 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
836 // Let's make a special check here for completely zero counts.
837 ciMethodData* methodData = method()->method_data();
838 if (!methodData->is_empty()) {
839 ciProfileData* data = methodData->bci_to_data(bci());
840 // Only stop for truly zero counts, which mean an unknown part
841 // of the OSR-ed method, and we want to deopt to gather more stats.
842 // If you have ANY counts, then this loop is simply 'cold' relative
843 // to the OSR loop.
844 if (data->as_BranchData()->taken() +
845 data->as_BranchData()->not_taken() == 0 ) {
846 // This is the only way to return PROB_UNKNOWN:
847 return PROB_UNKNOWN;
848 }
849 }
850 }
851 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
852 }
854 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
855 return prob;
856 }
858 // The magic constants are chosen so as to match the output of
859 // branch_prediction() when the profile reports a zero taken count.
860 // It is important to distinguish zero counts unambiguously, because
861 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
862 // very small but nonzero probabilities, which if confused with zero
863 // counts would keep the program recompiling indefinitely.
864 bool Parse::seems_never_taken(float prob) {
865 return prob < PROB_MIN;
866 }
868 inline void Parse::repush_if_args() {
869 #ifndef PRODUCT
870 if (PrintOpto && WizardMode) {
871 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
872 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
873 method()->print_name(); tty->cr();
874 }
875 #endif
876 int bc_depth = - Bytecodes::depth(iter().cur_bc());
877 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
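// Bytecodes::depth() gives the net stack effect of the branch bytecode
// (negative, since a branch only pops its operands), so negating it yields
// the number of condition operands to push back before trapping.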
878 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
879 assert(argument(0) != NULL, "must exist");
880 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
881 _sp += bc_depth;
882 }
884 //----------------------------------do_ifnull----------------------------------
885 void Parse::do_ifnull(BoolTest::mask btest) {
886 int target_bci = iter().get_dest();
888 Block* branch_block = successor_for_bci(target_bci);
889 Block* next_block = successor_for_bci(iter().next_bci());
891 float cnt;
892 float prob = branch_prediction(cnt, btest, target_bci);
893 if (prob == PROB_UNKNOWN) {
894 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
895 #ifndef PRODUCT
896 if (PrintOpto && Verbose)
897 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
898 #endif
899 repush_if_args(); // to gather stats on loop
900 // We need to mark this branch as taken so that if we recompile we will
901 // see that it is possible. In the tiered system the interpreter doesn't
902 // do profiling and by the time we get to the lower tier from the interpreter
903 // the path may be cold again. Make sure it doesn't look untaken
904 profile_taken_branch(target_bci, !ProfileInterpreter);
905 uncommon_trap(Deoptimization::Reason_unreached,
906 Deoptimization::Action_reinterpret,
907 NULL, "cold");
908 if (EliminateAutoBox) {
909 // Mark the successor blocks as parsed
910 branch_block->next_path_num();
911 next_block->next_path_num();
912 }
913 return;
914 }
916 // If this is a backwards branch in the bytecodes, add Safepoint
917 maybe_add_safepoint(target_bci);
919 explicit_null_checks_inserted++;
920 Node* a = null();
921 Node* b = pop();
922 Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
924 // Make a cast-away-nullness that is control dependent on the test
925 const Type *t = _gvn.type(b);
926 const Type *t_not_null = t->join(TypePtr::NOTNULL);
927 Node *cast = new (C, 2) CastPPNode(b,t_not_null);
929 // Generate real control flow
930 Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );
932 // Sanity check the probability value
933 assert(prob > 0.0f,"Bad probability in Parser");
934 // Need xform to put node in hash table
935 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
936 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
937 // True branch
938 { PreserveJVMState pjvms(this);
939 Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
940 set_control(iftrue);
942 if (stopped()) { // Path is dead?
943 explicit_null_checks_elided++;
944 if (EliminateAutoBox) {
945 // Mark the successor block as parsed
946 branch_block->next_path_num();
947 }
948 } else { // Path is live.
949 // Update method data
950 profile_taken_branch(target_bci);
951 adjust_map_after_if(btest, c, prob, branch_block, next_block);
952 if (!stopped())
953 merge(target_bci);
954 }
955 }
957 // False branch
958 Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
959 set_control(iffalse);
961 if (stopped()) { // Path is dead?
962 explicit_null_checks_elided++;
963 if (EliminateAutoBox) {
964 // Mark the successor block as parsed
965 next_block->next_path_num();
966 }
967 } else { // Path is live.
968 // Update method data
969 profile_not_taken_branch();
970 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
971 next_block, branch_block);
972 }
973 }
975 //------------------------------------do_if------------------------------------
976 void Parse::do_if(BoolTest::mask btest, Node* c) {
977 int target_bci = iter().get_dest();
979 Block* branch_block = successor_for_bci(target_bci);
980 Block* next_block = successor_for_bci(iter().next_bci());
982 float cnt;
983 float prob = branch_prediction(cnt, btest, target_bci);
984 float untaken_prob = 1.0 - prob;
986 if (prob == PROB_UNKNOWN) {
987 #ifndef PRODUCT
988 if (PrintOpto && Verbose)
989 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
990 #endif
991 repush_if_args(); // to gather stats on loop
992 // We need to mark this branch as taken so that if we recompile we will
993 // see that it is possible. In the tiered system the interpreter doesn't
994 // do profiling and by the time we get to the lower tier from the interpreter
995 // the path may be cold again. Make sure it doesn't look untaken
996 profile_taken_branch(target_bci, !ProfileInterpreter);
997 uncommon_trap(Deoptimization::Reason_unreached,
998 Deoptimization::Action_reinterpret,
999 NULL, "cold");
1000 if (EliminateAutoBox) {
1001 // Mark the successor blocks as parsed
1002 branch_block->next_path_num();
1003 next_block->next_path_num();
1004 }
1005 return;
1006 }
1008 // Sanity check the probability value
1009 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1011 bool taken_if_true = true;
1012 // Convert BoolTest to canonical form:
1013 if (!BoolTest(btest).is_canonical()) {
1014 btest = BoolTest(btest).negate();
1015 taken_if_true = false;
1016 // prob is NOT updated here; it remains the probability of the taken
1017 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1018 }
1019 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1021 Node* tst0 = new (C, 2) BoolNode(c, btest);
1022 Node* tst = _gvn.transform(tst0);
1023 BoolTest::mask taken_btest = BoolTest::illegal;
1024 BoolTest::mask untaken_btest = BoolTest::illegal;
1025 if (btest == BoolTest::ne) {
1026 // For now, these are the only cases of btest that matter. (More later.)
1027 taken_btest = taken_if_true ? btest : BoolTest::eq;
1028 untaken_btest = taken_if_true ? BoolTest::eq : btest;
1029 }
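// Only the exact test (ne, once canonicalized) gets real taken/untaken
// masks; for other tests they stay BoolTest::illegal, and
// adjust_map_after_if() below declines to sharpen types on an illegal mask.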
1031 // Generate real control flow
1032 float true_prob = (taken_if_true ? prob : untaken_prob);
1033 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1034 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1035 Node* taken_branch = new (C, 1) IfTrueNode(iff);
1036 Node* untaken_branch = new (C, 1) IfFalseNode(iff);
1037 if (!taken_if_true) { // Finish conversion to canonical form
1038 Node* tmp = taken_branch;
1039 taken_branch = untaken_branch;
1040 untaken_branch = tmp;
1041 }
1043 // Branch is taken:
1044 { PreserveJVMState pjvms(this);
1045 taken_branch = _gvn.transform(taken_branch);
1046 set_control(taken_branch);
1048 if (stopped()) {
1049 if (EliminateAutoBox) {
1050 // Mark the successor block as parsed
1051 branch_block->next_path_num();
1052 }
1053 } else {
1054 // Update method data
1055 profile_taken_branch(target_bci);
1056 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1057 if (!stopped())
1058 merge(target_bci);
1059 }
1060 }
1062 untaken_branch = _gvn.transform(untaken_branch);
1063 set_control(untaken_branch);
1065 // Branch not taken.
1066 if (stopped()) {
1067 if (EliminateAutoBox) {
1068 // Mark the successor block as parsed
1069 next_block->next_path_num();
1070 }
1071 } else {
1072 // Update method data
1073 profile_not_taken_branch();
1074 adjust_map_after_if(untaken_btest, c, untaken_prob,
1075 next_block, branch_block);
1076 }
1077 }
1079 //----------------------------adjust_map_after_if------------------------------
1080 // Adjust the JVM state to reflect the result of taking this path.
1081 // Basically, it means inspecting the CmpNode controlling this
1082 // branch, seeing how it constrains a tested value, and then
1083 // deciding if it's worth our while to encode this constraint
1084 // as graph nodes in the current abstract interpretation map.
1085 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1086 Block* path, Block* other_path) {
1087 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1088 return; // nothing to do
1090 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1092 int cop = c->Opcode();
1093 if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) {
1094 // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.)
1095 //
1096 // If this might possibly turn into an implicit null check,
1097 // and the null has never yet been seen, we need to generate
1098 // an uncommon trap, so as to recompile instead of suffering
1099 // with very slow branches. (We'll get the slow branches if
1100 // the program ever changes phase and starts seeing nulls here.)
1101 //
1102 // The tests we worry about are of the form (p == null).
1103 // We do not simply inspect for a null constant, since a node may
1104 // optimize to 'null' later on.
1105 repush_if_args();
1106 // We need to mark this branch as taken so that if we recompile we will
1107 // see that it is possible. In the tiered system the interpreter doesn't
1108 // do profiling and by the time we get to the lower tier from the interpreter
1109 // the path may be cold again. Make sure it doesn't look untaken
1110 if (is_fallthrough) {
1111 profile_not_taken_branch(!ProfileInterpreter);
1112 } else {
1113 profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
1114 }
1115 uncommon_trap(Deoptimization::Reason_unreached,
1116 Deoptimization::Action_reinterpret,
1117 NULL,
1118 (is_fallthrough ? "taken always" : "taken never"));
1119 return;
1120 }
1122 Node* val = c->in(1);
1123 Node* con = c->in(2);
1124 const Type* tcon = _gvn.type(con);
1125 const Type* tval = _gvn.type(val);
1126 bool have_con = tcon->singleton();
1127 if (tval->singleton()) {
1128 if (!have_con) {
1129 // Swap, so constant is in con.
1130 con = val;
1131 tcon = tval;
1132 val = c->in(2);
1133 tval = _gvn.type(val);
1134 btest = BoolTest(btest).commute();
1135 have_con = true;
1136 } else {
1137 // Do we have two constants? Then leave well enough alone.
1138 have_con = false;
1139 }
1140 }
1141 if (!have_con) // remaining adjustments need a con
1142 return;
1145 int val_in_map = map()->find_edge(val);
1146 if (val_in_map < 0) return; // replace_in_map would be useless
1147 {
1148 JVMState* jvms = this->jvms();
1149 if (!(jvms->is_loc(val_in_map) ||
1150 jvms->is_stk(val_in_map)))
1151 return; // again, it would be useless
1152 }
1154 // Check for a comparison to a constant, and "know" that the compared
1155 // value is constrained on this path.
1156 assert(tcon->singleton(), "");
1157 ConstraintCastNode* ccast = NULL;
1158 Node* cast = NULL;
1160 switch (btest) {
1161 case BoolTest::eq: // Constant test?
1162 {
1163 const Type* tboth = tcon->join(tval);
1164 if (tboth == tval) break; // Nothing to gain.
1165 if (tcon->isa_int()) {
1166 ccast = new (C, 2) CastIINode(val, tboth);
1167 } else if (tcon == TypePtr::NULL_PTR) {
1168 // Cast to null, but keep the pointer identity temporarily live.
1169 ccast = new (C, 2) CastPPNode(val, tboth);
1170 } else {
1171 const TypeF* tf = tcon->isa_float_constant();
1172 const TypeD* td = tcon->isa_double_constant();
1173 // Exclude tests vs float/double 0 as these could be
1174 // either +0 or -0. Just because you are equal to +0
1175 // doesn't mean you ARE +0!
1176 if ((!tf || tf->_f != 0.0) &&
1177 (!td || td->_d != 0.0))
1178 cast = con; // Replace non-constant val by con.
1179 }
1180 }
1181 break;
1183 case BoolTest::ne:
1184 if (tcon == TypePtr::NULL_PTR) {
1185 cast = cast_not_null(val, false);
1186 }
1187 break;
1189 default:
1190 // (At this point we could record int range types with CastII.)
1191 break;
1192 }
1194 if (ccast != NULL) {
1195 const Type* tcc = ccast->as_Type()->type();
1196 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1197 // Delay transform() call to allow recovery of pre-cast value
1198 // at the control merge.
1199 ccast->set_req(0, control());
1200 _gvn.set_type_bottom(ccast);
1201 record_for_igvn(ccast);
1202 cast = ccast;
1203 }
1205 if (cast != NULL) { // Here's the payoff.
1206 replace_in_map(val, cast);
1207 }
1208 }
1211 //------------------------------do_one_bytecode--------------------------------
1212 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1213 void Parse::do_one_bytecode() {
1214 Node *a, *b, *c, *d; // Handy temps
1215 BoolTest::mask btest;
1216 int i;
1218 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1220 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1221 "out of nodes parsing method")) {
1222 return;
1223 }
1225 #ifdef ASSERT
1226 // for setting breakpoints
1227 if (TraceOptoParse) {
1228 tty->print(" @");
1229 dump_bci(bci());
1230 }
1231 #endif
1233 switch (bc()) {
1234 case Bytecodes::_nop:
1235 // do nothing
1236 break;
1237 case Bytecodes::_lconst_0:
1238 push_pair(longcon(0));
1239 break;
1241 case Bytecodes::_lconst_1:
1242 push_pair(longcon(1));
1243 break;
1245 case Bytecodes::_fconst_0:
1246 push(zerocon(T_FLOAT));
1247 break;
1249 case Bytecodes::_fconst_1:
1250 push(makecon(TypeF::ONE));
1251 break;
1253 case Bytecodes::_fconst_2:
1254 push(makecon(TypeF::make(2.0f)));
1255 break;
1257 case Bytecodes::_dconst_0:
1258 push_pair(zerocon(T_DOUBLE));
1259 break;
1261 case Bytecodes::_dconst_1:
1262 push_pair(makecon(TypeD::ONE));
1263 break;
1265 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1266 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1267 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1268 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1269 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1270 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1271 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1272 case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
1273 case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
1274 case Bytecodes::_aconst_null: push(null()); break;
1275 case Bytecodes::_ldc:
1276 case Bytecodes::_ldc_w:
1277 case Bytecodes::_ldc2_w:
1278 // If the constant is unresolved, run this BC once in the interpreter.
1279 if (iter().is_unresolved_string()) {
1280 uncommon_trap(Deoptimization::make_trap_request
1281 (Deoptimization::Reason_unloaded,
1282 Deoptimization::Action_reinterpret,
1283 iter().get_constant_index()),
1284 NULL, "unresolved_string");
1285 break;
1286 } else {
1287 ciConstant constant = iter().get_constant();
1288 if (constant.basic_type() == T_OBJECT) {
1289 ciObject* c = constant.as_object();
1290 if (c->is_klass()) {
1291 // The constant returned for a klass is the ciKlass for the
1292 // entry. We want the java_mirror so get it.
1293 ciKlass* klass = c->as_klass();
1294 if (klass->is_loaded()) {
1295 constant = ciConstant(T_OBJECT, klass->java_mirror());
1296 } else {
1297 uncommon_trap(Deoptimization::make_trap_request
1298 (Deoptimization::Reason_unloaded,
1299 Deoptimization::Action_reinterpret,
1300 iter().get_constant_index()),
1301 NULL, "unresolved_klass");
1302 break;
1303 }
1304 }
1305 }
1306 push_constant(constant);
1307 }
1309 break;
1311 case Bytecodes::_aload_0:
1312 push( local(0) );
1313 break;
1314 case Bytecodes::_aload_1:
1315 push( local(1) );
1316 break;
1317 case Bytecodes::_aload_2:
1318 push( local(2) );
1319 break;
1320 case Bytecodes::_aload_3:
1321 push( local(3) );
1322 break;
1323 case Bytecodes::_aload:
1324 push( local(iter().get_index()) );
1325 break;
1327 case Bytecodes::_fload_0:
1328 case Bytecodes::_iload_0:
1329 push( local(0) );
1330 break;
1331 case Bytecodes::_fload_1:
1332 case Bytecodes::_iload_1:
1333 push( local(1) );
1334 break;
1335 case Bytecodes::_fload_2:
1336 case Bytecodes::_iload_2:
1337 push( local(2) );
1338 break;
1339 case Bytecodes::_fload_3:
1340 case Bytecodes::_iload_3:
1341 push( local(3) );
1342 break;
1343 case Bytecodes::_fload:
1344 case Bytecodes::_iload:
1345 push( local(iter().get_index()) );
1346 break;
1347 case Bytecodes::_lload_0:
1348 push_pair_local( 0 );
1349 break;
1350 case Bytecodes::_lload_1:
1351 push_pair_local( 1 );
1352 break;
1353 case Bytecodes::_lload_2:
1354 push_pair_local( 2 );
1355 break;
1356 case Bytecodes::_lload_3:
1357 push_pair_local( 3 );
1358 break;
1359 case Bytecodes::_lload:
1360 push_pair_local( iter().get_index() );
1361 break;
1363 case Bytecodes::_dload_0:
1364 push_pair_local(0);
1365 break;
1366 case Bytecodes::_dload_1:
1367 push_pair_local(1);
1368 break;
1369 case Bytecodes::_dload_2:
1370 push_pair_local(2);
1371 break;
1372 case Bytecodes::_dload_3:
1373 push_pair_local(3);
1374 break;
1375 case Bytecodes::_dload:
1376 push_pair_local(iter().get_index());
1377 break;
1378 case Bytecodes::_fstore_0:
1379 case Bytecodes::_istore_0:
1380 case Bytecodes::_astore_0:
1381 set_local( 0, pop() );
1382 break;
1383 case Bytecodes::_fstore_1:
1384 case Bytecodes::_istore_1:
1385 case Bytecodes::_astore_1:
1386 set_local( 1, pop() );
1387 break;
1388 case Bytecodes::_fstore_2:
1389 case Bytecodes::_istore_2:
1390 case Bytecodes::_astore_2:
1391 set_local( 2, pop() );
1392 break;
1393 case Bytecodes::_fstore_3:
1394 case Bytecodes::_istore_3:
1395 case Bytecodes::_astore_3:
1396 set_local( 3, pop() );
1397 break;
1398 case Bytecodes::_fstore:
1399 case Bytecodes::_istore:
1400 case Bytecodes::_astore:
1401 set_local( iter().get_index(), pop() );
1402 break;
1403 // long stores
1404 case Bytecodes::_lstore_0:
1405 set_pair_local( 0, pop_pair() );
1406 break;
1407 case Bytecodes::_lstore_1:
1408 set_pair_local( 1, pop_pair() );
1409 break;
1410 case Bytecodes::_lstore_2:
1411 set_pair_local( 2, pop_pair() );
1412 break;
1413 case Bytecodes::_lstore_3:
1414 set_pair_local( 3, pop_pair() );
1415 break;
1416 case Bytecodes::_lstore:
1417 set_pair_local( iter().get_index(), pop_pair() );
1418 break;
1420 // double stores
1421 case Bytecodes::_dstore_0:
1422 set_pair_local( 0, dstore_rounding(pop_pair()) );
1423 break;
1424 case Bytecodes::_dstore_1:
1425 set_pair_local( 1, dstore_rounding(pop_pair()) );
1426 break;
1427 case Bytecodes::_dstore_2:
1428 set_pair_local( 2, dstore_rounding(pop_pair()) );
1429 break;
1430 case Bytecodes::_dstore_3:
1431 set_pair_local( 3, dstore_rounding(pop_pair()) );
1432 break;
1433 case Bytecodes::_dstore:
1434 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1435 break;
1437 case Bytecodes::_pop: _sp -= 1; break;
1438 case Bytecodes::_pop2: _sp -= 2; break;
1439 case Bytecodes::_swap:
1440 a = pop();
1441 b = pop();
1442 push(a);
1443 push(b);
1444 break;
1445 case Bytecodes::_dup:
1446 a = pop();
1447 push(a);
1448 push(a);
1449 break;
1450 case Bytecodes::_dup_x1:
1451 a = pop();
1452 b = pop();
1453 push( a );
1454 push( b );
1455 push( a );
1456 break;
1457 case Bytecodes::_dup_x2:
1458 a = pop();
1459 b = pop();
1460 c = pop();
1461 push( a );
1462 push( c );
1463 push( b );
1464 push( a );
1465 break;
1466 case Bytecodes::_dup2:
1467 a = pop();
1468 b = pop();
1469 push( b );
1470 push( a );
1471 push( b );
1472 push( a );
1473 break;
1475 case Bytecodes::_dup2_x1:
1476 // before: .. c, b, a
1477 // after: .. b, a, c, b, a
1478 // not tested
1479 a = pop();
1480 b = pop();
1481 c = pop();
1482 push( b );
1483 push( a );
1484 push( c );
1485 push( b );
1486 push( a );
1487 break;
1488 case Bytecodes::_dup2_x2:
1489 // before: .. d, c, b, a
1490 // after: .. b, a, d, c, b, a
1491 // not tested
1492 a = pop();
1493 b = pop();
1494 c = pop();
1495 d = pop();
1496 push( b );
1497 push( a );
1498 push( d );
1499 push( c );
1500 push( b );
1501 push( a );
1502 break;
1504 case Bytecodes::_arraylength: {
1505 // Must do null-check with value on expression stack
1506 Node *ary = do_null_check(peek(), T_ARRAY);
1507 // Compile-time detect of null-exception?
1508 if (stopped()) return;
1509 a = pop();
1510 push(load_array_length(a));
1511 break;
1512 }
1514 case Bytecodes::_baload: array_load(T_BYTE); break;
1515 case Bytecodes::_caload: array_load(T_CHAR); break;
1516 case Bytecodes::_iaload: array_load(T_INT); break;
1517 case Bytecodes::_saload: array_load(T_SHORT); break;
1518 case Bytecodes::_faload: array_load(T_FLOAT); break;
1519 case Bytecodes::_aaload: array_load(T_OBJECT); break;
1520 case Bytecodes::_laload: {
1521 a = array_addressing(T_LONG, 0);
1522 if (stopped()) return; // guaranteed null or range check
1523 _sp -= 2; // Pop array and index
1524 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
1525 break;
1526 }
1527 case Bytecodes::_daload: {
1528 a = array_addressing(T_DOUBLE, 0);
1529 if (stopped()) return; // guaranteed null or range check
1530 _sp -= 2; // Pop array and index
1531 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
1532 break;
1533 }
1534 case Bytecodes::_bastore: array_store(T_BYTE); break;
1535 case Bytecodes::_castore: array_store(T_CHAR); break;
1536 case Bytecodes::_iastore: array_store(T_INT); break;
1537 case Bytecodes::_sastore: array_store(T_SHORT); break;
1538 case Bytecodes::_fastore: array_store(T_FLOAT); break;
1539 case Bytecodes::_aastore: {
1540 d = array_addressing(T_OBJECT, 1);
1541 if (stopped()) return; // guaranteed null or range check
1542 array_store_check();
1543 c = pop(); // Oop to store
1544 b = pop(); // index (already used)
1545 a = pop(); // the array itself
1546 const Type* elemtype = _gvn.type(a)->is_aryptr()->elem();
1547 const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1548 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
1549 break;
1550 }
1551 case Bytecodes::_lastore: {
1552 a = array_addressing(T_LONG, 2);
1553 if (stopped()) return; // guaranteed null or range check
1554 c = pop_pair();
1555 _sp -= 2; // Pop array and index
1556 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
1557 break;
1558 }
1559 case Bytecodes::_dastore: {
1560 a = array_addressing(T_DOUBLE, 2);
1561 if (stopped()) return; // guaranteed null or range check
1562 c = pop_pair();
1563 _sp -= 2; // Pop array and index
1564 c = dstore_rounding(c);
1565 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
1566 break;
1567 }
1568 case Bytecodes::_getfield:
1569 do_getfield();
1570 break;
1572 case Bytecodes::_getstatic:
1573 do_getstatic();
1574 break;
1576 case Bytecodes::_putfield:
1577 do_putfield();
1578 break;
1580 case Bytecodes::_putstatic:
1581 do_putstatic();
1582 break;
1584 case Bytecodes::_irem:
1585 do_irem();
1586 break;
1587 case Bytecodes::_idiv:
1588 // Must keep both values on the expression-stack during null-check
1589 do_null_check(peek(), T_INT);
1590 // Compile-time detect of null-exception?
1591 if (stopped()) return;
1592 b = pop();
1593 a = pop();
1594 push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
1595 break;
1596 case Bytecodes::_imul:
1597 b = pop(); a = pop();
1598 push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
1599 break;
1600 case Bytecodes::_iadd:
1601 b = pop(); a = pop();
1602 push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
1603 break;
1604 case Bytecodes::_ineg:
1605 a = pop();
1606 push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
1607 break;
1608 case Bytecodes::_isub:
1609 b = pop(); a = pop();
1610 push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
1611 break;
1612 case Bytecodes::_iand:
1613 b = pop(); a = pop();
1614 push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
1615 break;
1616 case Bytecodes::_ior:
1617 b = pop(); a = pop();
1618 push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
1619 break;
1620 case Bytecodes::_ixor:
1621 b = pop(); a = pop();
1622 push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
1623 break;
1624 case Bytecodes::_ishl:
1625 b = pop(); a = pop();
1626 push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
1627 break;
1628 case Bytecodes::_ishr:
1629 b = pop(); a = pop();
1630 push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
1631 break;
1632 case Bytecodes::_iushr:
1633 b = pop(); a = pop();
1634 push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
1635 break;
1637 case Bytecodes::_fneg:
1638 a = pop();
1639 b = _gvn.transform(new (C, 2) NegFNode (a));
1640 push(b);
1641 break;
1643 case Bytecodes::_fsub:
1644 b = pop();
1645 a = pop();
1646 c = _gvn.transform( new (C, 3) SubFNode(a,b) );
1647 d = precision_rounding(c);
1648 push( d );
1649 break;
1651 case Bytecodes::_fadd:
1652 b = pop();
1653 a = pop();
1654 c = _gvn.transform( new (C, 3) AddFNode(a,b) );
1655 d = precision_rounding(c);
1656 push( d );
1657 break;
1659 case Bytecodes::_fmul:
1660 b = pop();
1661 a = pop();
1662 c = _gvn.transform( new (C, 3) MulFNode(a,b) );
1663 d = precision_rounding(c);
1664 push( d );
1665 break;
1667 case Bytecodes::_fdiv:
1668 b = pop();
1669 a = pop();
1670 c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
1671 d = precision_rounding(c);
1672 push( d );
1673 break;
1675 case Bytecodes::_frem:
1676 if (Matcher::has_match_rule(Op_ModF)) {
1677 // Generate a ModF node.
1678 b = pop();
1679 a = pop();
1680 c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
1681 d = precision_rounding(c);
1682 push( d );
1683 }
1684 else {
1685 // Generate a call.
1686 modf();
1687 }
1688 break;
1690 case Bytecodes::_fcmpl:
1691 b = pop();
1692 a = pop();
1693 c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
1694 push(c);
1695 break;
1696 case Bytecodes::_fcmpg:
1697 b = pop();
1698 a = pop();
1700 // Same as fcmpl but we need to flip the unordered case. Swap the inputs,
1701 // which negates the result sign except for unordered. Flip the unordered
1702 // as well by using CmpF3, which implements unordered-lesser instead of
1703 // unordered-greater semantics. Finally, negate the result bits. The result
1704 // is the same as using a CmpF3Greater, except we did it with CmpF3 alone.
1705 c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
1706 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1707 push(c);
1708 break;
1710 case Bytecodes::_f2i:
1711 a = pop();
1712 push(_gvn.transform(new (C, 2) ConvF2INode(a)));
1713 break;
1715 case Bytecodes::_d2i:
1716 a = pop_pair();
1717 b = _gvn.transform(new (C, 2) ConvD2INode(a));
1718 push( b );
1719 break;
1721 case Bytecodes::_f2d:
1722 a = pop();
1723 b = _gvn.transform( new (C, 2) ConvF2DNode(a));
1724 push_pair( b );
1725 break;
1727 case Bytecodes::_d2f:
1728 a = pop_pair();
1729 b = _gvn.transform( new (C, 2) ConvD2FNode(a));
1730 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1731 //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
1732 push( b );
1733 break;
1735 case Bytecodes::_l2f:
1736 if (Matcher::convL2FSupported()) {
1737 a = pop_pair();
1738 b = _gvn.transform( new (C, 2) ConvL2FNode(a));
1739 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1740 // Rather than storing the result into an FP register then pushing
1741 // out to memory to round, the machine instruction that implements
1742 // ConvL2F is responsible for rounding.
1743 // c = precision_rounding(b);
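// (Illustrative: a long such as (1L << 24) + 1 == 16777217 has no exact
// float representation; the matched ConvL2F instruction must round it to
// 16777216.0f itself rather than leaving an 80-bit x87 intermediate.)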
1744 c = _gvn.transform(b);
1745 push(c);
1746 } else {
1747 l2f();
1748 }
1749 break;
1751 case Bytecodes::_l2d:
1752 a = pop_pair();
1753 b = _gvn.transform( new (C, 2) ConvL2DNode(a));
1754 // For i486.ad, rounding is always necessary (see _l2f above).
1755 // c = dprecision_rounding(b);
1756 c = _gvn.transform(b);
1757 push_pair(c);
1758 break;
1760 case Bytecodes::_f2l:
1761 a = pop();
1762 b = _gvn.transform( new (C, 2) ConvF2LNode(a));
1763 push_pair(b);
1764 break;
1766 case Bytecodes::_d2l:
1767 a = pop_pair();
1768 b = _gvn.transform( new (C, 2) ConvD2LNode(a));
1769 push_pair(b);
1770 break;
1772 case Bytecodes::_dsub:
1773 b = pop_pair();
1774 a = pop_pair();
1775 c = _gvn.transform( new (C, 3) SubDNode(a,b) );
1776 d = dprecision_rounding(c);
1777 push_pair( d );
1778 break;
1780 case Bytecodes::_dadd:
1781 b = pop_pair();
1782 a = pop_pair();
1783 c = _gvn.transform( new (C, 3) AddDNode(a,b) );
1784 d = dprecision_rounding(c);
1785 push_pair( d );
1786 break;
1788 case Bytecodes::_dmul:
1789 b = pop_pair();
1790 a = pop_pair();
1791 c = _gvn.transform( new (C, 3) MulDNode(a,b) );
1792 d = dprecision_rounding(c);
1793 push_pair( d );
1794 break;
1796 case Bytecodes::_ddiv:
1797 b = pop_pair();
1798 a = pop_pair();
1799 c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
1800 d = dprecision_rounding(c);
1801 push_pair( d );
1802 break;
1804 case Bytecodes::_dneg:
1805 a = pop_pair();
1806 b = _gvn.transform(new (C, 2) NegDNode (a));
1807 push_pair(b);
1808 break;
1810 case Bytecodes::_drem:
1811 if (Matcher::has_match_rule(Op_ModD)) {
1812 // Generate a ModD node.
1813 b = pop_pair();
1814 a = pop_pair();
1815 // a % b
1817 c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
1818 d = dprecision_rounding(c);
1819 push_pair( d );
1820 }
1821 else {
1822 // Generate a call.
1823 modd();
1824 }
1825 break;
1827 case Bytecodes::_dcmpl:
1828 b = pop_pair();
1829 a = pop_pair();
1830 c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
1831 push(c);
1832 break;
1834 case Bytecodes::_dcmpg:
1835 b = pop_pair();
1836 a = pop_pair();
1837 // Same as dcmpl but need to flip the unordered case.
1838 // Commute the inputs, which negates the result sign except for unordered.
1839 // Flip the unordered as well by using CmpD3 which implements
1840 // unordered-lesser instead of unordered-greater semantics.
1841 // Finally, negate the result bits. Result is same as using a
1842 // CmpD3Greater except we did it with CmpD3 alone.
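// (Same worked example as fcmpg above, just for doubles: an unordered
// CmpD3(b, a) gives -1, and the negation below turns it into the +1 that
// dcmpg requires.)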
1843 c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
1844 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1845 push(c);
1846 break;
1849 // Note for longs -> lo word is on TOS, hi word is on TOS - 1
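// (In the parser's JVM state the second slot of a two-word value holds the
// dummy top() node, as the "long word order" asserts in _lrem/_ldiv below
// check; pop_pair()/push_pair() always move both slots together.)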
1850 case Bytecodes::_land:
1851 b = pop_pair();
1852 a = pop_pair();
1853 c = _gvn.transform( new (C, 3) AndLNode(a,b) );
1854 push_pair(c);
1855 break;
1856 case Bytecodes::_lor:
1857 b = pop_pair();
1858 a = pop_pair();
1859 c = _gvn.transform( new (C, 3) OrLNode(a,b) );
1860 push_pair(c);
1861 break;
1862 case Bytecodes::_lxor:
1863 b = pop_pair();
1864 a = pop_pair();
1865 c = _gvn.transform( new (C, 3) XorLNode(a,b) );
1866 push_pair(c);
1867 break;
1869 case Bytecodes::_lshl:
1870 b = pop(); // the shift count
1871 a = pop_pair(); // value to be shifted
1872 c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
1873 push_pair(c);
1874 break;
1875 case Bytecodes::_lshr:
1876 b = pop(); // the shift count
1877 a = pop_pair(); // value to be shifted
1878 c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
1879 push_pair(c);
1880 break;
1881 case Bytecodes::_lushr:
1882 b = pop(); // the shift count
1883 a = pop_pair(); // value to be shifted
1884 c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
1885 push_pair(c);
1886 break;
1887 case Bytecodes::_lmul:
1888 b = pop_pair();
1889 a = pop_pair();
1890 c = _gvn.transform( new (C, 3) MulLNode(a,b) );
1891 push_pair(c);
1892 break;
1894 case Bytecodes::_lrem:
1895 // Must keep both values on the expression-stack during null-check
1896 assert(peek(0) == top(), "long word order");
1897 do_null_check(peek(1), T_LONG);
1898 // Compile-time detect of null-exception?
1899 if (stopped()) return;
1900 b = pop_pair();
1901 a = pop_pair();
1902 c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
1903 push_pair(c);
1904 break;
1906 case Bytecodes::_ldiv:
1907 // Must keep both values on the expression-stack during null-check
1908 assert(peek(0) == top(), "long word order");
1909 do_null_check(peek(1), T_LONG);
1910 // Compile-time detect of null-exception?
1911 if (stopped()) return;
1912 b = pop_pair();
1913 a = pop_pair();
1914 c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
1915 push_pair(c);
1916 break;
1918 case Bytecodes::_ladd:
1919 b = pop_pair();
1920 a = pop_pair();
1921 c = _gvn.transform( new (C, 3) AddLNode(a,b) );
1922 push_pair(c);
1923 break;
1924 case Bytecodes::_lsub:
1925 b = pop_pair();
1926 a = pop_pair();
1927 c = _gvn.transform( new (C, 3) SubLNode(a,b) );
1928 push_pair(c);
1929 break;
1930 case Bytecodes::_lcmp:
1931 // Safepoints are now inserted _before_ branches. The long-compare
1932 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
1933 // slew of control flow. These are usually followed by a CmpI vs zero and
1934 // a branch; this pattern then optimizes to the obvious long-compare and
1935 // branch. However, if the branch is backwards there's a Safepoint
1936 // inserted. The inserted Safepoint captures the JVM state at the
1937 // pre-branch point, i.e. it captures the 3-way value. Thus if a
1938 // long-compare is used to control a loop the debug info will force
1939 // computation of the 3-way value, even though the generated code uses a
1940 // long-compare and branch. We try to rectify the situation by inserting
1941 // a SafePoint here and have it dominate and kill the safepoint added at a
1942 // following backwards branch. At this point the JVM state merely holds 2
1943 // longs but not the 3-way value.
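// Illustrative shape this targets (not from any particular method):
//   loop:  ...body...
//          lload_1; lload_3; lcmp; iflt loop   // backwards branch
// The lcmp/iflt pair folds into one long-compare-and-branch, but the
// SafePoint added at the backwards iflt would otherwise keep the CmpL3
// three-way value alive in its debug info.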
1944 if( UseLoopSafepoints ) {
1945 switch( iter().next_bc() ) {
1946 case Bytecodes::_ifgt:
1947 case Bytecodes::_iflt:
1948 case Bytecodes::_ifge:
1949 case Bytecodes::_ifle:
1950 case Bytecodes::_ifne:
1951 case Bytecodes::_ifeq:
1952 // If this is a backwards branch in the bytecodes, add Safepoint
1953 maybe_add_safepoint(iter().next_get_dest());
1954 }
1955 }
1956 b = pop_pair();
1957 a = pop_pair();
1958 c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
1959 push(c);
1960 break;
1962 case Bytecodes::_lneg:
1963 a = pop_pair();
1964 b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
1965 push_pair(b);
1966 break;
1967 case Bytecodes::_l2i:
1968 a = pop_pair();
1969 push( _gvn.transform( new (C, 2) ConvL2INode(a)));
1970 break;
1971 case Bytecodes::_i2l:
1972 a = pop();
1973 b = _gvn.transform( new (C, 2) ConvI2LNode(a));
1974 push_pair(b);
1975 break;
1976 case Bytecodes::_i2b:
1977 // Sign extend
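// (e.g. an int holding 0x000000FF: << 24 gives 0xFF000000, and the
// arithmetic >> 24 then gives 0xFFFFFFFF, i.e. -1, the sign-extended byte)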
1978 a = pop();
1979 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
1980 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
1981 push( a );
1982 break;
1983 case Bytecodes::_i2s:
1984 a = pop();
1985 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
1986 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
1987 push( a );
1988 break;
1989 case Bytecodes::_i2c:
1990 a = pop();
1991 push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
1992 break;
1994 case Bytecodes::_i2f:
1995 a = pop();
1996 b = _gvn.transform( new (C, 2) ConvI2FNode(a) ) ;
1997 c = precision_rounding(b);
1998 push(c);
1999 break;
2001 case Bytecodes::_i2d:
2002 a = pop();
2003 b = _gvn.transform( new (C, 2) ConvI2DNode(a));
2004 push_pair(b);
2005 break;
2007 case Bytecodes::_iinc: // Increment local
2008 i = iter().get_index(); // Get local index
2009 set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2010 break;
2012 // Exit points of synchronized methods must have an unlock node
2013 case Bytecodes::_return:
2014 return_current(NULL);
2015 break;
2017 case Bytecodes::_ireturn:
2018 case Bytecodes::_areturn:
2019 case Bytecodes::_freturn:
2020 return_current(pop());
2021 break;
2022 case Bytecodes::_lreturn:
2023 return_current(pop_pair());
2024 break;
2025 case Bytecodes::_dreturn:
2026 return_current(pop_pair());
2027 break;
2029 case Bytecodes::_athrow:
2030 // Throwing a null exception oop raises a NullPointerException instead.
2031 do_null_check(peek(), T_OBJECT);
2032 if (stopped()) return;
2033 if (JvmtiExport::can_post_exceptions()) {
2034 // "Full-speed throwing" is not necessary here,
2035 // since we're notifying the VM on every throw.
2036 uncommon_trap(Deoptimization::Reason_unhandled,
2037 Deoptimization::Action_none);
2038 return;
2039 }
2040 // Hook the thrown exception directly to subsequent handlers.
2041 if (BailoutToInterpreterForThrows) {
2042 // Keep method interpreted from now on.
2043 uncommon_trap(Deoptimization::Reason_unhandled,
2044 Deoptimization::Action_make_not_compilable);
2045 return;
2046 }
2047 add_exception_state(make_exception_state(peek()));
2048 break;
2050 case Bytecodes::_goto: // fall through
2051 case Bytecodes::_goto_w: {
2052 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2054 // If this is a backwards branch in the bytecodes, add Safepoint
2055 maybe_add_safepoint(target_bci);
2057 // Update method data
2058 profile_taken_branch(target_bci);
2060 // Merge the current control into the target basic block
2061 merge(target_bci);
2063 // See if we can get some profile data and hand it off to the next block
2064 Block *target_block = block()->successor_for_bci(target_bci);
2065 if (target_block->pred_count() != 1) break;
2066 ciMethodData* methodData = method()->method_data();
2067 if (!methodData->is_mature()) break;
2068 ciProfileData* data = methodData->bci_to_data(bci());
2069 assert(data->is_JumpData(), "need JumpData for taken branch");
2070 int taken = ((ciJumpData*)data)->taken();
2071 taken = method()->scale_count(taken);
2072 target_block->set_count(taken);
2073 break;
2074 }
2076 case Bytecodes::_ifnull:
2077 do_ifnull(BoolTest::eq);
2078 break;
2079 case Bytecodes::_ifnonnull:
2080 do_ifnull(BoolTest::ne);
2081 break;
2083 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2084 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2085 handle_if_acmp:
2086 // If this is a backwards branch in the bytecodes, add Safepoint
2087 maybe_add_safepoint(iter().get_dest());
2088 a = pop();
2089 b = pop();
2090 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2091 do_if(btest, c);
2092 break;
2094 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2095 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2096 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2097 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2098 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2099 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2100 handle_ifxx:
2101 // If this is a backwards branch in the bytecodes, add Safepoint
2102 maybe_add_safepoint(iter().get_dest());
2103 a = _gvn.intcon(0);
2104 b = pop();
2105 c = _gvn.transform( new (C, 3) CmpINode(b, a) );
2106 do_if(btest, c);
2107 break;
2109 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2110 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2111 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2112 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2113 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2114 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2115 handle_if_icmp:
2116 // If this is a backwards branch in the bytecodes, add Safepoint
2117 maybe_add_safepoint(iter().get_dest());
2118 a = pop();
2119 b = pop();
2120 c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
2121 do_if(btest, c);
2122 break;
2124 case Bytecodes::_tableswitch:
2125 do_tableswitch();
2126 break;
2128 case Bytecodes::_lookupswitch:
2129 do_lookupswitch();
2130 break;
2132 case Bytecodes::_invokestatic:
2133 case Bytecodes::_invokespecial:
2134 case Bytecodes::_invokevirtual:
2135 case Bytecodes::_invokeinterface:
2136 do_call();
2137 break;
2138 case Bytecodes::_checkcast:
2139 do_checkcast();
2140 break;
2141 case Bytecodes::_instanceof:
2142 do_instanceof();
2143 break;
2144 case Bytecodes::_anewarray:
2145 do_anewarray();
2146 break;
2147 case Bytecodes::_newarray:
2148 do_newarray((BasicType)iter().get_index());
2149 break;
2150 case Bytecodes::_multianewarray:
2151 do_multianewarray();
2152 break;
2153 case Bytecodes::_new:
2154 do_new();
2155 break;
2157 case Bytecodes::_jsr:
2158 case Bytecodes::_jsr_w:
2159 do_jsr();
2160 break;
2162 case Bytecodes::_ret:
2163 do_ret();
2164 break;
2167 case Bytecodes::_monitorenter:
2168 do_monitor_enter();
2169 break;
2171 case Bytecodes::_monitorexit:
2172 do_monitor_exit();
2173 break;
2175 case Bytecodes::_breakpoint:
2176 // A breakpoint was set concurrently with this compile
2177 // %%% use an uncommon trap?
2178 C->record_failure("breakpoint in method");
2179 return;
2181 default:
2182 #ifndef PRODUCT
2183 map()->dump(99);
2184 #endif
2185 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2186 ShouldNotReachHere();
2187 }
2189 #ifndef PRODUCT
2190 IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2191 if(printer) {
2192 char buffer[256];
2193 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2194 bool old = printer->traverse_outs();
2195 printer->set_traverse_outs(true);
2196 printer->print_method(C, buffer, 3);
2197 printer->set_traverse_outs(old);
2198 }
2199 #endif
2200 }