Fri, 27 Feb 2009 13:27:09 -0800
6810672: Comment typos
Summary: I have collected some typos I found while looking at the code.
Reviewed-by: kvn, never
1 /*
2 * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_parse2.cpp.incl"
28 extern int explicit_null_checks_inserted,
29 explicit_null_checks_elided;
31 //---------------------------------array_load----------------------------------
32 void Parse::array_load(BasicType elem_type) {
33 const Type* elem = Type::TOP;
34 Node* adr = array_addressing(elem_type, 0, &elem);
35 if (stopped()) return; // guaranteed null or range check
36 _sp -= 2; // Pop array and index
37 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
38 Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
39 push(ld);
40 }
43 //--------------------------------array_store----------------------------------
44 void Parse::array_store(BasicType elem_type) {
45 Node* adr = array_addressing(elem_type, 1);
46 if (stopped()) return; // guaranteed null or range check
47 Node* val = pop();
48 _sp -= 2; // Pop array and index
49 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
50 store_to_memory(control(), adr, val, elem_type, adr_type);
51 }
54 //------------------------------array_addressing-------------------------------
55 // Pull array and index from the stack. Compute pointer-to-element.
56 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
57 Node *idx = peek(0+vals); // Get from stack without popping
58 Node *ary = peek(1+vals); // in case of exception
60 // Null check the array base, with correct stack contents
61 ary = do_null_check(ary, T_ARRAY);
62 // Compile-time detection of a null exception?
63 if (stopped()) return top();
65 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
66 const TypeInt* sizetype = arytype->size();
67 const Type* elemtype = arytype->elem();
69 if (UseUniqueSubclasses && result2 != NULL) {
70 const Type* el = elemtype->make_ptr();
71 if (el && el->isa_instptr()) {
72 const TypeInstPtr* toop = el->is_instptr();
73 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
74 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
75 const Type* subklass = Type::get_const_type(toop->klass());
76 elemtype = subklass->join(el);
77 }
78 }
79 }
81 // Check for big class initializers with all constant offsets
82 // feeding into a known-size array.
83 const TypeInt* idxtype = _gvn.type(idx)->is_int();
84 // See if the highest idx value is less than the lowest array bound,
85 // and if the idx value cannot be negative:
86 bool need_range_check = true;
87 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
88 need_range_check = false;
89 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
90 }
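// Example of when this fires (sketch, not from this file): for an array
// created as "new int[10]" and indexed only with constants 0..9, idxtype
// is int:0..9 and sizetype is int:10..10, so idxtype->_hi (9) is below
// sizetype->_lo (10) and the index is provably non-negative -- no range
// check is emitted.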
92 if (!arytype->klass()->is_loaded()) {
93 // Only fails for some -Xcomp runs
94 // The class is unloaded. We have to run this bytecode in the interpreter.
95 uncommon_trap(Deoptimization::Reason_unloaded,
96 Deoptimization::Action_reinterpret,
97 arytype->klass(), "!loaded array");
98 return top();
99 }
101 // Do the range check
102 if (GenerateRangeChecks && need_range_check) {
103 Node* tst;
104 if (sizetype->_hi <= 0) {
105 // The greatest array bound is negative, so we can conclude that we're
106 // compiling unreachable code, but the unsigned compare trick used below
107 // only works with non-negative lengths. Instead, hack "tst" to be zero so
108 // the uncommon_trap path will always be taken.
109 tst = _gvn.intcon(0);
110 } else {
111 // Range is constant in array-oop, so we can use the original state of mem
112 Node* len = load_array_length(ary);
114 // Test length vs index (standard trick using unsigned compare)
115 Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
116 BoolTest::mask btest = BoolTest::lt;
117 tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
118 }
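// The unsigned compare above performs both bounds checks at once: a
// negative idx reinterpreted as unsigned is larger than any valid array
// length. A scalar sketch of the same trick (illustrative helper, not in
// this file):
//
//   bool in_bounds(jint idx, jint len) {   // len >= 0
//     return (juint)idx < (juint)len;      // == (idx >= 0 && idx < len)
//   }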
119 // Branch to failure if out of bounds
120 { BuildCutout unless(this, tst, PROB_MAX);
121 if (C->allow_range_check_smearing()) {
122 // Do not use builtin_throw, since range checks are sometimes
123 // made more stringent by an optimistic transformation.
124 // This creates "tentative" range checks at this point,
125 // which are not guaranteed to throw exceptions.
126 // See IfNode::Ideal, is_range_check, adjust_check.
127 uncommon_trap(Deoptimization::Reason_range_check,
128 Deoptimization::Action_make_not_entrant,
129 NULL, "range_check");
130 } else {
131 // If we have already recompiled with the range-check-widening
132 // heroic optimization turned off, then we must really be throwing
133 // range check exceptions.
134 builtin_throw(Deoptimization::Reason_range_check, idx);
135 }
136 }
137 }
138 // Check whether we statically know we always throw a range-check exception
139 if (stopped()) return top();
141 Node* ptr = array_element_address(ary, idx, type, sizetype);
143 if (result2 != NULL) *result2 = elemtype;
145 assert(ptr != top(), "top should go hand-in-hand with stopped");
147 return ptr;
148 }
151 // returns IfNode
152 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
153 Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
154 Node *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
155 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
156 return iff;
157 }
159 // return Region node
160 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
161 Node *region = new (C, 3) RegionNode(3); // 2 results
162 record_for_igvn(region);
163 region->init_req(1, iffalse);
164 region->init_req(2, iftrue );
165 _gvn.set_type(region, Type::CONTROL);
166 region = _gvn.transform(region);
167 set_control (region);
168 return region;
169 }
172 //------------------------------helper for tableswitch-------------------------
173 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
174 // True branch, use existing map info
175 { PreserveJVMState pjvms(this);
176 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
177 set_control( iftrue );
178 profile_switch_case(prof_table_index);
179 merge_new_path(dest_bci_if_true);
180 }
182 // False branch
183 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
184 set_control( iffalse );
185 }
187 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
188 // True branch, use existing map info
189 { PreserveJVMState pjvms(this);
190 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
191 set_control( iffalse );
192 profile_switch_case(prof_table_index);
193 merge_new_path(dest_bci_if_true);
194 }
196 // False branch
197 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
198 set_control( iftrue );
199 }
201 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
202 // False branch, use existing map and control()
203 profile_switch_case(prof_table_index);
204 merge_new_path(dest_bci);
205 }
208 extern "C" {
209 static int jint_cmp(const void *i, const void *j) {
210 int a = *(jint *)i;
211 int b = *(jint *)j;
212 return a > b ? 1 : a < b ? -1 : 0;
213 }
214 }
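// (jint_cmp compares explicitly rather than returning a - b, since the
// subtraction could overflow for widely separated keys.)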
217 // Default value for methodData switch indexing. Must be a negative value to avoid
218 // conflict with any legal switch index.
219 #define NullTableIndex -1
221 class SwitchRange : public StackObj {
222 // a range of integers coupled with a bci destination
223 jint _lo; // inclusive lower limit
224 jint _hi; // inclusive upper limit
225 int _dest;
226 int _table_index; // index into method data table
228 public:
229 jint lo() const { return _lo; }
230 jint hi() const { return _hi; }
231 int dest() const { return _dest; }
232 int table_index() const { return _table_index; }
233 bool is_singleton() const { return _lo == _hi; }
235 void setRange(jint lo, jint hi, int dest, int table_index) {
236 assert(lo <= hi, "must be a non-empty range");
237 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
238 }
239 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
240 assert(lo <= hi, "must be a non-empty range");
241 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
242 _hi = hi;
243 return true;
244 }
245 return false;
246 }
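// Merging example (sketch): after setRange(0, 0, D, t), a call to
// adjoinRange(1, 5, D, t) grows the range to {0..5}=>D, while
// adjoinRange(7, 9, D, t) returns false (gap at 6), so the caller must
// start a fresh SwitchRange.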
248 void set (jint value, int dest, int table_index) {
249 setRange(value, value, dest, table_index);
250 }
251 bool adjoin(jint value, int dest, int table_index) {
252 return adjoinRange(value, value, dest, table_index);
253 }
255 void print(ciEnv* env) {
256 if (is_singleton())
257 tty->print(" {%d}=>%d", lo(), dest());
258 else if (lo() == min_jint)
259 tty->print(" {..%d}=>%d", hi(), dest());
260 else if (hi() == max_jint)
261 tty->print(" {%d..}=>%d", lo(), dest());
262 else
263 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
264 }
265 };
268 //-------------------------------do_tableswitch--------------------------------
269 void Parse::do_tableswitch() {
270 Node* lookup = pop();
272 // Get information about tableswitch
273 int default_dest = iter().get_dest_table(0);
274 int lo_index = iter().get_int_table(1);
275 int hi_index = iter().get_int_table(2);
276 int len = hi_index - lo_index + 1;
278 if (len < 1) {
279 // If this is a backward branch, add safepoint
280 maybe_add_safepoint(default_dest);
281 merge(default_dest);
282 return;
283 }
285 // generate decision tree, using trichotomy when possible
286 int rnum = len+2;
287 bool makes_backward_branch = false;
288 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
289 int rp = -1;
290 if (lo_index != min_jint) {
291 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
292 }
293 for (int j = 0; j < len; j++) {
294 jint match_int = lo_index+j;
295 int dest = iter().get_dest_table(j+3);
296 makes_backward_branch |= (dest <= bci());
297 int table_index = method_data_update() ? j : NullTableIndex;
298 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
299 ranges[++rp].set(match_int, dest, table_index);
300 }
301 }
302 jint highest = lo_index+(len-1);
303 assert(ranges[rp].hi() == highest, "");
304 if (highest != max_jint
305 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
306 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
307 }
308 assert(rp < len+2, "not too many ranges");
310 // Safepoint in case backward branch observed
311 if( makes_backward_branch && UseLoopSafepoints )
312 add_safepoint();
314 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
315 }
318 //------------------------------do_lookupswitch--------------------------------
319 void Parse::do_lookupswitch() {
320 Node *lookup = pop(); // lookup value
321 // Get information about lookupswitch
322 int default_dest = iter().get_dest_table(0);
323 int len = iter().get_int_table(1);
325 if (len < 1) { // If this is a backward branch, add safepoint
326 maybe_add_safepoint(default_dest);
327 merge(default_dest);
328 return;
329 }
331 // generate decision tree, using trichotomy when possible
332 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
333 {
334 for( int j = 0; j < len; j++ ) {
335 table[j+j+0] = iter().get_int_table(2+j+j);
336 table[j+j+1] = iter().get_dest_table(2+j+j+1);
337 }
338 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
339 }
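// Each (match, dest) pair is one qsort record of size 2*sizeof(jint);
// jint_cmp looks only at the first word, so records sort by match value
// and every dest stays attached to its key.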
341 int rnum = len*2+1;
342 bool makes_backward_branch = false;
343 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
344 int rp = -1;
345 for( int j = 0; j < len; j++ ) {
346 jint match_int = table[j+j+0];
347 int dest = table[j+j+1];
348 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
349 int table_index = method_data_update() ? j : NullTableIndex;
350 makes_backward_branch |= (dest <= bci());
351 if( match_int != next_lo ) {
352 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
353 }
354 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
355 ranges[++rp].set(match_int, dest, table_index);
356 }
357 }
358 jint highest = table[2*(len-1)];
359 assert(ranges[rp].hi() == highest, "");
360 if( highest != max_jint
361 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
362 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
363 }
364 assert(rp < rnum, "not too many ranges");
366 // Safepoint in case backward branch observed
367 if( makes_backward_branch && UseLoopSafepoints )
368 add_safepoint();
370 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
371 }
373 //----------------------------create_jump_tables-------------------------------
374 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
375 // Are jumptables enabled
376 if (!UseJumpTables) return false;
378 // Are jumptables supported
379 if (!Matcher::has_match_rule(Op_Jump)) return false;
381 // Don't make jump table if profiling
382 if (method_data_update()) return false;
384 // Decide if a guard is needed to lop off big ranges at either (or
385 // both) end(s) of the input set. We'll call this the default target
386 // even though we can't be sure that it is the true "default".
388 bool needs_guard = false;
389 int default_dest;
390 int64 total_outlier_size = 0;
391 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
392 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
394 if (lo->dest() == hi->dest()) {
395 total_outlier_size = hi_size + lo_size;
396 default_dest = lo->dest();
397 } else if (lo_size > hi_size) {
398 total_outlier_size = lo_size;
399 default_dest = lo->dest();
400 } else {
401 total_outlier_size = hi_size;
402 default_dest = hi->dest();
403 }
405 // If a guard test will eliminate very sparse end ranges, then
406 // it is worth the cost of an extra jump.
407 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
408 needs_guard = true;
409 if (default_dest == lo->dest()) lo++;
410 if (default_dest == hi->dest()) hi--;
411 }
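// Example (sketch): for case keys {0..2} plus a default covering
// [min_jint..-1] and [3..max_jint], both outlier ranges branch to the
// default bci, total_outlier_size is huge, so a guard is emitted and
// lo/hi shrink to the dense middle before the table is sized below.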
413 // Find the total number of cases and ranges
414 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
415 int num_range = hi - lo + 1;
417 // Don't create table if: too large, too small, or too sparse.
418 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
419 return false;
420 if (num_cases > (MaxJumpTableSparseness * num_range))
421 return false;
423 // Normalize table lookups to zero
424 int lowval = lo->lo();
425 key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
427 // Generate a guard to protect against input keyvals that aren't
428 // in the switch domain.
429 if (needs_guard) {
430 Node* size = _gvn.intcon(num_cases);
431 Node* cmp = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
432 Node* tst = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
433 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
434 jump_if_true_fork(iff, default_dest, NullTableIndex);
435 }
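// Since key_val was normalized to zero above, the single unsigned
// "key_val >= num_cases" test also catches keys below the original
// lo->lo() (they wrap to large unsigned values) -- the same
// unsigned-compare trick used for range checks in array_addressing.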
437 // Create an ideal node JumpTable that has projections
438 // of all possible ranges for a switch statement.
439 // The key_val input must be converted to a pointer offset and scaled.
440 // Compare Parse::array_addressing above.
441 #ifdef _LP64
442 // Clean the 32-bit int into a real 64-bit offset.
443 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
444 const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
445 key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
446 #endif
447 // Shift the value by wordsize so we have an index into the table, rather
448 // than a switch value
449 Node *shiftWord = _gvn.MakeConX(wordSize);
450 key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
452 // Create the JumpNode
453 Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
455 // These are the switch destinations hanging off the jumpnode
456 int i = 0;
457 for (SwitchRange* r = lo; r <= hi; r++) {
458 for (int j = r->lo(); j <= r->hi(); j++, i++) {
459 Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
460 {
461 PreserveJVMState pjvms(this);
462 set_control(input);
463 jump_if_always_fork(r->dest(), r->table_index());
464 }
465 }
466 }
467 assert(i == num_cases, "miscount of cases");
468 stop_and_kill_map(); // no more uses for this JVMS
469 return true;
470 }
472 //----------------------------jump_switch_ranges-------------------------------
473 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
474 Block* switch_block = block();
476 if (switch_depth == 0) {
477 // Do special processing for the top-level call.
478 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
479 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
481 // Decrement pred-numbers for the unique set of nodes.
482 #ifdef ASSERT
483 // Ensure that the block's successors are a (duplicate-free) set.
484 int successors_counted = 0; // block occurrences in [lo..hi]
485 int unique_successors = switch_block->num_successors();
486 for (int i = 0; i < unique_successors; i++) {
487 Block* target = switch_block->successor_at(i);
489 // Check that the set of successors is the same in both places.
490 int successors_found = 0;
491 for (SwitchRange* p = lo; p <= hi; p++) {
492 if (p->dest() == target->start()) successors_found++;
493 }
494 assert(successors_found > 0, "successor must be known");
495 successors_counted += successors_found;
496 }
497 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
498 #endif
500 // Maybe prune the inputs, based on the type of key_val.
501 jint min_val = min_jint;
502 jint max_val = max_jint;
503 const TypeInt* ti = key_val->bottom_type()->isa_int();
504 if (ti != NULL) {
505 min_val = ti->_lo;
506 max_val = ti->_hi;
507 assert(min_val <= max_val, "invalid int type");
508 }
509 while (lo->hi() < min_val) lo++;
510 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
511 while (hi->lo() > max_val) hi--;
512 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
513 }
515 #ifndef PRODUCT
516 if (switch_depth == 0) {
517 _max_switch_depth = 0;
518 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
519 }
520 #endif
522 assert(lo <= hi, "must be a non-empty set of ranges");
523 if (lo == hi) {
524 jump_if_always_fork(lo->dest(), lo->table_index());
525 } else {
526 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
527 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
529 if (create_jump_tables(key_val, lo, hi)) return;
531 int nr = hi - lo + 1;
533 SwitchRange* mid = lo + nr/2;
534 // if there is an easy choice, pivot at a singleton:
535 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
537 assert(lo < mid && mid <= hi, "good pivot choice");
538 assert(nr != 2 || mid == hi, "should pick higher of 2");
539 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
541 Node *test_val = _gvn.intcon(mid->lo());
543 if (mid->is_singleton()) {
544 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
545 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
547 // Special Case: If there are exactly three ranges, and the high
548 // and low range each go to the same place, omit the "gt" test,
549 // since it will not discriminate anything.
550 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
551 if (eq_test_only) {
552 assert(mid == hi-1, "");
553 }
555 // if there is a higher range, test for it and process it:
556 if (mid < hi && !eq_test_only) {
557 // two comparisons of same values--should enable 1 test for 2 branches
558 // Use BoolTest::le instead of BoolTest::gt
559 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
560 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
561 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
562 { PreserveJVMState pjvms(this);
563 set_control(iffalse);
564 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
565 }
566 set_control(iftrue);
567 }
569 } else {
570 // mid is a range, not a singleton, so treat mid..hi as a unit
571 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
573 // if there is a higher range, test for it and process it:
574 if (mid == hi) {
575 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
576 } else {
577 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
578 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
579 { PreserveJVMState pjvms(this);
580 set_control(iftrue);
581 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
582 }
583 set_control(iffalse);
584 }
585 }
587 // in any case, process the lower range
588 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
589 }
591 // Decrease pred_count for each successor after all is done.
592 if (switch_depth == 0) {
593 int unique_successors = switch_block->num_successors();
594 for (int i = 0; i < unique_successors; i++) {
595 Block* target = switch_block->successor_at(i);
596 // Throw away the pre-allocated path for each unique successor.
597 target->next_path_num();
598 }
599 }
601 #ifndef PRODUCT
602 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
603 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
604 SwitchRange* r;
605 int nsing = 0;
606 for( r = lo; r <= hi; r++ ) {
607 if( r->is_singleton() ) nsing++;
608 }
609 tty->print(">>> ");
610 _method->print_short_name();
611 tty->print_cr(" switch decision tree");
612 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
613 hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
614 if (_max_switch_depth > _est_switch_depth) {
615 tty->print_cr("******** BAD SWITCH DEPTH ********");
616 }
617 tty->print(" ");
618 for( r = lo; r <= hi; r++ ) {
619 r->print(env());
620 }
621 tty->print_cr("");
622 }
623 #endif
624 }
626 void Parse::modf() {
627 Node *f2 = pop();
628 Node *f1 = pop();
629 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
630 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
631 "frem", NULL, //no memory effects
632 f1, f2);
633 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
635 push(res);
636 }
638 void Parse::modd() {
639 Node *d2 = pop_pair();
640 Node *d1 = pop_pair();
641 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
642 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
643 "drem", NULL, //no memory effects
644 d1, top(), d2, top());
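// (Doubles occupy two slots in the leaf-call signature; the top()
// inputs stand in for the unused second halves of d1 and d2.)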
645 Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
647 #ifdef ASSERT
648 Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
649 assert(res_top == top(), "second value must be top");
650 #endif
652 push_pair(res_d);
653 }
655 void Parse::l2f() {
656 Node* f2 = pop();
657 Node* f1 = pop();
658 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
659 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
660 "l2f", NULL, //no memory effects
661 f1, f2);
662 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
664 push(res);
665 }
667 void Parse::do_irem() {
668 // Must keep both values on the expression-stack during null-check
669 do_null_check(peek(), T_INT);
670 // Compile-time detection of a null exception?
671 if (stopped()) return;
673 Node* b = pop();
674 Node* a = pop();
676 const Type *t = _gvn.type(b);
677 if (t != Type::TOP) {
678 const TypeInt *ti = t->is_int();
679 if (ti->is_con()) {
680 int divisor = ti->get_con();
681 // check for positive power of 2
682 if (divisor > 0 &&
683 (divisor & ~(divisor-1)) == divisor) {
684 // yes !
685 Node *mask = _gvn.intcon((divisor - 1));
686 // Sigh, must handle negative dividends
687 Node *zero = _gvn.intcon(0);
688 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
689 Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
690 Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
691 Node *reg = jump_if_join(ift, iff);
692 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
693 // Negative path; negate/and/negate
694 Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
695 Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
696 Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
697 phi->init_req(1, negn);
698 // Fast positive case
699 Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
700 phi->init_req(2, andx);
701 // Push the merge
702 push( _gvn.transform(phi) );
703 return;
704 }
705 }
706 }
707 // Default case
708 push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
709 }
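// A scalar sketch of the power-of-two expansion built above, assuming
// wrapping two's-complement arithmetic (Java irem takes the sign of the
// dividend, hence the negate/and/negate on the negative path); the
// helper name is illustrative only:
//
//   jint irem_pow2(jint a, jint divisor) {  // divisor == 1 << k
//     jint mask = divisor - 1;
//     return (a < 0) ? -((-a) & mask) : (a & mask);
//   }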
711 // Handle jsr and jsr_w bytecode
712 void Parse::do_jsr() {
713 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
715 // Store information about current state, tagged with new _jsr_bci
716 int return_bci = iter().next_bci();
717 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
719 // Update method data
720 profile_taken_branch(jsr_bci);
722 // The way we do things now, there is only one successor block
723 // for the jsr, because the target code is cloned by ciTypeFlow.
724 Block* target = successor_for_bci(jsr_bci);
726 // What got pushed?
727 const Type* ret_addr = target->peek();
728 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
730 // Effect of jsr on the stack
731 push(_gvn.makecon(ret_addr));
733 // Flow to the jsr.
734 merge(jsr_bci);
735 }
737 // Handle ret bytecode
738 void Parse::do_ret() {
739 // Find to whom we return.
740 #if 0 // %%%% MAKE THIS WORK
741 Node* con = local();
742 const TypePtr* tp = con->bottom_type()->isa_ptr();
743 assert(tp && tp->singleton(), "");
744 int return_bci = (int) tp->get_con();
745 merge(return_bci);
746 #else
747 assert(block()->num_successors() == 1, "a ret can only go one place now");
748 Block* target = block()->successor_at(0);
749 assert(!target->is_ready(), "our arrival must be expected");
750 profile_ret(target->flow()->start());
751 int pnum = target->next_path_num();
752 merge_common(target, pnum);
753 #endif
754 }
756 //--------------------------dynamic_branch_prediction--------------------------
757 // Try to gather dynamic branch prediction behavior. Return a probability
758 // of the branch being taken and set the "cnt" field. Returns -1.0
759 // if we need to use static prediction for some reason.
760 float Parse::dynamic_branch_prediction(float &cnt) {
761 ResourceMark rm;
763 cnt = COUNT_UNKNOWN;
765 // Use MethodData information if it is available
766 // FIXME: free the ProfileData structure
767 ciMethodData* methodData = method()->method_data();
768 if (!methodData->is_mature()) return PROB_UNKNOWN;
769 ciProfileData* data = methodData->bci_to_data(bci());
770 if (!data->is_JumpData()) return PROB_UNKNOWN;
772 // get taken and not taken values
773 int taken = data->as_JumpData()->taken();
774 int not_taken = 0;
775 if (data->is_BranchData()) {
776 not_taken = data->as_BranchData()->not_taken();
777 }
779 // scale the counts to be commensurate with invocation counts:
780 taken = method()->scale_count(taken);
781 not_taken = method()->scale_count(not_taken);
783 // Give up if too few counts to be meaningful
784 if (taken + not_taken < 40) {
785 if (C->log() != NULL) {
786 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
787 }
788 return PROB_UNKNOWN;
789 }
791 // Compute frequency that we arrive here
792 int sum = taken + not_taken;
793 // Adjust, if this block is a cloned private block but the
794 // Jump counts are shared. Take the private counts for
795 // just this path instead of the shared counts.
796 if( block()->count() > 0 )
797 sum = block()->count();
798 cnt = (float)sum / (float)FreqCountInvocations;
800 // Pin probability to sane limits
801 float prob;
802 if( !taken )
803 prob = (0+PROB_MIN) / 2;
804 else if( !not_taken )
805 prob = (1+PROB_MAX) / 2;
806 else { // Compute probability of true path
807 prob = (float)taken / (float)(taken + not_taken);
808 if (prob > PROB_MAX) prob = PROB_MAX;
809 if (prob < PROB_MIN) prob = PROB_MIN;
810 }
812 assert((cnt > 0.0f) && (prob > 0.0f),
813 "Bad frequency assignment in if");
815 if (C->log() != NULL) {
816 const char* prob_str = NULL;
817 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
818 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
819 char prob_str_buf[30];
820 if (prob_str == NULL) {
821 sprintf(prob_str_buf, "%g", prob);
822 prob_str = prob_str_buf;
823 }
824 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
825 iter().get_dest(), taken, not_taken, cnt, prob_str);
826 }
827 return prob;
828 }
830 //-----------------------------branch_prediction-------------------------------
831 float Parse::branch_prediction(float& cnt,
832 BoolTest::mask btest,
833 int target_bci) {
834 float prob = dynamic_branch_prediction(cnt);
835 // If prob is unknown, switch to static prediction
836 if (prob != PROB_UNKNOWN) return prob;
838 prob = PROB_FAIR; // Set default value
839 if (btest == BoolTest::eq) // Exactly equal test?
840 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
841 else if (btest == BoolTest::ne)
842 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
844 // If this is a conditional test guarding a backwards branch,
845 // assume it's a loop-back edge. Make it a likely taken branch.
846 if (target_bci < bci()) {
847 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
848 // Since it's an OSR, we probably have profile data, but since
849 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
850 // Let's make a special check here for completely zero counts.
851 ciMethodData* methodData = method()->method_data();
852 if (!methodData->is_empty()) {
853 ciProfileData* data = methodData->bci_to_data(bci());
854 // Only stop for truly zero counts, which mean an unknown part
855 // of the OSR-ed method, and we want to deopt to gather more stats.
856 // If you have ANY counts, then this loop is simply 'cold' relative
857 // to the OSR loop.
858 if (data->as_BranchData()->taken() +
859 data->as_BranchData()->not_taken() == 0 ) {
860 // This is the only way to return PROB_UNKNOWN:
861 return PROB_UNKNOWN;
862 }
863 }
864 }
865 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
866 }
868 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
869 return prob;
870 }
872 // The magic constants are chosen so as to match the output of
873 // branch_prediction() when the profile reports a zero taken count.
874 // It is important to distinguish zero counts unambiguously, because
875 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
876 // very small but nonzero probabilities, which if confused with zero
877 // counts would keep the program recompiling indefinitely.
878 bool Parse::seems_never_taken(float prob) {
879 return prob < PROB_MIN;
880 }
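// (A prob below PROB_MIN can only come from dynamic_branch_prediction
// pinning a zero taken count to (0+PROB_MIN)/2; any nonzero count is
// clamped to at least PROB_MIN and so reads as "rarely taken".)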
882 //-------------------------------repush_if_args--------------------------------
883 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
884 inline void Parse::repush_if_args() {
885 #ifndef PRODUCT
886 if (PrintOpto && WizardMode) {
887 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
888 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
889 method()->print_name(); tty->cr();
890 }
891 #endif
892 int bc_depth = - Bytecodes::depth(iter().cur_bc());
893 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
894 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
895 assert(argument(0) != NULL, "must exist");
896 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
897 _sp += bc_depth;
898 }
900 //----------------------------------do_ifnull----------------------------------
901 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
902 int target_bci = iter().get_dest();
904 Block* branch_block = successor_for_bci(target_bci);
905 Block* next_block = successor_for_bci(iter().next_bci());
907 float cnt;
908 float prob = branch_prediction(cnt, btest, target_bci);
909 if (prob == PROB_UNKNOWN) {
910 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
911 #ifndef PRODUCT
912 if (PrintOpto && Verbose)
913 tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
914 #endif
915 repush_if_args(); // to gather stats on loop
916 // We need to mark this branch as taken so that if we recompile we will
917 // see that it is possible. In the tiered system the interpreter doesn't
918 // do profiling and by the time we get to the lower tier from the interpreter
919 // the path may be cold again. Make sure it doesn't look untaken
920 profile_taken_branch(target_bci, !ProfileInterpreter);
921 uncommon_trap(Deoptimization::Reason_unreached,
922 Deoptimization::Action_reinterpret,
923 NULL, "cold");
924 if (EliminateAutoBox) {
925 // Mark the successor blocks as parsed
926 branch_block->next_path_num();
927 next_block->next_path_num();
928 }
929 return;
930 }
932 explicit_null_checks_inserted++;
934 // Generate real control flow
935 Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );
937 // Sanity check the probability value
938 assert(prob > 0.0f,"Bad probability in Parser");
939 // Need xform to put node in hash table
940 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
941 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
942 // True branch
943 { PreserveJVMState pjvms(this);
944 Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
945 set_control(iftrue);
947 if (stopped()) { // Path is dead?
948 explicit_null_checks_elided++;
949 if (EliminateAutoBox) {
950 // Mark the successor block as parsed
951 branch_block->next_path_num();
952 }
953 } else { // Path is live.
954 // Update method data
955 profile_taken_branch(target_bci);
956 adjust_map_after_if(btest, c, prob, branch_block, next_block);
957 if (!stopped())
958 merge(target_bci);
959 }
960 }
962 // False branch
963 Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
964 set_control(iffalse);
966 if (stopped()) { // Path is dead?
967 explicit_null_checks_elided++;
968 if (EliminateAutoBox) {
969 // Mark the successor block as parsed
970 next_block->next_path_num();
971 }
972 } else { // Path is live.
973 // Update method data
974 profile_not_taken_branch();
975 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
976 next_block, branch_block);
977 }
978 }
980 //------------------------------------do_if------------------------------------
981 void Parse::do_if(BoolTest::mask btest, Node* c) {
982 int target_bci = iter().get_dest();
984 Block* branch_block = successor_for_bci(target_bci);
985 Block* next_block = successor_for_bci(iter().next_bci());
987 float cnt;
988 float prob = branch_prediction(cnt, btest, target_bci);
989 float untaken_prob = 1.0 - prob;
991 if (prob == PROB_UNKNOWN) {
992 #ifndef PRODUCT
993 if (PrintOpto && Verbose)
994 tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
995 #endif
996 repush_if_args(); // to gather stats on loop
997 // We need to mark this branch as taken so that if we recompile we will
998 // see that it is possible. In the tiered system the interpreter doesn't
999 // do profiling and by the time we get to the lower tier from the interpreter
1000 // the path may be cold again. Make sure it doesn't look untaken
1001 profile_taken_branch(target_bci, !ProfileInterpreter);
1002 uncommon_trap(Deoptimization::Reason_unreached,
1003 Deoptimization::Action_reinterpret,
1004 NULL, "cold");
1005 if (EliminateAutoBox) {
1006 // Mark the successor blocks as parsed
1007 branch_block->next_path_num();
1008 next_block->next_path_num();
1009 }
1010 return;
1011 }
1013 // Sanity check the probability value
1014 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1016 bool taken_if_true = true;
1017 // Convert BoolTest to canonical form:
1018 if (!BoolTest(btest).is_canonical()) {
1019 btest = BoolTest(btest).negate();
1020 taken_if_true = false;
1021 // prob is NOT updated here; it remains the probability of the taken
1022 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1023 }
1024 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1026 Node* tst0 = new (C, 2) BoolNode(c, btest);
1027 Node* tst = _gvn.transform(tst0);
1028 BoolTest::mask taken_btest = BoolTest::illegal;
1029 BoolTest::mask untaken_btest = BoolTest::illegal;
1031 if (tst->is_Bool()) {
1032 // Refresh c from the transformed bool node, since it may be
1033 // simpler than the original c. Also re-canonicalize btest.
1034 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1035 // That can arise from statements like: if (x instanceof C) ...
1036 if (tst != tst0) {
1037 // Canonicalize one more time since transform can change it.
1038 btest = tst->as_Bool()->_test._test;
1039 if (!BoolTest(btest).is_canonical()) {
1040 // Reverse edges one more time...
1041 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1042 btest = tst->as_Bool()->_test._test;
1043 assert(BoolTest(btest).is_canonical(), "sanity");
1044 taken_if_true = !taken_if_true;
1045 }
1046 c = tst->in(1);
1047 }
1048 BoolTest::mask neg_btest = BoolTest(btest).negate();
1049 taken_btest = taken_if_true ? btest : neg_btest;
1050 untaken_btest = taken_if_true ? neg_btest : btest;
1051 }
1053 // Generate real control flow
1054 float true_prob = (taken_if_true ? prob : untaken_prob);
1055 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1056 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1057 Node* taken_branch = new (C, 1) IfTrueNode(iff);
1058 Node* untaken_branch = new (C, 1) IfFalseNode(iff);
1059 if (!taken_if_true) { // Finish conversion to canonical form
1060 Node* tmp = taken_branch;
1061 taken_branch = untaken_branch;
1062 untaken_branch = tmp;
1063 }
1065 // Branch is taken:
1066 { PreserveJVMState pjvms(this);
1067 taken_branch = _gvn.transform(taken_branch);
1068 set_control(taken_branch);
1070 if (stopped()) {
1071 if (EliminateAutoBox) {
1072 // Mark the successor block as parsed
1073 branch_block->next_path_num();
1074 }
1075 } else {
1076 // Update method data
1077 profile_taken_branch(target_bci);
1078 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1079 if (!stopped())
1080 merge(target_bci);
1081 }
1082 }
1084 untaken_branch = _gvn.transform(untaken_branch);
1085 set_control(untaken_branch);
1087 // Branch not taken.
1088 if (stopped()) {
1089 if (EliminateAutoBox) {
1090 // Mark the successor block as parsed
1091 next_block->next_path_num();
1092 }
1093 } else {
1094 // Update method data
1095 profile_not_taken_branch();
1096 adjust_map_after_if(untaken_btest, c, untaken_prob,
1097 next_block, branch_block);
1098 }
1099 }
1101 //----------------------------adjust_map_after_if------------------------------
1102 // Adjust the JVM state to reflect the result of taking this path.
1103 // Basically, it means inspecting the CmpNode controlling this
1104 // branch, seeing how it constrains a tested value, and then
1105 // deciding if it's worth our while to encode this constraint
1106 // as graph nodes in the current abstract interpretation map.
1107 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1108 Block* path, Block* other_path) {
1109 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1110 return; // nothing to do
1112 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1114 int cop = c->Opcode();
1115 if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) {
1116 // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.)
1117 //
1118 // If this might possibly turn into an implicit null check,
1119 // and the null has never yet been seen, we need to generate
1120 // an uncommon trap, so as to recompile instead of suffering
1121 // with very slow branches. (We'll get the slow branches if
1122 // the program ever changes phase and starts seeing nulls here.)
1123 //
1124 // The tests we worry about are of the form (p == null).
1125 // We do not simply inspect for a null constant, since a node may
1126 // optimize to 'null' later on.
1127 repush_if_args();
1128 // We need to mark this branch as taken so that if we recompile we will
1129 // see that it is possible. In the tiered system the interpreter doesn't
1130 // do profiling and by the time we get to the lower tier from the interpreter
1131 // the path may be cold again. Make sure it doesn't look untaken
1132 if (is_fallthrough) {
1133 profile_not_taken_branch(!ProfileInterpreter);
1134 } else {
1135 profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
1136 }
1137 uncommon_trap(Deoptimization::Reason_unreached,
1138 Deoptimization::Action_reinterpret,
1139 NULL,
1140 (is_fallthrough ? "taken always" : "taken never"));
1141 return;
1142 }
1144 Node* val = c->in(1);
1145 Node* con = c->in(2);
1146 const Type* tcon = _gvn.type(con);
1147 const Type* tval = _gvn.type(val);
1148 bool have_con = tcon->singleton();
1149 if (tval->singleton()) {
1150 if (!have_con) {
1151 // Swap, so constant is in con.
1152 con = val;
1153 tcon = tval;
1154 val = c->in(2);
1155 tval = _gvn.type(val);
1156 btest = BoolTest(btest).commute();
1157 have_con = true;
1158 } else {
1159 // Do we have two constants? Then leave well enough alone.
1160 have_con = false;
1161 }
1162 }
1163 if (!have_con) // remaining adjustments need a con
1164 return;
1167 int val_in_map = map()->find_edge(val);
1168 if (val_in_map < 0) return; // replace_in_map would be useless
1169 {
1170 JVMState* jvms = this->jvms();
1171 if (!(jvms->is_loc(val_in_map) ||
1172 jvms->is_stk(val_in_map)))
1173 return; // again, it would be useless
1174 }
1176 // Check for a comparison to a constant, and "know" that the compared
1177 // value is constrained on this path.
1178 assert(tcon->singleton(), "");
1179 ConstraintCastNode* ccast = NULL;
1180 Node* cast = NULL;
1182 switch (btest) {
1183 case BoolTest::eq: // Constant test?
1184 {
1185 const Type* tboth = tcon->join(tval);
1186 if (tboth == tval) break; // Nothing to gain.
1187 if (tcon->isa_int()) {
1188 ccast = new (C, 2) CastIINode(val, tboth);
1189 } else if (tcon == TypePtr::NULL_PTR) {
1190 // Cast to null, but keep the pointer identity temporarily live.
1191 ccast = new (C, 2) CastPPNode(val, tboth);
1192 } else {
1193 const TypeF* tf = tcon->isa_float_constant();
1194 const TypeD* td = tcon->isa_double_constant();
1195 // Exclude tests vs float/double 0 as these could be
1196 // either +0 or -0. Just because you are equal to +0
1197 // doesn't mean you ARE +0!
1198 if ((!tf || tf->_f != 0.0) &&
1199 (!td || td->_d != 0.0))
1200 cast = con; // Replace non-constant val by con.
1201 }
1202 }
1203 break;
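// (Concretely: -0.0f == 0.0f is true, yet 1.0f / -0.0f is -Inf while
// 1.0f / 0.0f is +Inf, so replacing val with the +0 constant could
// change observable behavior -- hence the exclusion above.)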
1205 case BoolTest::ne:
1206 if (tcon == TypePtr::NULL_PTR) {
1207 cast = cast_not_null(val, false);
1208 }
1209 break;
1211 default:
1212 // (At this point we could record int range types with CastII.)
1213 break;
1214 }
1216 if (ccast != NULL) {
1217 const Type* tcc = ccast->as_Type()->type();
1218 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1219 // Delay transform() call to allow recovery of pre-cast value
1220 // at the control merge.
1221 ccast->set_req(0, control());
1222 _gvn.set_type_bottom(ccast);
1223 record_for_igvn(ccast);
1224 cast = ccast;
1225 }
1227 if (cast != NULL) { // Here's the payoff.
1228 replace_in_map(val, cast);
1229 }
1230 }
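// Example of the payoff (sketch): for "if (p == null) ... else use(p)",
// the null path replaces p in the map with the null constant (via the
// CastPP above) and the other path with cast_not_null(p), so later loads
// and null checks of p can fold away on each side.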
1233 //------------------------------do_one_bytecode--------------------------------
1234 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1235 void Parse::do_one_bytecode() {
1236 Node *a, *b, *c, *d; // Handy temps
1237 BoolTest::mask btest;
1238 int i;
1240 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1242 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1243 "out of nodes parsing method")) {
1244 return;
1245 }
1247 #ifdef ASSERT
1248 // for setting breakpoints
1249 if (TraceOptoParse) {
1250 tty->print(" @");
1251 dump_bci(bci());
1252 }
1253 #endif
1255 switch (bc()) {
1256 case Bytecodes::_nop:
1257 // do nothing
1258 break;
1259 case Bytecodes::_lconst_0:
1260 push_pair(longcon(0));
1261 break;
1263 case Bytecodes::_lconst_1:
1264 push_pair(longcon(1));
1265 break;
1267 case Bytecodes::_fconst_0:
1268 push(zerocon(T_FLOAT));
1269 break;
1271 case Bytecodes::_fconst_1:
1272 push(makecon(TypeF::ONE));
1273 break;
1275 case Bytecodes::_fconst_2:
1276 push(makecon(TypeF::make(2.0f)));
1277 break;
1279 case Bytecodes::_dconst_0:
1280 push_pair(zerocon(T_DOUBLE));
1281 break;
1283 case Bytecodes::_dconst_1:
1284 push_pair(makecon(TypeD::ONE));
1285 break;
1287 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1288 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1289 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1290 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1291 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1292 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1293 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1294 case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
1295 case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
1296 case Bytecodes::_aconst_null: push(null()); break;
1297 case Bytecodes::_ldc:
1298 case Bytecodes::_ldc_w:
1299 case Bytecodes::_ldc2_w:
1300 // If the constant is unresolved, run this BC once in the interpreter.
1301 if (iter().is_unresolved_string()) {
1302 uncommon_trap(Deoptimization::make_trap_request
1303 (Deoptimization::Reason_unloaded,
1304 Deoptimization::Action_reinterpret,
1305 iter().get_constant_index()),
1306 NULL, "unresolved_string");
1307 break;
1308 } else {
1309 ciConstant constant = iter().get_constant();
1310 if (constant.basic_type() == T_OBJECT) {
1311 ciObject* c = constant.as_object();
1312 if (c->is_klass()) {
1313 // The constant returned for a klass is the ciKlass for the
1314 // entry. We want the java_mirror so get it.
1315 ciKlass* klass = c->as_klass();
1316 if (klass->is_loaded()) {
1317 constant = ciConstant(T_OBJECT, klass->java_mirror());
1318 } else {
1319 uncommon_trap(Deoptimization::make_trap_request
1320 (Deoptimization::Reason_unloaded,
1321 Deoptimization::Action_reinterpret,
1322 iter().get_constant_index()),
1323 NULL, "unresolved_klass");
1324 break;
1325 }
1326 }
1327 }
1328 push_constant(constant);
1329 }
1331 break;
1333 case Bytecodes::_aload_0:
1334 push( local(0) );
1335 break;
1336 case Bytecodes::_aload_1:
1337 push( local(1) );
1338 break;
1339 case Bytecodes::_aload_2:
1340 push( local(2) );
1341 break;
1342 case Bytecodes::_aload_3:
1343 push( local(3) );
1344 break;
1345 case Bytecodes::_aload:
1346 push( local(iter().get_index()) );
1347 break;
1349 case Bytecodes::_fload_0:
1350 case Bytecodes::_iload_0:
1351 push( local(0) );
1352 break;
1353 case Bytecodes::_fload_1:
1354 case Bytecodes::_iload_1:
1355 push( local(1) );
1356 break;
1357 case Bytecodes::_fload_2:
1358 case Bytecodes::_iload_2:
1359 push( local(2) );
1360 break;
1361 case Bytecodes::_fload_3:
1362 case Bytecodes::_iload_3:
1363 push( local(3) );
1364 break;
1365 case Bytecodes::_fload:
1366 case Bytecodes::_iload:
1367 push( local(iter().get_index()) );
1368 break;
1369 case Bytecodes::_lload_0:
1370 push_pair_local( 0 );
1371 break;
1372 case Bytecodes::_lload_1:
1373 push_pair_local( 1 );
1374 break;
1375 case Bytecodes::_lload_2:
1376 push_pair_local( 2 );
1377 break;
1378 case Bytecodes::_lload_3:
1379 push_pair_local( 3 );
1380 break;
1381 case Bytecodes::_lload:
1382 push_pair_local( iter().get_index() );
1383 break;
1385 case Bytecodes::_dload_0:
1386 push_pair_local(0);
1387 break;
1388 case Bytecodes::_dload_1:
1389 push_pair_local(1);
1390 break;
1391 case Bytecodes::_dload_2:
1392 push_pair_local(2);
1393 break;
1394 case Bytecodes::_dload_3:
1395 push_pair_local(3);
1396 break;
1397 case Bytecodes::_dload:
1398 push_pair_local(iter().get_index());
1399 break;
1400 case Bytecodes::_fstore_0:
1401 case Bytecodes::_istore_0:
1402 case Bytecodes::_astore_0:
1403 set_local( 0, pop() );
1404 break;
1405 case Bytecodes::_fstore_1:
1406 case Bytecodes::_istore_1:
1407 case Bytecodes::_astore_1:
1408 set_local( 1, pop() );
1409 break;
1410 case Bytecodes::_fstore_2:
1411 case Bytecodes::_istore_2:
1412 case Bytecodes::_astore_2:
1413 set_local( 2, pop() );
1414 break;
1415 case Bytecodes::_fstore_3:
1416 case Bytecodes::_istore_3:
1417 case Bytecodes::_astore_3:
1418 set_local( 3, pop() );
1419 break;
1420 case Bytecodes::_fstore:
1421 case Bytecodes::_istore:
1422 case Bytecodes::_astore:
1423 set_local( iter().get_index(), pop() );
1424 break;
1425 // long stores
1426 case Bytecodes::_lstore_0:
1427 set_pair_local( 0, pop_pair() );
1428 break;
1429 case Bytecodes::_lstore_1:
1430 set_pair_local( 1, pop_pair() );
1431 break;
1432 case Bytecodes::_lstore_2:
1433 set_pair_local( 2, pop_pair() );
1434 break;
1435 case Bytecodes::_lstore_3:
1436 set_pair_local( 3, pop_pair() );
1437 break;
1438 case Bytecodes::_lstore:
1439 set_pair_local( iter().get_index(), pop_pair() );
1440 break;
1442 // double stores
1443 case Bytecodes::_dstore_0:
1444 set_pair_local( 0, dstore_rounding(pop_pair()) );
1445 break;
1446 case Bytecodes::_dstore_1:
1447 set_pair_local( 1, dstore_rounding(pop_pair()) );
1448 break;
1449 case Bytecodes::_dstore_2:
1450 set_pair_local( 2, dstore_rounding(pop_pair()) );
1451 break;
1452 case Bytecodes::_dstore_3:
1453 set_pair_local( 3, dstore_rounding(pop_pair()) );
1454 break;
1455 case Bytecodes::_dstore:
1456 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1457 break;
1459 case Bytecodes::_pop: _sp -= 1; break;
1460 case Bytecodes::_pop2: _sp -= 2; break;
1461 case Bytecodes::_swap:
1462 a = pop();
1463 b = pop();
1464 push(a);
1465 push(b);
1466 break;
1467 case Bytecodes::_dup:
1468 a = pop();
1469 push(a);
1470 push(a);
1471 break;
1472 case Bytecodes::_dup_x1:
1473 a = pop();
1474 b = pop();
1475 push( a );
1476 push( b );
1477 push( a );
1478 break;
1479 case Bytecodes::_dup_x2:
1480 a = pop();
1481 b = pop();
1482 c = pop();
1483 push( a );
1484 push( c );
1485 push( b );
1486 push( a );
1487 break;
1488 case Bytecodes::_dup2:
1489 a = pop();
1490 b = pop();
1491 push( b );
1492 push( a );
1493 push( b );
1494 push( a );
1495 break;
1497 case Bytecodes::_dup2_x1:
1498 // before: .. c, b, a
1499 // after: .. b, a, c, b, a
1500 // not tested
1501 a = pop();
1502 b = pop();
1503 c = pop();
1504 push( b );
1505 push( a );
1506 push( c );
1507 push( b );
1508 push( a );
1509 break;
1510 case Bytecodes::_dup2_x2:
1511 // before: .. d, c, b, a
1512 // after: .. b, a, d, c, b, a
1513 // not tested
1514 a = pop();
1515 b = pop();
1516 c = pop();
1517 d = pop();
1518 push( b );
1519 push( a );
1520 push( d );
1521 push( c );
1522 push( b );
1523 push( a );
1524 break;
1526 case Bytecodes::_arraylength: {
1527 // Must do null-check with value on expression stack
1528 Node *ary = do_null_check(peek(), T_ARRAY);
1529 // Compile-time detection of a null exception?
1530 if (stopped()) return;
1531 a = pop();
1532 push(load_array_length(a));
1533 break;
1534 }
1536 case Bytecodes::_baload: array_load(T_BYTE); break;
1537 case Bytecodes::_caload: array_load(T_CHAR); break;
1538 case Bytecodes::_iaload: array_load(T_INT); break;
1539 case Bytecodes::_saload: array_load(T_SHORT); break;
1540 case Bytecodes::_faload: array_load(T_FLOAT); break;
1541 case Bytecodes::_aaload: array_load(T_OBJECT); break;
1542 case Bytecodes::_laload: {
1543 a = array_addressing(T_LONG, 0);
1544 if (stopped()) return; // guaranteed null or range check
1545 _sp -= 2; // Pop array and index
1546 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
1547 break;
1548 }
1549 case Bytecodes::_daload: {
1550 a = array_addressing(T_DOUBLE, 0);
1551 if (stopped()) return; // guaranteed null or range check
1552 _sp -= 2; // Pop array and index
1553 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
1554 break;
1555 }
1556 case Bytecodes::_bastore: array_store(T_BYTE); break;
1557 case Bytecodes::_castore: array_store(T_CHAR); break;
1558 case Bytecodes::_iastore: array_store(T_INT); break;
1559 case Bytecodes::_sastore: array_store(T_SHORT); break;
1560 case Bytecodes::_fastore: array_store(T_FLOAT); break;
1561 case Bytecodes::_aastore: {
1562 d = array_addressing(T_OBJECT, 1);
1563 if (stopped()) return; // guaranteed null or range check
1564 array_store_check();
1565 c = pop(); // Oop to store
1566 b = pop(); // index (already used)
1567 a = pop(); // the array itself
1568 const Type* elemtype = _gvn.type(a)->is_aryptr()->elem();
1569 const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1570 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
1571 break;
1572 }
1573 case Bytecodes::_lastore: {
1574 a = array_addressing(T_LONG, 2);
1575 if (stopped()) return; // guaranteed null or range check
1576 c = pop_pair();
1577 _sp -= 2; // Pop array and index
1578 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
1579 break;
1580 }
1581 case Bytecodes::_dastore: {
1582 a = array_addressing(T_DOUBLE, 2);
1583 if (stopped()) return; // guaranteed null or range check
1584 c = pop_pair();
1585 _sp -= 2; // Pop array and index
1586 c = dstore_rounding(c);
1587 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
1588 break;
1589 }
1590 case Bytecodes::_getfield:
1591 do_getfield();
1592 break;
1594 case Bytecodes::_getstatic:
1595 do_getstatic();
1596 break;
1598 case Bytecodes::_putfield:
1599 do_putfield();
1600 break;
1602 case Bytecodes::_putstatic:
1603 do_putstatic();
1604 break;
1606 case Bytecodes::_irem:
1607 do_irem();
1608 break;
1609 case Bytecodes::_idiv:
1610 // Must keep both values on the expression-stack during null-check
1611 do_null_check(peek(), T_INT);
1612 // Compile-time detection of a null exception?
1613 if (stopped()) return;
1614 b = pop();
1615 a = pop();
1616 push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
1617 break;
1618 case Bytecodes::_imul:
1619 b = pop(); a = pop();
1620 push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
1621 break;
1622 case Bytecodes::_iadd:
1623 b = pop(); a = pop();
1624 push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
1625 break;
1626 case Bytecodes::_ineg:
1627 a = pop();
1628 push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
1629 break;
1630 case Bytecodes::_isub:
1631 b = pop(); a = pop();
1632 push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
1633 break;
1634 case Bytecodes::_iand:
1635 b = pop(); a = pop();
1636 push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
1637 break;
1638 case Bytecodes::_ior:
1639 b = pop(); a = pop();
1640 push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
1641 break;
1642 case Bytecodes::_ixor:
1643 b = pop(); a = pop();
1644 push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
1645 break;
1646 case Bytecodes::_ishl:
1647 b = pop(); a = pop();
1648 push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
1649 break;
1650 case Bytecodes::_ishr:
1651 b = pop(); a = pop();
1652 push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
1653 break;
1654 case Bytecodes::_iushr:
1655 b = pop(); a = pop();
1656 push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
1657 break;
1659 case Bytecodes::_fneg:
1660 a = pop();
1661 b = _gvn.transform(new (C, 2) NegFNode (a));
1662 push(b);
1663 break;
1665 case Bytecodes::_fsub:
1666 b = pop();
1667 a = pop();
1668 c = _gvn.transform( new (C, 3) SubFNode(a,b) );
1669 d = precision_rounding(c);
1670 push( d );
1671 break;
1673 case Bytecodes::_fadd:
1674 b = pop();
1675 a = pop();
1676 c = _gvn.transform( new (C, 3) AddFNode(a,b) );
1677 d = precision_rounding(c);
1678 push( d );
1679 break;
1681 case Bytecodes::_fmul:
1682 b = pop();
1683 a = pop();
1684 c = _gvn.transform( new (C, 3) MulFNode(a,b) );
1685 d = precision_rounding(c);
1686 push( d );
1687 break;
1689 case Bytecodes::_fdiv:
1690 b = pop();
1691 a = pop();
1692 c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
1693 d = precision_rounding(c);
1694 push( d );
1695 break;
1697 case Bytecodes::_frem:
1698 if (Matcher::has_match_rule(Op_ModF)) {
1699 // Generate a ModF node.
1700 b = pop();
1701 a = pop();
1702 c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
1703 d = precision_rounding(c);
1704 push( d );
1705 }
1706 else {
1707 // Generate a call.
1708 modf();
1709 }
1710 break;
1712 case Bytecodes::_fcmpl:
1713 b = pop();
1714 a = pop();
1715 c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
1716 push(c);
1717 break;
1718 case Bytecodes::_fcmpg:
1719 b = pop();
1720 a = pop();
1722 // Same as fcmpl but need to flip the unordered case. Swap the inputs,
1723 // which negates the result sign except for unordered. Flip the unordered
1724 // as well by using CmpF3 which implements unordered-lesser instead of
1725 // unordered-greater semantics. Finally, negate the result bits. The result
1726 // is the same as using a CmpF3Greater, except we did it with CmpF3 alone.
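// A worked example of the identity (illustrative only): for a = 1.0f, b = NaN,
// fcmpg must push +1. CmpF3(b,a) treats the unordered case as lesser and
// yields -1, and SubI(0,-1) = +1, as required. For ordered inputs, say
// a = 1.0f, b = 2.0f, CmpF3(b,a) yields +1 and the negation restores the
// correct result, -1.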
1727 c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
1728 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1729 push(c);
1730 break;
1732 case Bytecodes::_f2i:
1733 a = pop();
1734 push(_gvn.transform(new (C, 2) ConvF2INode(a)));
1735 break;
1737 case Bytecodes::_d2i:
1738 a = pop_pair();
1739 b = _gvn.transform(new (C, 2) ConvD2INode(a));
1740 push( b );
1741 break;
1743 case Bytecodes::_f2d:
1744 a = pop();
1745 b = _gvn.transform( new (C, 2) ConvF2DNode(a));
1746 push_pair( b );
1747 break;
1749 case Bytecodes::_d2f:
1750 a = pop_pair();
1751 b = _gvn.transform( new (C, 2) ConvD2FNode(a));
1752 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1753 //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
1754 push( b );
1755 break;
1757 case Bytecodes::_l2f:
1758 if (Matcher::convL2FSupported()) {
1759 a = pop_pair();
1760 b = _gvn.transform( new (C, 2) ConvL2FNode(a));
1761 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1762 // Rather than storing the result into an FP register then pushing
1763 // out to memory to round, the machine instruction that implements
1764 // ConvL2F is responsible for rounding.
1765 // c = precision_rounding(b);
1766 c = _gvn.transform(b);
1767 push(c);
1768 } else {
1769 l2f();
1770 }
1771 break;
1773 case Bytecodes::_l2d:
1774 a = pop_pair();
1775 b = _gvn.transform( new (C, 2) ConvL2DNode(a));
1776 // For i486.ad, rounding is always necessary, but as with _l2f above the
1776 // machine instruction implementing ConvL2D is responsible for it.
1777 // c = dprecision_rounding(b);
1778 c = _gvn.transform(b);
1779 push_pair(c);
1780 break;
1782 case Bytecodes::_f2l:
1783 a = pop();
1784 b = _gvn.transform( new (C, 2) ConvF2LNode(a));
1785 push_pair(b);
1786 break;
1788 case Bytecodes::_d2l:
1789 a = pop_pair();
1790 b = _gvn.transform( new (C, 2) ConvD2LNode(a));
1791 push_pair(b);
1792 break;
1794 case Bytecodes::_dsub:
1795 b = pop_pair();
1796 a = pop_pair();
1797 c = _gvn.transform( new (C, 3) SubDNode(a,b) );
1798 d = dprecision_rounding(c);
1799 push_pair( d );
1800 break;
1802 case Bytecodes::_dadd:
1803 b = pop_pair();
1804 a = pop_pair();
1805 c = _gvn.transform( new (C, 3) AddDNode(a,b) );
1806 d = dprecision_rounding(c);
1807 push_pair( d );
1808 break;
1810 case Bytecodes::_dmul:
1811 b = pop_pair();
1812 a = pop_pair();
1813 c = _gvn.transform( new (C, 3) MulDNode(a,b) );
1814 d = dprecision_rounding(c);
1815 push_pair( d );
1816 break;
1818 case Bytecodes::_ddiv:
1819 b = pop_pair();
1820 a = pop_pair();
1821 c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
1822 d = dprecision_rounding(c);
1823 push_pair( d );
1824 break;
1826 case Bytecodes::_dneg:
1827 a = pop_pair();
1828 b = _gvn.transform(new (C, 2) NegDNode (a));
1829 push_pair(b);
1830 break;
1832 case Bytecodes::_drem:
1833 if (Matcher::has_match_rule(Op_ModD)) {
1834 // Generate a ModD node.
1835 b = pop_pair();
1836 a = pop_pair();
1837 // a % b
1839 c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
1840 d = dprecision_rounding(c);
1841 push_pair( d );
1842 }
1843 else {
1844 // Generate a call.
1845 modd();
1846 }
1847 break;
1849 case Bytecodes::_dcmpl:
1850 b = pop_pair();
1851 a = pop_pair();
1852 c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
1853 push(c);
1854 break;
1856 case Bytecodes::_dcmpg:
1857 b = pop_pair();
1858 a = pop_pair();
1859 // Same as dcmpl but need to flip the unordered case.
1860 // Commute the inputs, which negates the result sign except for unordered.
1861 // Flip the unordered as well by using CmpD3 which implements
1862 // unordered-lesser instead of unordered-greater semantics.
1863 // Finally, negate the result bits. The result is the same as using
1864 // a CmpD3Greater, except we did it with CmpD3 alone.
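// (The worked example at _fcmpg above carries over, with CmpD3 for CmpF3.)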
1865 c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
1866 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1867 push(c);
1868 break;
1871 // Note for longs: the lo word is on TOS, the hi word at TOS - 1
1872 case Bytecodes::_land:
1873 b = pop_pair();
1874 a = pop_pair();
1875 c = _gvn.transform( new (C, 3) AndLNode(a,b) );
1876 push_pair(c);
1877 break;
1878 case Bytecodes::_lor:
1879 b = pop_pair();
1880 a = pop_pair();
1881 c = _gvn.transform( new (C, 3) OrLNode(a,b) );
1882 push_pair(c);
1883 break;
1884 case Bytecodes::_lxor:
1885 b = pop_pair();
1886 a = pop_pair();
1887 c = _gvn.transform( new (C, 3) XorLNode(a,b) );
1888 push_pair(c);
1889 break;
1891 case Bytecodes::_lshl:
1892 b = pop(); // the shift count
1893 a = pop_pair(); // value to be shifted
1894 c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
1895 push_pair(c);
1896 break;
1897 case Bytecodes::_lshr:
1898 b = pop(); // the shift count
1899 a = pop_pair(); // value to be shifted
1900 c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
1901 push_pair(c);
1902 break;
1903 case Bytecodes::_lushr:
1904 b = pop(); // the shift count
1905 a = pop_pair(); // value to be shifted
1906 c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
1907 push_pair(c);
1908 break;
1909 case Bytecodes::_lmul:
1910 b = pop_pair();
1911 a = pop_pair();
1912 c = _gvn.transform( new (C, 3) MulLNode(a,b) );
1913 push_pair(c);
1914 break;
1916 case Bytecodes::_lrem:
1917 // Must keep both values on the expression-stack during the zero check
1918 assert(peek(0) == top(), "long word order");
1919 do_null_check(peek(1), T_LONG);
1920 // Compile-time detect of division by zero?
1921 if (stopped()) return;
1922 b = pop_pair();
1923 a = pop_pair();
1924 c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
1925 push_pair(c);
1926 break;
1928 case Bytecodes::_ldiv:
1929 // Must keep both values on the expression-stack during the zero check
1930 assert(peek(0) == top(), "long word order");
1931 do_null_check(peek(1), T_LONG);
1932 // Compile-time detect of division by zero?
1933 if (stopped()) return;
1934 b = pop_pair();
1935 a = pop_pair();
1936 c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
1937 push_pair(c);
1938 break;
1940 case Bytecodes::_ladd:
1941 b = pop_pair();
1942 a = pop_pair();
1943 c = _gvn.transform( new (C, 3) AddLNode(a,b) );
1944 push_pair(c);
1945 break;
1946 case Bytecodes::_lsub:
1947 b = pop_pair();
1948 a = pop_pair();
1949 c = _gvn.transform( new (C, 3) SubLNode(a,b) );
1950 push_pair(c);
1951 break;
1952 case Bytecodes::_lcmp:
1953 // Safepoints are now inserted _before_ branches. The long-compare
1954 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
1955 // slew of control flow. These are usually followed by a CmpI vs zero and
1956 // a branch; this pattern then optimizes to the obvious long-compare and
1957 // branch. However, if the branch is backwards there's a Safepoint
1958 // inserted. The inserted Safepoint captures the JVM state at the
1959 // pre-branch point, i.e. it captures the 3-way value. Thus if a
1960 // long-compare is used to control a loop the debug info will force
1961 // computation of the 3-way value, even though the generated code uses a
1962 // long-compare and branch. We try to rectify the situation by inserting
1963 // a SafePoint here and have it dominate and kill the safepoint added at a
1964 // following backwards branch. At this point the JVM state merely holds 2
1965 // longs but not the 3-way value.
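// A sketch of the bytecode shape this targets (typical javac output for a
// long-counted loop; illustrative only, local slots assumed):
//   lload_1        // counter
//   lload_3        // limit
//   lcmp           // 3-way value
//   iflt  <head>   // backwards branch
// The SafePoint added here dominates and kills the one that the backwards
// branch would add, and its JVM state holds only the two longs.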
1966 if( UseLoopSafepoints ) {
1967 switch( iter().next_bc() ) {
1968 case Bytecodes::_ifgt:
1969 case Bytecodes::_iflt:
1970 case Bytecodes::_ifge:
1971 case Bytecodes::_ifle:
1972 case Bytecodes::_ifne:
1973 case Bytecodes::_ifeq:
1974 // If this is a backwards branch in the bytecodes, add Safepoint
1975 maybe_add_safepoint(iter().next_get_dest());
1976 }
1977 }
1978 b = pop_pair();
1979 a = pop_pair();
1980 c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
1981 push(c);
1982 break;
1984 case Bytecodes::_lneg:
1985 a = pop_pair();
1986 b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
1987 push_pair(b);
1988 break;
1989 case Bytecodes::_l2i:
1990 a = pop_pair();
1991 push( _gvn.transform( new (C, 2) ConvL2INode(a)));
1992 break;
1993 case Bytecodes::_i2l:
1994 a = pop();
1995 b = _gvn.transform( new (C, 2) ConvI2LNode(a));
1996 push_pair(b);
1997 break;
1998 case Bytecodes::_i2b:
1999 // Sign extend
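// (e.g. a = 0x000000FF: << 24 gives 0xFF000000, and the arithmetic >> 24
//  gives 0xFFFFFFFF, i.e. the byte value -1)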
2000 a = pop();
2001 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
2002 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
2003 push( a );
2004 break;
2005 case Bytecodes::_i2s:
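// Sign extend (e.g. a = 0x0000FFFF: << 16 gives 0xFFFF0000, and the
// arithmetic >> 16 gives 0xFFFFFFFF, i.e. the short value -1)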
2006 a = pop();
2007 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
2008 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
2009 push( a );
2010 break;
2011 case Bytecodes::_i2c:
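// Zero extend: chars are unsigned 16-bit, so masking with 0xFFFF suffices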
2012 a = pop();
2013 push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
2014 break;
2016 case Bytecodes::_i2f:
2017 a = pop();
2018 b = _gvn.transform( new (C, 2) ConvI2FNode(a) );
2019 c = precision_rounding(b);
2020 push(c); // push the rounded result, not the raw conversion
2021 break;
2023 case Bytecodes::_i2d:
2024 a = pop();
2025 b = _gvn.transform( new (C, 2) ConvI2DNode(a));
2026 push_pair(b);
2027 break;
2029 case Bytecodes::_iinc: // Increment local
2030 i = iter().get_index(); // Get local index
2031 set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2032 break;
2034 // Exit points of synchronized methods must have an unlock node
2035 case Bytecodes::_return:
2036 return_current(NULL);
2037 break;
2039 case Bytecodes::_ireturn:
2040 case Bytecodes::_areturn:
2041 case Bytecodes::_freturn:
2042 return_current(pop());
2043 break;
2044 case Bytecodes::_lreturn:
2045 return_current(pop_pair());
2046 break;
2047 case Bytecodes::_dreturn:
2048 return_current(pop_pair());
2049 break;
2051 case Bytecodes::_athrow:
2052 // Throwing a null exception oop results in a NullPointerException
2053 do_null_check(peek(), T_OBJECT);
2054 if (stopped()) return;
2055 if (JvmtiExport::can_post_exceptions()) {
2056 // "Full-speed throwing" is not necessary here,
2057 // since we're notifying the VM on every throw.
2058 uncommon_trap(Deoptimization::Reason_unhandled,
2059 Deoptimization::Action_none);
2060 return;
2061 }
2062 // Hook the thrown exception directly to subsequent handlers.
2063 if (BailoutToInterpreterForThrows) {
2064 // Keep method interpreted from now on.
2065 uncommon_trap(Deoptimization::Reason_unhandled,
2066 Deoptimization::Action_make_not_compilable);
2067 return;
2068 }
2069 add_exception_state(make_exception_state(peek()));
2070 break;
2072 case Bytecodes::_goto: // fall through
2073 case Bytecodes::_goto_w: {
2074 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2076 // If this is a backwards branch in the bytecodes, add Safepoint
2077 maybe_add_safepoint(target_bci);
2079 // Update method data
2080 profile_taken_branch(target_bci);
2082 // Merge the current control into the target basic block
2083 merge(target_bci);
2085 // See if we can get some profile data and hand it off to the next block
2086 Block *target_block = block()->successor_for_bci(target_bci);
2087 if (target_block->pred_count() != 1) break;
2088 ciMethodData* methodData = method()->method_data();
2089 if (!methodData->is_mature()) break;
2090 ciProfileData* data = methodData->bci_to_data(bci());
2091 assert( data->is_JumpData(), "" );
2092 int taken = ((ciJumpData*)data)->taken();
2093 taken = method()->scale_count(taken);
2094 target_block->set_count(taken);
2095 break;
2096 }
2098 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
2099 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2100 handle_if_null:
2101 // If this is a backwards branch in the bytecodes, add Safepoint
2102 maybe_add_safepoint(iter().get_dest());
2103 a = null();
2104 b = pop();
2105 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2106 do_ifnull(btest, c);
2107 break;
2109 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2110 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2111 handle_if_acmp:
2112 // If this is a backwards branch in the bytecodes, add Safepoint
2113 maybe_add_safepoint(iter().get_dest());
2114 a = pop();
2115 b = pop();
2116 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2117 do_if(btest, c);
2118 break;
2120 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2121 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2122 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2123 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2124 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2125 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2126 handle_ifxx:
2127 // If this is a backwards branch in the bytecodes, add Safepoint
2128 maybe_add_safepoint(iter().get_dest());
2129 a = _gvn.intcon(0);
2130 b = pop();
2131 c = _gvn.transform( new (C, 3) CmpINode(b, a) );
2132 do_if(btest, c);
2133 break;
2135 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2136 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2137 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2138 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2139 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2140 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2141 handle_if_icmp:
2142 // If this is a backwards branch in the bytecodes, add Safepoint
2143 maybe_add_safepoint(iter().get_dest());
2144 a = pop();
2145 b = pop();
2146 c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
2147 do_if(btest, c);
2148 break;
2150 case Bytecodes::_tableswitch:
2151 do_tableswitch();
2152 break;
2154 case Bytecodes::_lookupswitch:
2155 do_lookupswitch();
2156 break;
2158 case Bytecodes::_invokestatic:
2159 case Bytecodes::_invokespecial:
2160 case Bytecodes::_invokevirtual:
2161 case Bytecodes::_invokeinterface:
2162 do_call();
2163 break;
2164 case Bytecodes::_checkcast:
2165 do_checkcast();
2166 break;
2167 case Bytecodes::_instanceof:
2168 do_instanceof();
2169 break;
2170 case Bytecodes::_anewarray:
2171 do_anewarray();
2172 break;
2173 case Bytecodes::_newarray:
2174 do_newarray((BasicType)iter().get_index());
2175 break;
2176 case Bytecodes::_multianewarray:
2177 do_multianewarray();
2178 break;
2179 case Bytecodes::_new:
2180 do_new();
2181 break;
2183 case Bytecodes::_jsr:
2184 case Bytecodes::_jsr_w:
2185 do_jsr();
2186 break;
2188 case Bytecodes::_ret:
2189 do_ret();
2190 break;
2193 case Bytecodes::_monitorenter:
2194 do_monitor_enter();
2195 break;
2197 case Bytecodes::_monitorexit:
2198 do_monitor_exit();
2199 break;
2201 case Bytecodes::_breakpoint:
2202 // A breakpoint was set concurrently with the compile
2203 // %%% use an uncommon trap?
2204 C->record_failure("breakpoint in method");
2205 return;
2207 default:
2208 #ifndef PRODUCT
2209 map()->dump(99);
2210 #endif
2211 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2212 ShouldNotReachHere();
2213 }
2215 #ifndef PRODUCT
2216 IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2217 if (printer) {
2218 char buffer[256];
2219 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2220 bool old = printer->traverse_outs();
2221 printer->set_traverse_outs(true);
2222 printer->print_method(C, buffer, 4);
2223 printer->set_traverse_outs(old);
2224 }
2225 #endif
2226 }