Tue, 24 Jun 2008 10:43:29 -0700
6710487: More than half of JDI Regression tests hang with COOPs in -Xcomp mode
Summary: Remove DecodeNNode::decode() and EncodePNode::encode() methods.
Reviewed-by: rasbold, never
1 /*
2 * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_parse2.cpp.incl"
28 extern int explicit_null_checks_inserted,
29 explicit_null_checks_elided;
31 //---------------------------------array_load----------------------------------
32 void Parse::array_load(BasicType elem_type) {
33 const Type* elem = Type::TOP;
34 Node* adr = array_addressing(elem_type, 0, &elem);
35 if (stopped()) return; // guaranteed null or range check
36 _sp -= 2; // Pop array and index
37 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
38 Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
39 push(ld);
40 }
43 //--------------------------------array_store----------------------------------
44 void Parse::array_store(BasicType elem_type) {
45 Node* adr = array_addressing(elem_type, 1);
46 if (stopped()) return; // guaranteed null or range check
47 Node* val = pop();
48 _sp -= 2; // Pop array and index
49 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
50 store_to_memory(control(), adr, val, elem_type, adr_type);
51 }
54 //------------------------------array_addressing-------------------------------
55 // Pull array and index from the stack. Compute pointer-to-element.
56 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
57 Node *idx = peek(0+vals); // Get from stack without popping
58 Node *ary = peek(1+vals); // in case of exception
60 // Null check the array base, with correct stack contents
61 ary = do_null_check(ary, T_ARRAY);
62 // Compile-time detection of a null exception?
63 if (stopped()) return top();
65 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
66 const TypeInt* sizetype = arytype->size();
67 const Type* elemtype = arytype->elem();
69 if (UseUniqueSubclasses && result2 != NULL) {
70 const Type* el = elemtype->make_ptr();
71 if (el && el->isa_instptr()) {
72 const TypeInstPtr* toop = el->is_instptr();
73 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
74 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
75 const Type* subklass = Type::get_const_type(toop->klass());
76 elemtype = subklass->join(el);
77 }
78 }
79 }
81 // Check for big class initializers with all constant offsets
82 // feeding into a known-size array.
83 const TypeInt* idxtype = _gvn.type(idx)->is_int();
84 // See if the highest idx value is less than the lowest array bound,
85 // and if the idx value cannot be negative:
86 bool need_range_check = true;
87 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
88 need_range_check = false;
89 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
90 }
92 if (!arytype->klass()->is_loaded()) {
93 // Only fails for some -Xcomp runs
94 // The class is unloaded. We have to run this bytecode in the interpreter.
95 uncommon_trap(Deoptimization::Reason_unloaded,
96 Deoptimization::Action_reinterpret,
97 arytype->klass(), "!loaded array");
98 return top();
99 }
101 // Do the range check
102 if (GenerateRangeChecks && need_range_check) {
103 // Range is constant in array-oop, so we can use the original state of mem
104 Node* len = load_array_length(ary);
105 Node* tst;
106 if (sizetype->_hi <= 0) {
107 // If the greatest array bound is negative, we can conclude that we're
108 // compiling unreachable code, but the unsigned compare trick used below
109 // only works with non-negative lengths. Instead, hack "tst" to be zero so
110 // the uncommon_trap path will always be taken.
111 tst = _gvn.intcon(0);
112 } else {
113 // Test length vs index (standard trick using unsigned compare)
114 Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
115 BoolTest::mask btest = BoolTest::lt;
116 tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
117 }
118 // Branch to failure if out of bounds
119 { BuildCutout unless(this, tst, PROB_MAX);
120 if (C->allow_range_check_smearing()) {
121 // Do not use builtin_throw, since range checks are sometimes
122 // made more stringent by an optimistic transformation.
123 // This creates "tentative" range checks at this point,
124 // which are not guaranteed to throw exceptions.
125 // See IfNode::Ideal, is_range_check, adjust_check.
126 uncommon_trap(Deoptimization::Reason_range_check,
127 Deoptimization::Action_make_not_entrant,
128 NULL, "range_check");
129 } else {
130 // If we have already recompiled with the range-check-widening
131 // heroic optimization turned off, then we must really be throwing
132 // range check exceptions.
133 builtin_throw(Deoptimization::Reason_range_check, idx);
134 }
135 }
136 }
137 // Check for always knowing you are throwing a range-check exception
138 if (stopped()) return top();
140 Node* ptr = array_element_address( ary, idx, type, sizetype);
142 if (result2 != NULL) *result2 = elemtype;
143 return ptr;
144 }
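// Note on the range check above: a single unsigned compare covers both bounds,
// because a negative index reinterpreted as unsigned is larger than any valid
// array length.  In effect the emitted test is (schematic only):
//
//   if ((juint)idx >= (juint)len)  fail();   // idx < 0 || idx >= len
//
// e.g. idx = -1 becomes 0xFFFFFFFF unsigned and always takes the failure path,
// which is either an uncommon trap or a builtin_throw as chosen above.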
147 // returns IfNode
148 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
149 Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
150 Node *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
151 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
152 return iff;
153 }
155 // return Region node
156 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
157 Node *region = new (C, 3) RegionNode(3); // 2 results
158 record_for_igvn(region);
159 region->init_req(1, iffalse);
160 region->init_req(2, iftrue );
161 _gvn.set_type(region, Type::CONTROL);
162 region = _gvn.transform(region);
163 set_control (region);
164 return region;
165 }
168 //------------------------------helper for tableswitch-------------------------
169 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
170 // True branch, use existing map info
171 { PreserveJVMState pjvms(this);
172 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
173 set_control( iftrue );
174 profile_switch_case(prof_table_index);
175 merge_new_path(dest_bci_if_true);
176 }
178 // False branch
179 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
180 set_control( iffalse );
181 }
183 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
184 // True branch, use existing map info
185 { PreserveJVMState pjvms(this);
186 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
187 set_control( iffalse );
188 profile_switch_case(prof_table_index);
189 merge_new_path(dest_bci_if_true);
190 }
192 // False branch
193 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
194 set_control( iftrue );
195 }
197 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
198 // False branch, use existing map and control()
199 profile_switch_case(prof_table_index);
200 merge_new_path(dest_bci);
201 }
204 extern "C" {
205 static int jint_cmp(const void *i, const void *j) {
206 int a = *(jint *)i;
207 int b = *(jint *)j;
208 return a > b ? 1 : a < b ? -1 : 0;
209 }
210 }
213 // Default value for methodData switch indexing. Must be a negative value to avoid
214 // conflict with any legal switch index.
215 #define NullTableIndex -1
217 class SwitchRange : public StackObj {
218 // a range of integers coupled with a bci destination
219 jint _lo; // inclusive lower limit
220 jint _hi; // inclusive upper limit
221 int _dest;
222 int _table_index; // index into method data table
224 public:
225 jint lo() const { return _lo; }
226 jint hi() const { return _hi; }
227 int dest() const { return _dest; }
228 int table_index() const { return _table_index; }
229 bool is_singleton() const { return _lo == _hi; }
231 void setRange(jint lo, jint hi, int dest, int table_index) {
232 assert(lo <= hi, "must be a non-empty range");
233 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
234 }
235 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
236 assert(lo <= hi, "must be a non-empty range");
237 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
238 _hi = hi;
239 return true;
240 }
241 return false;
242 }
244 void set (jint value, int dest, int table_index) {
245 setRange(value, value, dest, table_index);
246 }
247 bool adjoin(jint value, int dest, int table_index) {
248 return adjoinRange(value, value, dest, table_index);
249 }
251 void print(ciEnv* env) {
252 if (is_singleton())
253 tty->print(" {%d}=>%d", lo(), dest());
254 else if (lo() == min_jint)
255 tty->print(" {..%d}=>%d", hi(), dest());
256 else if (hi() == max_jint)
257 tty->print(" {%d..}=>%d", lo(), dest());
258 else
259 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
260 }
261 };
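// Illustration of how SwitchRanges accumulate (example values only): a
// tableswitch with cases 1->A, 2->A, 3->B and default D is represented as
//
//   {min_jint..0}=>D   {1..2}=>A   {3}=>B   {4..max_jint}=>D
//
// adjoinRange() merges a new case into the previous range only when the value
// is contiguous and the destination (and profile table index) match.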
264 //-------------------------------do_tableswitch--------------------------------
265 void Parse::do_tableswitch() {
266 Node* lookup = pop();
268 // Get information about tableswitch
269 int default_dest = iter().get_dest_table(0);
270 int lo_index = iter().get_int_table(1);
271 int hi_index = iter().get_int_table(2);
272 int len = hi_index - lo_index + 1;
274 if (len < 1) {
275 // If this is a backward branch, add safepoint
276 maybe_add_safepoint(default_dest);
277 merge(default_dest);
278 return;
279 }
281 // generate decision tree, using trichotomy when possible
282 int rnum = len+2;
283 bool makes_backward_branch = false;
284 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
285 int rp = -1;
286 if (lo_index != min_jint) {
287 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
288 }
289 for (int j = 0; j < len; j++) {
290 jint match_int = lo_index+j;
291 int dest = iter().get_dest_table(j+3);
292 makes_backward_branch |= (dest <= bci());
293 int table_index = method_data_update() ? j : NullTableIndex;
294 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
295 ranges[++rp].set(match_int, dest, table_index);
296 }
297 }
298 jint highest = lo_index+(len-1);
299 assert(ranges[rp].hi() == highest, "");
300 if (highest != max_jint
301 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
302 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
303 }
304 assert(rp < len+2, "not too many ranges");
306 // Safepoint in case a backward branch is observed
307 if( makes_backward_branch && UseLoopSafepoints )
308 add_safepoint();
310 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
311 }
314 //------------------------------do_lookupswitch--------------------------------
315 void Parse::do_lookupswitch() {
316 Node *lookup = pop(); // lookup value
317 // Get information about lookupswitch
318 int default_dest = iter().get_dest_table(0);
319 int len = iter().get_int_table(1);
321 if (len < 1) { // If this is a backward branch, add safepoint
322 maybe_add_safepoint(default_dest);
323 merge(default_dest);
324 return;
325 }
327 // generate decision tree, using trichotomy when possible
328 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
329 {
330 for( int j = 0; j < len; j++ ) {
331 table[j+j+0] = iter().get_int_table(2+j+j);
332 table[j+j+1] = iter().get_dest_table(2+j+j+1);
333 }
334 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
335 }
337 int rnum = len*2+1;
338 bool makes_backward_branch = false;
339 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
340 int rp = -1;
341 for( int j = 0; j < len; j++ ) {
342 jint match_int = table[j+j+0];
343 int dest = table[j+j+1];
344 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
345 int table_index = method_data_update() ? j : NullTableIndex;
346 makes_backward_branch |= (dest <= bci());
347 if( match_int != next_lo ) {
348 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
349 }
350 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
351 ranges[++rp].set(match_int, dest, table_index);
352 }
353 }
354 jint highest = table[2*(len-1)];
355 assert(ranges[rp].hi() == highest, "");
356 if( highest != max_jint
357 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
358 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
359 }
360 assert(rp < rnum, "not too many ranges");
362 // Safepoint in case backward branch observed
363 if( makes_backward_branch && UseLoopSafepoints )
364 add_safepoint();
366 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
367 }
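// Sketch of the lookupswitch lowering above (example values only): the
// (match, dest) pairs are sorted by match value, then walked in order, and any
// gap between consecutive match values becomes a range to the default target.
// e.g. cases 5->A, 100->B with default D produce
//
//   {min_jint..4}=>D   {5}=>A   {6..99}=>D   {100}=>B   {101..max_jint}=>D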
369 //----------------------------create_jump_tables-------------------------------
370 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
371 // Are jumptables enabled
372 if (!UseJumpTables) return false;
374 // Are jumptables supported
375 if (!Matcher::has_match_rule(Op_Jump)) return false;
377 // Don't make jump table if profiling
378 if (method_data_update()) return false;
380 // Decide if a guard is needed to lop off big ranges at either (or
381 // both) end(s) of the input set. We'll call this the default target
382 // even though we can't be sure that it is the true "default".
384 bool needs_guard = false;
385 int default_dest;
386 int64 total_outlier_size = 0;
387 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
388 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
390 if (lo->dest() == hi->dest()) {
391 total_outlier_size = hi_size + lo_size;
392 default_dest = lo->dest();
393 } else if (lo_size > hi_size) {
394 total_outlier_size = lo_size;
395 default_dest = lo->dest();
396 } else {
397 total_outlier_size = hi_size;
398 default_dest = hi->dest();
399 }
401 // If a guard test will eliminate very sparse end ranges, then
402 // it is worth the cost of an extra jump.
403 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
404 needs_guard = true;
405 if (default_dest == lo->dest()) lo++;
406 if (default_dest == hi->dest()) hi--;
407 }
409 // Find the total number of cases and ranges
410 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
411 int num_range = hi - lo + 1;
413 // Don't create table if: too large, too small, or too sparse.
414 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
415 return false;
416 if (num_cases > (MaxJumpTableSparseness * num_range))
417 return false;
419 // Normalize table lookups to zero
420 int lowval = lo->lo();
421 key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
423 // Generate a guard to protect against input keyvals that aren't
424 // in the switch domain.
425 if (needs_guard) {
426 Node* size = _gvn.intcon(num_cases);
427 Node* cmp = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
428 Node* tst = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
429 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
430 jump_if_true_fork(iff, default_dest, NullTableIndex);
431 }
433 // Create an ideal node JumpTable that has projections
434 // of all possible ranges for a switch statement
435 // The key_val input must be converted to a pointer offset and scaled.
436 // Compare Parse::array_addressing above.
437 #ifdef _LP64
438 // Clean the 32-bit int into a real 64-bit offset.
439 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
440 const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
441 key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
442 #endif
443 // Shift the value by wordsize so we have an index into the table, rather
444 // than a switch value
445 Node *shiftWord = _gvn.MakeConX(wordSize);
446 key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
448 // Create the JumpNode
449 Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
451 // These are the switch destinations hanging off the jumpnode
452 int i = 0;
453 for (SwitchRange* r = lo; r <= hi; r++) {
454 for (int j = r->lo(); j <= r->hi(); j++, i++) {
455 Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
456 {
457 PreserveJVMState pjvms(this);
458 set_control(input);
459 jump_if_always_fork(r->dest(), r->table_index());
460 }
461 }
462 }
463 assert(i == num_cases, "miscount of cases");
464 stop_and_kill_map(); // no more uses for this JVMS
465 return true;
466 }
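// Sketch of the jump-table lowering above, with assumed example values: for
// ranges covering keys 10..73 the key is normalized and guarded roughly as
//
//   key_val = key_val - 10;                        // lowval
//   if ((juint)key_val >= 64) goto default_dest;   // guard, only if needed
//
// then widened to 64 bits on LP64, scaled by wordSize, and fed to a JumpNode
// with one JumpProjNode per case value.  The guard is emitted only when
// trimming the outlying default ranges saves more than
// MaxJumpTableSparseness * 4 table entries.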
468 //----------------------------jump_switch_ranges-------------------------------
469 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
470 Block* switch_block = block();
472 if (switch_depth == 0) {
473 // Do special processing for the top-level call.
474 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
475 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
477 // Decrement pred-numbers for the unique set of nodes.
478 #ifdef ASSERT
479 // Ensure that the block's successors are a (duplicate-free) set.
480 int successors_counted = 0; // block occurrences in [lo..hi]
481 int unique_successors = switch_block->num_successors();
482 for (int i = 0; i < unique_successors; i++) {
483 Block* target = switch_block->successor_at(i);
485 // Check that the set of successors is the same in both places.
486 int successors_found = 0;
487 for (SwitchRange* p = lo; p <= hi; p++) {
488 if (p->dest() == target->start()) successors_found++;
489 }
490 assert(successors_found > 0, "successor must be known");
491 successors_counted += successors_found;
492 }
493 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
494 #endif
496 // Maybe prune the inputs, based on the type of key_val.
497 jint min_val = min_jint;
498 jint max_val = max_jint;
499 const TypeInt* ti = key_val->bottom_type()->isa_int();
500 if (ti != NULL) {
501 min_val = ti->_lo;
502 max_val = ti->_hi;
503 assert(min_val <= max_val, "invalid int type");
504 }
505 while (lo->hi() < min_val) lo++;
506 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
507 while (hi->lo() > max_val) hi--;
508 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
509 }
511 #ifndef PRODUCT
512 if (switch_depth == 0) {
513 _max_switch_depth = 0;
514 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
515 }
516 #endif
518 assert(lo <= hi, "must be a non-empty set of ranges");
519 if (lo == hi) {
520 jump_if_always_fork(lo->dest(), lo->table_index());
521 } else {
522 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
523 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
525 if (create_jump_tables(key_val, lo, hi)) return;
527 int nr = hi - lo + 1;
529 SwitchRange* mid = lo + nr/2;
530 // if there is an easy choice, pivot at a singleton:
531 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
533 assert(lo < mid && mid <= hi, "good pivot choice");
534 assert(nr != 2 || mid == hi, "should pick higher of 2");
535 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
537 Node *test_val = _gvn.intcon(mid->lo());
539 if (mid->is_singleton()) {
540 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
541 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
543 // Special Case: If there are exactly three ranges, and the high
544 // and low range each go to the same place, omit the "gt" test,
545 // since it will not discriminate anything.
546 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
547 if (eq_test_only) {
548 assert(mid == hi-1, "");
549 }
551 // if there is a higher range, test for it and process it:
552 if (mid < hi && !eq_test_only) {
553 // two comparisons of same values--should enable 1 test for 2 branches
554 // Use BoolTest::le instead of BoolTest::gt
555 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
556 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
557 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
558 { PreserveJVMState pjvms(this);
559 set_control(iffalse);
560 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
561 }
562 set_control(iftrue);
563 }
565 } else {
566 // mid is a range, not a singleton, so treat mid..hi as a unit
567 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
569 // if there is a higher range, test for it and process it:
570 if (mid == hi) {
571 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
572 } else {
573 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
574 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
575 { PreserveJVMState pjvms(this);
576 set_control(iftrue);
577 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
578 }
579 set_control(iffalse);
580 }
581 }
583 // in any case, process the lower range
584 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
585 }
587 // Decrease pred_count for each successor after all is done.
588 if (switch_depth == 0) {
589 int unique_successors = switch_block->num_successors();
590 for (int i = 0; i < unique_successors; i++) {
591 Block* target = switch_block->successor_at(i);
592 // Throw away the pre-allocated path for each unique successor.
593 target->next_path_num();
594 }
595 }
597 #ifndef PRODUCT
598 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
599 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
600 SwitchRange* r;
601 int nsing = 0;
602 for( r = lo; r <= hi; r++ ) {
603 if( r->is_singleton() ) nsing++;
604 }
605 tty->print(">>> ");
606 _method->print_short_name();
607 tty->print_cr(" switch decision tree");
608 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
609 hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
610 if (_max_switch_depth > _est_switch_depth) {
611 tty->print_cr("******** BAD SWITCH DEPTH ********");
612 }
613 tty->print(" ");
614 for( r = lo; r <= hi; r++ ) {
615 r->print(env());
616 }
617 tty->print_cr("");
618 }
619 #endif
620 }
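// jump_switch_ranges above emits a binary decision tree over the ranges: each
// recursion picks a pivot near the middle (preferring a singleton), compares
// the key against the pivot's low value, and recurses on the two halves, so a
// switch with N ranges costs roughly log2(N) compares on any path unless a
// jump table was emitted first.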
622 void Parse::modf() {
623 Node *f2 = pop();
624 Node *f1 = pop();
625 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
626 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
627 "frem", NULL, //no memory effects
628 f1, f2);
629 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
631 push(res);
632 }
634 void Parse::modd() {
635 Node *d2 = pop_pair();
636 Node *d1 = pop_pair();
637 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
638 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
639 "drem", NULL, //no memory effects
640 d1, top(), d2, top());
641 Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
643 #ifdef ASSERT
644 Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
645 assert(res_top == top(), "second value must be top");
646 #endif
648 push_pair(res_d);
649 }
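// Note on the call above: a double occupies two slots in the call signature,
// so each argument is passed as (value, top()), the top() being the dummy
// second half of the two-word value.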
651 void Parse::l2f() {
652 Node* f2 = pop();
653 Node* f1 = pop();
654 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
655 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
656 "l2f", NULL, //no memory effects
657 f1, f2);
658 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
660 push(res);
661 }
663 void Parse::do_irem() {
664 // Must keep both values on the expression-stack during null-check
665 do_null_check(peek(), T_INT);
666 // Compile-time detection of a null exception?
667 if (stopped()) return;
669 Node* b = pop();
670 Node* a = pop();
672 const Type *t = _gvn.type(b);
673 if (t != Type::TOP) {
674 const TypeInt *ti = t->is_int();
675 if (ti->is_con()) {
676 int divisor = ti->get_con();
677 // check for positive power of 2
678 if (divisor > 0 &&
679 (divisor & ~(divisor-1)) == divisor) {
680 // yes !
681 Node *mask = _gvn.intcon((divisor - 1));
682 // Sigh, must handle negative dividends
683 Node *zero = _gvn.intcon(0);
684 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
685 Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
686 Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
687 Node *reg = jump_if_join(ift, iff);
688 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
689 // Negative path; negate/and/negate
690 Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
691 Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
692 Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
693 phi->init_req(1, negn);
694 // Fast positive case
695 Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
696 phi->init_req(2, andx);
697 // Push the merge
698 push( _gvn.transform(phi) );
699 return;
700 }
701 }
702 }
703 // Default case
704 push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
705 }
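// The power-of-two fast path above relies on the identity (illustration):
//
//   a % 8 ==  (a & 7)        for a >= 0
//   a % 8 == -((-a) & 7)     for a <  0    (Java remainder keeps the sign of
//                                           the dividend, e.g. -5 % 8 == -5)
//
// which is exactly what the SubI/AndI/SubI chain merged by the Phi computes.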
707 // Handle jsr and jsr_w bytecode
708 void Parse::do_jsr() {
709 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
711 // Store information about current state, tagged with new _jsr_bci
712 int return_bci = iter().next_bci();
713 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
715 // Update method data
716 profile_taken_branch(jsr_bci);
718 // The way we do things now, there is only one successor block
719 // for the jsr, because the target code is cloned by ciTypeFlow.
720 Block* target = successor_for_bci(jsr_bci);
722 // What got pushed?
723 const Type* ret_addr = target->peek();
724 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
726 // Effect of jsr on stack
727 push(_gvn.makecon(ret_addr));
729 // Flow to the jsr.
730 merge(jsr_bci);
731 }
733 // Handle ret bytecode
734 void Parse::do_ret() {
735 // Find to whom we return.
736 #if 0 // %%%% MAKE THIS WORK
737 Node* con = local();
738 const TypePtr* tp = con->bottom_type()->isa_ptr();
739 assert(tp && tp->singleton(), "");
740 int return_bci = (int) tp->get_con();
741 merge(return_bci);
742 #else
743 assert(block()->num_successors() == 1, "a ret can only go one place now");
744 Block* target = block()->successor_at(0);
745 assert(!target->is_ready(), "our arrival must be expected");
746 profile_ret(target->flow()->start());
747 int pnum = target->next_path_num();
748 merge_common(target, pnum);
749 #endif
750 }
752 //--------------------------dynamic_branch_prediction--------------------------
753 // Try to gather dynamic branch prediction behavior. Return a probability
754 // of the branch being taken and set the "cnt" field. Returns -1.0
755 // if we need to use static prediction for some reason.
756 float Parse::dynamic_branch_prediction(float &cnt) {
757 ResourceMark rm;
759 cnt = COUNT_UNKNOWN;
761 // Use MethodData information if it is available
762 // FIXME: free the ProfileData structure
763 ciMethodData* methodData = method()->method_data();
764 if (!methodData->is_mature()) return PROB_UNKNOWN;
765 ciProfileData* data = methodData->bci_to_data(bci());
766 if (!data->is_JumpData()) return PROB_UNKNOWN;
768 // get taken and not taken values
769 int taken = data->as_JumpData()->taken();
770 int not_taken = 0;
771 if (data->is_BranchData()) {
772 not_taken = data->as_BranchData()->not_taken();
773 }
775 // scale the counts to be commensurate with invocation counts:
776 taken = method()->scale_count(taken);
777 not_taken = method()->scale_count(not_taken);
779 // Give up if too few counts to be meaningful
780 if (taken + not_taken < 40) {
781 if (C->log() != NULL) {
782 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
783 }
784 return PROB_UNKNOWN;
785 }
787 // Compute frequency that we arrive here
788 int sum = taken + not_taken;
789 // Adjust, if this block is a cloned private block but the
790 // Jump counts are shared. Take the private counts for
791 // just this path instead of the shared counts.
792 if( block()->count() > 0 )
793 sum = block()->count();
794 cnt = (float)sum / (float)FreqCountInvocations;
796 // Pin probability to sane limits
797 float prob;
798 if( !taken )
799 prob = (0+PROB_MIN) / 2;
800 else if( !not_taken )
801 prob = (1+PROB_MAX) / 2;
802 else { // Compute probability of true path
803 prob = (float)taken / (float)(taken + not_taken);
804 if (prob > PROB_MAX) prob = PROB_MAX;
805 if (prob < PROB_MIN) prob = PROB_MIN;
806 }
808 assert((cnt > 0.0f) && (prob > 0.0f),
809 "Bad frequency assignment in if");
811 if (C->log() != NULL) {
812 const char* prob_str = NULL;
813 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
814 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
815 char prob_str_buf[30];
816 if (prob_str == NULL) {
817 sprintf(prob_str_buf, "%g", prob);
818 prob_str = prob_str_buf;
819 }
820 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
821 iter().get_dest(), taken, not_taken, cnt, prob_str);
822 }
823 return prob;
824 }
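// Example of the pinning above (illustrative counts): taken=0, not_taken=1000
// returns (0 + PROB_MIN) / 2, i.e. strictly positive but below PROB_MIN, which
// is precisely what seems_never_taken() tests for below; an ordinary profile
// of 40 taken / 60 not taken (after scaling) simply yields prob = 0.4.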
826 //-----------------------------branch_prediction-------------------------------
827 float Parse::branch_prediction(float& cnt,
828 BoolTest::mask btest,
829 int target_bci) {
830 float prob = dynamic_branch_prediction(cnt);
831 // If prob is unknown, switch to static prediction
832 if (prob != PROB_UNKNOWN) return prob;
834 prob = PROB_FAIR; // Set default value
835 if (btest == BoolTest::eq) // Exactly equal test?
836 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
837 else if (btest == BoolTest::ne)
838 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
840 // If this is a conditional test guarding a backwards branch,
841 // assume it's a loop-back edge. Make it a likely taken branch.
842 if (target_bci < bci()) {
843 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
844 // Since it's an OSR, we probably have profile data, but since
845 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
846 // Let's make a special check here for completely zero counts.
847 ciMethodData* methodData = method()->method_data();
848 if (!methodData->is_empty()) {
849 ciProfileData* data = methodData->bci_to_data(bci());
850 // Only stop for truly zero counts, which mean an unknown part
851 // of the OSR-ed method, and we want to deopt to gather more stats.
852 // If you have ANY counts, then this loop is simply 'cold' relative
853 // to the OSR loop.
854 if (data->as_BranchData()->taken() +
855 data->as_BranchData()->not_taken() == 0 ) {
856 // This is the only way to return PROB_UNKNOWN:
857 return PROB_UNKNOWN;
858 }
859 }
860 }
861 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
862 }
864 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
865 return prob;
866 }
868 // The magic constants are chosen so as to match the output of
869 // branch_prediction() when the profile reports a zero taken count.
870 // It is important to distinguish zero counts unambiguously, because
871 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
872 // very small but nonzero probabilities, which if confused with zero
873 // counts would keep the program recompiling indefinitely.
874 bool Parse::seems_never_taken(float prob) {
875 return prob < PROB_MIN;
876 }
878 inline void Parse::repush_if_args() {
879 #ifndef PRODUCT
880 if (PrintOpto && WizardMode) {
881 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
882 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
883 method()->print_name(); tty->cr();
884 }
885 #endif
886 int bc_depth = - Bytecodes::depth(iter().cur_bc());
887 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
888 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
889 assert(argument(0) != NULL, "must exist");
890 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
891 _sp += bc_depth;
892 }
894 //----------------------------------do_ifnull----------------------------------
895 void Parse::do_ifnull(BoolTest::mask btest) {
896 int target_bci = iter().get_dest();
898 Block* branch_block = successor_for_bci(target_bci);
899 Block* next_block = successor_for_bci(iter().next_bci());
901 float cnt;
902 float prob = branch_prediction(cnt, btest, target_bci);
903 if (prob == PROB_UNKNOWN) {
904 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
905 #ifndef PRODUCT
906 if (PrintOpto && Verbose)
907 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
908 #endif
909 repush_if_args(); // to gather stats on loop
910 // We need to mark this branch as taken so that if we recompile we will
911 // see that it is possible. In the tiered system the interpreter doesn't
912 // do profiling and by the time we get to the lower tier from the interpreter
913 // the path may be cold again. Make sure it doesn't look untaken
914 profile_taken_branch(target_bci, !ProfileInterpreter);
915 uncommon_trap(Deoptimization::Reason_unreached,
916 Deoptimization::Action_reinterpret,
917 NULL, "cold");
918 if (EliminateAutoBox) {
919 // Mark the successor blocks as parsed
920 branch_block->next_path_num();
921 next_block->next_path_num();
922 }
923 return;
924 }
926 // If this is a backwards branch in the bytecodes, add Safepoint
927 maybe_add_safepoint(target_bci);
929 explicit_null_checks_inserted++;
930 Node* a = null();
931 Node* b = pop();
932 Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
934 // Make a cast-away-nullness that is control dependent on the test
935 const Type *t = _gvn.type(b);
936 const Type *t_not_null = t->join(TypePtr::NOTNULL);
937 Node *cast = new (C, 2) CastPPNode(b,t_not_null);
939 // Generate real control flow
940 Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );
942 // Sanity check the probability value
943 assert(prob > 0.0f,"Bad probability in Parser");
944 // Need xform to put node in hash table
945 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
946 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
947 // True branch
948 { PreserveJVMState pjvms(this);
949 Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
950 set_control(iftrue);
952 if (stopped()) { // Path is dead?
953 explicit_null_checks_elided++;
954 if (EliminateAutoBox) {
955 // Mark the successor block as parsed
956 branch_block->next_path_num();
957 }
958 } else { // Path is live.
959 // Update method data
960 profile_taken_branch(target_bci);
961 adjust_map_after_if(btest, c, prob, branch_block, next_block);
962 if (!stopped())
963 merge(target_bci);
964 }
965 }
967 // False branch
968 Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
969 set_control(iffalse);
971 if (stopped()) { // Path is dead?
972 explicit_null_checks_elided++;
973 if (EliminateAutoBox) {
974 // Mark the successor block as parsed
975 next_block->next_path_num();
976 }
977 } else { // Path is live.
978 // Update method data
979 profile_not_taken_branch();
980 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
981 next_block, branch_block);
982 }
983 }
985 //------------------------------------do_if------------------------------------
986 void Parse::do_if(BoolTest::mask btest, Node* c) {
987 int target_bci = iter().get_dest();
989 Block* branch_block = successor_for_bci(target_bci);
990 Block* next_block = successor_for_bci(iter().next_bci());
992 float cnt;
993 float prob = branch_prediction(cnt, btest, target_bci);
994 float untaken_prob = 1.0 - prob;
996 if (prob == PROB_UNKNOWN) {
997 #ifndef PRODUCT
998 if (PrintOpto && Verbose)
999 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
1000 #endif
1001 repush_if_args(); // to gather stats on loop
1002 // We need to mark this branch as taken so that if we recompile we will
1003 // see that it is possible. In the tiered system the interpreter doesn't
1004 // do profiling and by the time we get to the lower tier from the interpreter
1005 // the path may be cold again. Make sure it doesn't look untaken
1006 profile_taken_branch(target_bci, !ProfileInterpreter);
1007 uncommon_trap(Deoptimization::Reason_unreached,
1008 Deoptimization::Action_reinterpret,
1009 NULL, "cold");
1010 if (EliminateAutoBox) {
1011 // Mark the successor blocks as parsed
1012 branch_block->next_path_num();
1013 next_block->next_path_num();
1014 }
1015 return;
1016 }
1018 // Sanity check the probability value
1019 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1021 bool taken_if_true = true;
1022 // Convert BoolTest to canonical form:
1023 if (!BoolTest(btest).is_canonical()) {
1024 btest = BoolTest(btest).negate();
1025 taken_if_true = false;
1026 // prob is NOT updated here; it remains the probability of the taken
1027 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1028 }
1029 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1031 Node* tst0 = new (C, 2) BoolNode(c, btest);
1032 Node* tst = _gvn.transform(tst0);
1033 BoolTest::mask taken_btest = BoolTest::illegal;
1034 BoolTest::mask untaken_btest = BoolTest::illegal;
1036 if (tst->is_Bool()) {
1037 // Refresh c from the transformed bool node, since it may be
1038 // simpler than the original c. Also re-canonicalize btest.
1039 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1040 // That can arise from statements like: if (x instanceof C) ...
1041 if (tst != tst0) {
1042 // Canonicalize one more time since transform can change it.
1043 btest = tst->as_Bool()->_test._test;
1044 if (!BoolTest(btest).is_canonical()) {
1045 // Reverse edges one more time...
1046 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1047 btest = tst->as_Bool()->_test._test;
1048 assert(BoolTest(btest).is_canonical(), "sanity");
1049 taken_if_true = !taken_if_true;
1050 }
1051 c = tst->in(1);
1052 }
1053 BoolTest::mask neg_btest = BoolTest(btest).negate();
1054 taken_btest = taken_if_true ? btest : neg_btest;
1055 untaken_btest = taken_if_true ? neg_btest : btest;
1056 }
1058 // Generate real control flow
1059 float true_prob = (taken_if_true ? prob : untaken_prob);
1060 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1061 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1062 Node* taken_branch = new (C, 1) IfTrueNode(iff);
1063 Node* untaken_branch = new (C, 1) IfFalseNode(iff);
1064 if (!taken_if_true) { // Finish conversion to canonical form
1065 Node* tmp = taken_branch;
1066 taken_branch = untaken_branch;
1067 untaken_branch = tmp;
1068 }
1070 // Branch is taken:
1071 { PreserveJVMState pjvms(this);
1072 taken_branch = _gvn.transform(taken_branch);
1073 set_control(taken_branch);
1075 if (stopped()) {
1076 if (EliminateAutoBox) {
1077 // Mark the successor block as parsed
1078 branch_block->next_path_num();
1079 }
1080 } else {
1081 // Update method data
1082 profile_taken_branch(target_bci);
1083 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1084 if (!stopped())
1085 merge(target_bci);
1086 }
1087 }
1089 untaken_branch = _gvn.transform(untaken_branch);
1090 set_control(untaken_branch);
1092 // Branch not taken.
1093 if (stopped()) {
1094 if (EliminateAutoBox) {
1095 // Mark the successor block as parsed
1096 next_block->next_path_num();
1097 }
1098 } else {
1099 // Update method data
1100 profile_not_taken_branch();
1101 adjust_map_after_if(untaken_btest, c, untaken_prob,
1102 next_block, branch_block);
1103 }
1104 }
1106 //----------------------------adjust_map_after_if------------------------------
1107 // Adjust the JVM state to reflect the result of taking this path.
1108 // Basically, it means inspecting the CmpNode controlling this
1109 // branch, seeing how it constrains a tested value, and then
1110 // deciding if it's worth our while to encode this constraint
1111 // as graph nodes in the current abstract interpretation map.
1112 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1113 Block* path, Block* other_path) {
1114 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1115 return; // nothing to do
1117 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1119 int cop = c->Opcode();
1120 if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) {
1121 // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.)
1122 //
1123 // If this might possibly turn into an implicit null check,
1124 // and the null has never yet been seen, we need to generate
1125 // an uncommon trap, so as to recompile instead of suffering
1126 // with very slow branches. (We'll get the slow branches if
1127 // the program ever changes phase and starts seeing nulls here.)
1128 //
1129 // The tests we worry about are of the form (p == null).
1130 // We do not simply inspect for a null constant, since a node may
1131 // optimize to 'null' later on.
1132 repush_if_args();
1133 // We need to mark this branch as taken so that if we recompile we will
1134 // see that it is possible. In the tiered system the interpreter doesn't
1135 // do profiling and by the time we get to the lower tier from the interpreter
1136 // the path may be cold again. Make sure it doesn't look untaken
1137 if (is_fallthrough) {
1138 profile_not_taken_branch(!ProfileInterpreter);
1139 } else {
1140 profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
1141 }
1142 uncommon_trap(Deoptimization::Reason_unreached,
1143 Deoptimization::Action_reinterpret,
1144 NULL,
1145 (is_fallthrough ? "taken always" : "taken never"));
1146 return;
1147 }
1149 Node* val = c->in(1);
1150 Node* con = c->in(2);
1151 const Type* tcon = _gvn.type(con);
1152 const Type* tval = _gvn.type(val);
1153 bool have_con = tcon->singleton();
1154 if (tval->singleton()) {
1155 if (!have_con) {
1156 // Swap, so constant is in con.
1157 con = val;
1158 tcon = tval;
1159 val = c->in(2);
1160 tval = _gvn.type(val);
1161 btest = BoolTest(btest).commute();
1162 have_con = true;
1163 } else {
1164 // Do we have two constants? Then leave well enough alone.
1165 have_con = false;
1166 }
1167 }
1168 if (!have_con) // remaining adjustments need a con
1169 return;
1172 int val_in_map = map()->find_edge(val);
1173 if (val_in_map < 0) return; // replace_in_map would be useless
1174 {
1175 JVMState* jvms = this->jvms();
1176 if (!(jvms->is_loc(val_in_map) ||
1177 jvms->is_stk(val_in_map)))
1178 return; // again, it would be useless
1179 }
1181 // Check for a comparison to a constant, and "know" that the compared
1182 // value is constrained on this path.
1183 assert(tcon->singleton(), "");
1184 ConstraintCastNode* ccast = NULL;
1185 Node* cast = NULL;
1187 switch (btest) {
1188 case BoolTest::eq: // Constant test?
1189 {
1190 const Type* tboth = tcon->join(tval);
1191 if (tboth == tval) break; // Nothing to gain.
1192 if (tcon->isa_int()) {
1193 ccast = new (C, 2) CastIINode(val, tboth);
1194 } else if (tcon == TypePtr::NULL_PTR) {
1195 // Cast to null, but keep the pointer identity temporarily live.
1196 ccast = new (C, 2) CastPPNode(val, tboth);
1197 } else {
1198 const TypeF* tf = tcon->isa_float_constant();
1199 const TypeD* td = tcon->isa_double_constant();
1200 // Exclude tests vs float/double 0 as these could be
1201 // either +0 or -0. Just because you are equal to +0
1202 // doesn't mean you ARE +0!
1203 if ((!tf || tf->_f != 0.0) &&
1204 (!td || td->_d != 0.0))
1205 cast = con; // Replace non-constant val by con.
1206 }
1207 }
1208 break;
1210 case BoolTest::ne:
1211 if (tcon == TypePtr::NULL_PTR) {
1212 cast = cast_not_null(val, false);
1213 }
1214 break;
1216 default:
1217 // (At this point we could record int range types with CastII.)
1218 break;
1219 }
1221 if (ccast != NULL) {
1222 const Type* tcc = ccast->as_Type()->type();
1223 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1224 // Delay transform() call to allow recovery of pre-cast value
1225 // at the control merge.
1226 ccast->set_req(0, control());
1227 _gvn.set_type_bottom(ccast);
1228 record_for_igvn(ccast);
1229 cast = ccast;
1230 }
1232 if (cast != NULL) { // Here's the payoff.
1233 replace_in_map(val, cast);
1234 }
1235 }
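// Example of the payoff above (illustrative): after parsing
//
//   if (x == 7) { ...use x... }
//
// the path where the test holds replaces x in the map with a CastIINode whose
// type is the join of int:7 and x's current type, so later uses of x on that
// path fold to the constant 7.  Likewise, a pointer compared not-equal to the
// null constant gets a not-null cast on the path where the test holds.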
1238 //------------------------------do_one_bytecode--------------------------------
1239 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1240 void Parse::do_one_bytecode() {
1241 Node *a, *b, *c, *d; // Handy temps
1242 BoolTest::mask btest;
1243 int i;
1245 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1247 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1248 "out of nodes parsing method")) {
1249 return;
1250 }
1252 #ifdef ASSERT
1253 // for setting breakpoints
1254 if (TraceOptoParse) {
1255 tty->print(" @");
1256 dump_bci(bci());
1257 }
1258 #endif
1260 switch (bc()) {
1261 case Bytecodes::_nop:
1262 // do nothing
1263 break;
1264 case Bytecodes::_lconst_0:
1265 push_pair(longcon(0));
1266 break;
1268 case Bytecodes::_lconst_1:
1269 push_pair(longcon(1));
1270 break;
1272 case Bytecodes::_fconst_0:
1273 push(zerocon(T_FLOAT));
1274 break;
1276 case Bytecodes::_fconst_1:
1277 push(makecon(TypeF::ONE));
1278 break;
1280 case Bytecodes::_fconst_2:
1281 push(makecon(TypeF::make(2.0f)));
1282 break;
1284 case Bytecodes::_dconst_0:
1285 push_pair(zerocon(T_DOUBLE));
1286 break;
1288 case Bytecodes::_dconst_1:
1289 push_pair(makecon(TypeD::ONE));
1290 break;
1292 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1293 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1294 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1295 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1296 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1297 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1298 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1299 case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
1300 case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
1301 case Bytecodes::_aconst_null: push(null()); break;
1302 case Bytecodes::_ldc:
1303 case Bytecodes::_ldc_w:
1304 case Bytecodes::_ldc2_w:
1305 // If the constant is unresolved, run this BC once in the interpreter.
1306 if (iter().is_unresolved_string()) {
1307 uncommon_trap(Deoptimization::make_trap_request
1308 (Deoptimization::Reason_unloaded,
1309 Deoptimization::Action_reinterpret,
1310 iter().get_constant_index()),
1311 NULL, "unresolved_string");
1312 break;
1313 } else {
1314 ciConstant constant = iter().get_constant();
1315 if (constant.basic_type() == T_OBJECT) {
1316 ciObject* c = constant.as_object();
1317 if (c->is_klass()) {
1318 // The constant returned for a klass is the ciKlass for the
1319 // entry. We want the java_mirror so get it.
1320 ciKlass* klass = c->as_klass();
1321 if (klass->is_loaded()) {
1322 constant = ciConstant(T_OBJECT, klass->java_mirror());
1323 } else {
1324 uncommon_trap(Deoptimization::make_trap_request
1325 (Deoptimization::Reason_unloaded,
1326 Deoptimization::Action_reinterpret,
1327 iter().get_constant_index()),
1328 NULL, "unresolved_klass");
1329 break;
1330 }
1331 }
1332 }
1333 push_constant(constant);
1334 }
1336 break;
1338 case Bytecodes::_aload_0:
1339 push( local(0) );
1340 break;
1341 case Bytecodes::_aload_1:
1342 push( local(1) );
1343 break;
1344 case Bytecodes::_aload_2:
1345 push( local(2) );
1346 break;
1347 case Bytecodes::_aload_3:
1348 push( local(3) );
1349 break;
1350 case Bytecodes::_aload:
1351 push( local(iter().get_index()) );
1352 break;
1354 case Bytecodes::_fload_0:
1355 case Bytecodes::_iload_0:
1356 push( local(0) );
1357 break;
1358 case Bytecodes::_fload_1:
1359 case Bytecodes::_iload_1:
1360 push( local(1) );
1361 break;
1362 case Bytecodes::_fload_2:
1363 case Bytecodes::_iload_2:
1364 push( local(2) );
1365 break;
1366 case Bytecodes::_fload_3:
1367 case Bytecodes::_iload_3:
1368 push( local(3) );
1369 break;
1370 case Bytecodes::_fload:
1371 case Bytecodes::_iload:
1372 push( local(iter().get_index()) );
1373 break;
1374 case Bytecodes::_lload_0:
1375 push_pair_local( 0 );
1376 break;
1377 case Bytecodes::_lload_1:
1378 push_pair_local( 1 );
1379 break;
1380 case Bytecodes::_lload_2:
1381 push_pair_local( 2 );
1382 break;
1383 case Bytecodes::_lload_3:
1384 push_pair_local( 3 );
1385 break;
1386 case Bytecodes::_lload:
1387 push_pair_local( iter().get_index() );
1388 break;
1390 case Bytecodes::_dload_0:
1391 push_pair_local(0);
1392 break;
1393 case Bytecodes::_dload_1:
1394 push_pair_local(1);
1395 break;
1396 case Bytecodes::_dload_2:
1397 push_pair_local(2);
1398 break;
1399 case Bytecodes::_dload_3:
1400 push_pair_local(3);
1401 break;
1402 case Bytecodes::_dload:
1403 push_pair_local(iter().get_index());
1404 break;
1405 case Bytecodes::_fstore_0:
1406 case Bytecodes::_istore_0:
1407 case Bytecodes::_astore_0:
1408 set_local( 0, pop() );
1409 break;
1410 case Bytecodes::_fstore_1:
1411 case Bytecodes::_istore_1:
1412 case Bytecodes::_astore_1:
1413 set_local( 1, pop() );
1414 break;
1415 case Bytecodes::_fstore_2:
1416 case Bytecodes::_istore_2:
1417 case Bytecodes::_astore_2:
1418 set_local( 2, pop() );
1419 break;
1420 case Bytecodes::_fstore_3:
1421 case Bytecodes::_istore_3:
1422 case Bytecodes::_astore_3:
1423 set_local( 3, pop() );
1424 break;
1425 case Bytecodes::_fstore:
1426 case Bytecodes::_istore:
1427 case Bytecodes::_astore:
1428 set_local( iter().get_index(), pop() );
1429 break;
1430 // long stores
1431 case Bytecodes::_lstore_0:
1432 set_pair_local( 0, pop_pair() );
1433 break;
1434 case Bytecodes::_lstore_1:
1435 set_pair_local( 1, pop_pair() );
1436 break;
1437 case Bytecodes::_lstore_2:
1438 set_pair_local( 2, pop_pair() );
1439 break;
1440 case Bytecodes::_lstore_3:
1441 set_pair_local( 3, pop_pair() );
1442 break;
1443 case Bytecodes::_lstore:
1444 set_pair_local( iter().get_index(), pop_pair() );
1445 break;
1447 // double stores
1448 case Bytecodes::_dstore_0:
1449 set_pair_local( 0, dstore_rounding(pop_pair()) );
1450 break;
1451 case Bytecodes::_dstore_1:
1452 set_pair_local( 1, dstore_rounding(pop_pair()) );
1453 break;
1454 case Bytecodes::_dstore_2:
1455 set_pair_local( 2, dstore_rounding(pop_pair()) );
1456 break;
1457 case Bytecodes::_dstore_3:
1458 set_pair_local( 3, dstore_rounding(pop_pair()) );
1459 break;
1460 case Bytecodes::_dstore:
1461 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1462 break;
1464 case Bytecodes::_pop: _sp -= 1; break;
1465 case Bytecodes::_pop2: _sp -= 2; break;
1466 case Bytecodes::_swap:
1467 a = pop();
1468 b = pop();
1469 push(a);
1470 push(b);
1471 break;
1472 case Bytecodes::_dup:
1473 a = pop();
1474 push(a);
1475 push(a);
1476 break;
1477 case Bytecodes::_dup_x1:
1478 a = pop();
1479 b = pop();
1480 push( a );
1481 push( b );
1482 push( a );
1483 break;
1484 case Bytecodes::_dup_x2:
1485 a = pop();
1486 b = pop();
1487 c = pop();
1488 push( a );
1489 push( c );
1490 push( b );
1491 push( a );
1492 break;
1493 case Bytecodes::_dup2:
1494 a = pop();
1495 b = pop();
1496 push( b );
1497 push( a );
1498 push( b );
1499 push( a );
1500 break;
1502 case Bytecodes::_dup2_x1:
1503 // before: .. c, b, a
1504 // after: .. b, a, c, b, a
1505 // not tested
1506 a = pop();
1507 b = pop();
1508 c = pop();
1509 push( b );
1510 push( a );
1511 push( c );
1512 push( b );
1513 push( a );
1514 break;
1515 case Bytecodes::_dup2_x2:
1516 // before: .. d, c, b, a
1517 // after: .. b, a, d, c, b, a
1518 // not tested
1519 a = pop();
1520 b = pop();
1521 c = pop();
1522 d = pop();
1523 push( b );
1524 push( a );
1525 push( d );
1526 push( c );
1527 push( b );
1528 push( a );
1529 break;
1531 case Bytecodes::_arraylength: {
1532 // Must do null-check with value on expression stack
1533 Node *ary = do_null_check(peek(), T_ARRAY);
1534 // Compile-time detection of a null exception?
1535 if (stopped()) return;
1536 a = pop();
1537 push(load_array_length(a));
1538 break;
1539 }
1541 case Bytecodes::_baload: array_load(T_BYTE); break;
1542 case Bytecodes::_caload: array_load(T_CHAR); break;
1543 case Bytecodes::_iaload: array_load(T_INT); break;
1544 case Bytecodes::_saload: array_load(T_SHORT); break;
1545 case Bytecodes::_faload: array_load(T_FLOAT); break;
1546 case Bytecodes::_aaload: array_load(T_OBJECT); break;
1547 case Bytecodes::_laload: {
1548 a = array_addressing(T_LONG, 0);
1549 if (stopped()) return; // guaranteed null or range check
1550 _sp -= 2; // Pop array and index
1551 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
1552 break;
1553 }
1554 case Bytecodes::_daload: {
1555 a = array_addressing(T_DOUBLE, 0);
1556 if (stopped()) return; // guaranteed null or range check
1557 _sp -= 2; // Pop array and index
1558 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
1559 break;
1560 }
1561 case Bytecodes::_bastore: array_store(T_BYTE); break;
1562 case Bytecodes::_castore: array_store(T_CHAR); break;
1563 case Bytecodes::_iastore: array_store(T_INT); break;
1564 case Bytecodes::_sastore: array_store(T_SHORT); break;
1565 case Bytecodes::_fastore: array_store(T_FLOAT); break;
1566 case Bytecodes::_aastore: {
1567 d = array_addressing(T_OBJECT, 1);
1568 if (stopped()) return; // guaranteed null or range check
1569 array_store_check();
1570 c = pop(); // Oop to store
1571 b = pop(); // index (already used)
1572 a = pop(); // the array itself
1573 const Type* elemtype = _gvn.type(a)->is_aryptr()->elem();
1574 const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1575 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
1576 break;
1577 }
1578 case Bytecodes::_lastore: {
1579 a = array_addressing(T_LONG, 2);
1580 if (stopped()) return; // guaranteed null or range check
1581 c = pop_pair();
1582 _sp -= 2; // Pop array and index
1583 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
1584 break;
1585 }
1586 case Bytecodes::_dastore: {
1587 a = array_addressing(T_DOUBLE, 2);
1588 if (stopped()) return; // guaranteed null or range check
1589 c = pop_pair();
1590 _sp -= 2; // Pop array and index
1591 c = dstore_rounding(c);
1592 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
1593 break;
1594 }
1595 case Bytecodes::_getfield:
1596 do_getfield();
1597 break;
1599 case Bytecodes::_getstatic:
1600 do_getstatic();
1601 break;
1603 case Bytecodes::_putfield:
1604 do_putfield();
1605 break;
1607 case Bytecodes::_putstatic:
1608 do_putstatic();
1609 break;
1611 case Bytecodes::_irem:
1612 do_irem();
1613 break;
1614 case Bytecodes::_idiv:
1615 // Must keep both values on the expression-stack during null-check
1616 do_null_check(peek(), T_INT);
1617 // Compile-time detection of a null exception?
1618 if (stopped()) return;
1619 b = pop();
1620 a = pop();
1621 push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
1622 break;
1623 case Bytecodes::_imul:
1624 b = pop(); a = pop();
1625 push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
1626 break;
1627 case Bytecodes::_iadd:
1628 b = pop(); a = pop();
1629 push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
1630 break;
1631 case Bytecodes::_ineg:
1632 a = pop();
1633 push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
1634 break;
1635 case Bytecodes::_isub:
1636 b = pop(); a = pop();
1637 push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
1638 break;
1639 case Bytecodes::_iand:
1640 b = pop(); a = pop();
1641 push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
1642 break;
1643 case Bytecodes::_ior:
1644 b = pop(); a = pop();
1645 push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
1646 break;
1647 case Bytecodes::_ixor:
1648 b = pop(); a = pop();
1649 push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
1650 break;
1651 case Bytecodes::_ishl:
1652 b = pop(); a = pop();
1653 push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
1654 break;
1655 case Bytecodes::_ishr:
1656 b = pop(); a = pop();
1657 push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
1658 break;
1659 case Bytecodes::_iushr:
1660 b = pop(); a = pop();
1661 push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
1662 break;
1664 case Bytecodes::_fneg:
1665 a = pop();
1666 b = _gvn.transform(new (C, 2) NegFNode (a));
1667 push(b);
1668 break;
1670 case Bytecodes::_fsub:
1671 b = pop();
1672 a = pop();
1673 c = _gvn.transform( new (C, 3) SubFNode(a,b) );
1674 d = precision_rounding(c);
1675 push( d );
1676 break;
1678 case Bytecodes::_fadd:
1679 b = pop();
1680 a = pop();
1681 c = _gvn.transform( new (C, 3) AddFNode(a,b) );
1682 d = precision_rounding(c);
1683 push( d );
1684 break;
1686 case Bytecodes::_fmul:
1687 b = pop();
1688 a = pop();
1689 c = _gvn.transform( new (C, 3) MulFNode(a,b) );
1690 d = precision_rounding(c);
1691 push( d );
1692 break;
1694 case Bytecodes::_fdiv:
1695 b = pop();
1696 a = pop();
1697 c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
1698 d = precision_rounding(c);
1699 push( d );
1700 break;
1702 case Bytecodes::_frem:
1703 if (Matcher::has_match_rule(Op_ModF)) {
1704 // Generate a ModF node.
1705 b = pop();
1706 a = pop();
1707 c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
1708 d = precision_rounding(c);
1709 push( d );
1710 }
1711 else {
1712 // Generate a call.
1713 modf();
1714 }
1715 break;
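// When no ModF match rule exists, modf() presumably expands to a runtime call
// (e.g. a shared-runtime frem helper) rather than an inline remainder; keeping the
// ModFNode form when the matcher supports it leaves the remainder in the ideal graph
// where it can be optimized and scheduled like any other float op.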
1717 case Bytecodes::_fcmpl:
1718 b = pop();
1719 a = pop();
1720 c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
1721 push(c);
1722 break;
1723 case Bytecodes::_fcmpg:
1724 b = pop();
1725 a = pop();
1727 // Same as fcmpl but need to flip the unordered case. Swap the inputs,
1728 // which negates the result sign except for unordered. Flip the unordered
1729 // as well by using CmpF3 which implements unordered-lesser instead of
1730 // unordered-greater semantics. Finally, negate the result bits. The result
1731 // is the same as using a CmpF3Greater, except we did it with CmpF3 alone.
1732 c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
1733 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1734 push(c);
1735 break;
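// Worked example of the transform above: fcmpg on (a = 1.0f, b = NaN) must push +1.
// CmpF3(b, a) = CmpF3(NaN, 1.0f) yields -1 (unordered maps to "lesser"), and
// 0 - (-1) = +1, exactly the fcmpg result. For ordered inputs the swap-and-negate
// just flips the sign back: a = 2.0f, b = 1.0f gives CmpF3(1.0f, 2.0f) = -1,
// negated to +1 = fcmpg(2.0f, 1.0f).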
1737 case Bytecodes::_f2i:
1738 a = pop();
1739 push(_gvn.transform(new (C, 2) ConvF2INode(a)));
1740 break;
1742 case Bytecodes::_d2i:
1743 a = pop_pair();
1744 b = _gvn.transform(new (C, 2) ConvD2INode(a));
1745 push( b );
1746 break;
1748 case Bytecodes::_f2d:
1749 a = pop();
1750 b = _gvn.transform( new (C, 2) ConvF2DNode(a));
1751 push_pair( b );
1752 break;
1754 case Bytecodes::_d2f:
1755 a = pop_pair();
1756 b = _gvn.transform( new (C, 2) ConvD2FNode(a));
1757 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1758 //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
1759 push( b );
1760 break;
1762 case Bytecodes::_l2f:
1763 if (Matcher::convL2FSupported()) {
1764 a = pop_pair();
1765 b = _gvn.transform( new (C, 2) ConvL2FNode(a));
1766 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1767 // Rather than storing the result into an FP register then pushing
1768 // out to memory to round, the machine instruction that implements
1769 // ConvL2F is responsible for rounding.
1770 // c = precision_rounding(b);
1771 c = _gvn.transform(b);
1772 push(c);
1773 } else {
1774 l2f();
1775 }
1776 break;
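// Illustration of why the conversion itself must round: a long can exceed the 24
// significand bits of a float, e.g. (1L << 24) + 1 == 16777217 converts to
// 16777216.0f under round-to-nearest-even. Whichever path produces the float has to
// apply that rounding; per the comment above, the machine's conversion instruction
// takes care of it rather than a separate store/reload rounding step.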
1778 case Bytecodes::_l2d:
1779 a = pop_pair();
1780 b = _gvn.transform( new (C, 2) ConvL2DNode(a));
1781 // For i486.ad, rounding is always necessary (see _l2f above).
1782 // c = dprecision_rounding(b);
1783 c = _gvn.transform(b);
1784 push_pair(c);
1785 break;
1787 case Bytecodes::_f2l:
1788 a = pop();
1789 b = _gvn.transform( new (C, 2) ConvF2LNode(a));
1790 push_pair(b);
1791 break;
1793 case Bytecodes::_d2l:
1794 a = pop_pair();
1795 b = _gvn.transform( new (C, 2) ConvD2LNode(a));
1796 push_pair(b);
1797 break;
1799 case Bytecodes::_dsub:
1800 b = pop_pair();
1801 a = pop_pair();
1802 c = _gvn.transform( new (C, 3) SubDNode(a,b) );
1803 d = dprecision_rounding(c);
1804 push_pair( d );
1805 break;
1807 case Bytecodes::_dadd:
1808 b = pop_pair();
1809 a = pop_pair();
1810 c = _gvn.transform( new (C, 3) AddDNode(a,b) );
1811 d = dprecision_rounding(c);
1812 push_pair( d );
1813 break;
1815 case Bytecodes::_dmul:
1816 b = pop_pair();
1817 a = pop_pair();
1818 c = _gvn.transform( new (C, 3) MulDNode(a,b) );
1819 d = dprecision_rounding(c);
1820 push_pair( d );
1821 break;
1823 case Bytecodes::_ddiv:
1824 b = pop_pair();
1825 a = pop_pair();
1826 c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
1827 d = dprecision_rounding(c);
1828 push_pair( d );
1829 break;
1831 case Bytecodes::_dneg:
1832 a = pop_pair();
1833 b = _gvn.transform(new (C, 2) NegDNode (a));
1834 push_pair(b);
1835 break;
1837 case Bytecodes::_drem:
1838 if (Matcher::has_match_rule(Op_ModD)) {
1839 // Generate a ModD node.
1840 b = pop_pair();
1841 a = pop_pair();
1842 // a % b
1844 c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
1845 d = dprecision_rounding(c);
1846 push_pair( d );
1847 }
1848 else {
1849 // Generate a call.
1850 modd();
1851 }
1852 break;
1854 case Bytecodes::_dcmpl:
1855 b = pop_pair();
1856 a = pop_pair();
1857 c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
1858 push(c);
1859 break;
1861 case Bytecodes::_dcmpg:
1862 b = pop_pair();
1863 a = pop_pair();
1864 // Same as dcmpl but need to flip the unordered case.
1865 // Commute the inputs, which negates the result sign except for unordered.
1866 // Flip the unordered as well by using CmpD3 which implements
1867 // unordered-lesser instead of unordered-greater semantics.
1868 // Finally, negate the result bits. Result is same as using a
1869 // CmpD3Greater except we did it with CmpD3 alone.
1870 c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
1871 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1872 push(c);
1873 break;
1876 // Note for longs -> lo word is on TOS, hi word is on TOS - 1
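// In this parser each long (or double) value is a single node plus a dead filler
// slot (top()); the "long word order" asserts below check exactly that layout
// (peek(0) == top()), and push_pair()/pop_pair() push or pop both slots at once.
// For the shift bytecodes further down, the plain pop() removes only the int shift
// count on top, then pop_pair() removes the long value beneath it.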
1877 case Bytecodes::_land:
1878 b = pop_pair();
1879 a = pop_pair();
1880 c = _gvn.transform( new (C, 3) AndLNode(a,b) );
1881 push_pair(c);
1882 break;
1883 case Bytecodes::_lor:
1884 b = pop_pair();
1885 a = pop_pair();
1886 c = _gvn.transform( new (C, 3) OrLNode(a,b) );
1887 push_pair(c);
1888 break;
1889 case Bytecodes::_lxor:
1890 b = pop_pair();
1891 a = pop_pair();
1892 c = _gvn.transform( new (C, 3) XorLNode(a,b) );
1893 push_pair(c);
1894 break;
1896 case Bytecodes::_lshl:
1897 b = pop(); // the shift count
1898 a = pop_pair(); // value to be shifted
1899 c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
1900 push_pair(c);
1901 break;
1902 case Bytecodes::_lshr:
1903 b = pop(); // the shift count
1904 a = pop_pair(); // value to be shifted
1905 c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
1906 push_pair(c);
1907 break;
1908 case Bytecodes::_lushr:
1909 b = pop(); // the shift count
1910 a = pop_pair(); // value to be shifted
1911 c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
1912 push_pair(c);
1913 break;
1914 case Bytecodes::_lmul:
1915 b = pop_pair();
1916 a = pop_pair();
1917 c = _gvn.transform( new (C, 3) MulLNode(a,b) );
1918 push_pair(c);
1919 break;
1921 case Bytecodes::_lrem:
1922 // Must keep both values on the expression-stack during null-check
1923 assert(peek(0) == top(), "long word order");
1924 do_null_check(peek(1), T_LONG);
1925 // Compile-time detect of null-exception?
1926 if (stopped()) return;
1927 b = pop_pair();
1928 a = pop_pair();
1929 c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
1930 push_pair(c);
1931 break;
1933 case Bytecodes::_ldiv:
1934 // Must keep both values on the expression-stack during null-check
1935 assert(peek(0) == top(), "long word order");
1936 do_null_check(peek(1), T_LONG);
1937 // Compile-time detect of null-exception?
1938 if (stopped()) return;
1939 b = pop_pair();
1940 a = pop_pair();
1941 c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
1942 push_pair(c);
1943 break;
1945 case Bytecodes::_ladd:
1946 b = pop_pair();
1947 a = pop_pair();
1948 c = _gvn.transform( new (C, 3) AddLNode(a,b) );
1949 push_pair(c);
1950 break;
1951 case Bytecodes::_lsub:
1952 b = pop_pair();
1953 a = pop_pair();
1954 c = _gvn.transform( new (C, 3) SubLNode(a,b) );
1955 push_pair(c);
1956 break;
1957 case Bytecodes::_lcmp:
1958 // Safepoints are now inserted _before_ branches. The long-compare
1959 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
1960 // slew of control flow. These are usually followed by a CmpI vs zero and
1961 // a branch; this pattern then optimizes to the obvious long-compare and
1962 // branch. However, if the branch is backwards there's a Safepoint
1963 // inserted. The inserted Safepoint captures the JVM state at the
1964 // pre-branch point, i.e. it captures the 3-way value. Thus if a
1965 // long-compare is used to control a loop the debug info will force
1966 // computation of the 3-way value, even though the generated code uses a
1967 // long-compare and branch. We try to rectify the situation by inserting
1968 // a SafePoint here and have it dominate and kill the safepoint added at a
1969 // following backwards branch. At this point the JVM state merely holds 2
1970 // longs but not the 3-way value.
1971 if( UseLoopSafepoints ) {
1972 switch( iter().next_bc() ) {
1973 case Bytecodes::_ifgt:
1974 case Bytecodes::_iflt:
1975 case Bytecodes::_ifge:
1976 case Bytecodes::_ifle:
1977 case Bytecodes::_ifne:
1978 case Bytecodes::_ifeq:
1979 // If this is a backwards branch in the bytecodes, add Safepoint
1980 maybe_add_safepoint(iter().next_get_dest());
1981 }
1982 }
1983 b = pop_pair();
1984 a = pop_pair();
1985 c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
1986 push(c);
1987 break;
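// Sketch of the pattern this safepoint placement targets (hypothetical loop):
//
//   loop_head:                  // backwards branch target
//     ...
//     lcmp                      // 3-way value lives only briefly
//     iflt loop_head            // backward branch would get its own SafePoint
//
// With the safepoint added here, before the CmpL3 is produced, the later
// backward-branch safepoint is dominated and killed, so no JVM state ever has to
// materialize the -1/0/+1 value just for debug info.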
1989 case Bytecodes::_lneg:
1990 a = pop_pair();
1991 b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
1992 push_pair(b);
1993 break;
1994 case Bytecodes::_l2i:
1995 a = pop_pair();
1996 push( _gvn.transform( new (C, 2) ConvL2INode(a)));
1997 break;
1998 case Bytecodes::_i2l:
1999 a = pop();
2000 b = _gvn.transform( new (C, 2) ConvI2LNode(a));
2001 push_pair(b);
2002 break;
2003 case Bytecodes::_i2b:
2004 // Sign extend
2005 a = pop();
2006 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
2007 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
2008 push( a );
2009 break;
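// Worked example of the shift pair: 0x000000FF << 24 = 0xFF000000, and the
// arithmetic >> 24 smears the sign bit back, giving 0xFFFFFFFF (-1), i.e. the byte
// value -1. _i2s below does the same with 16-bit shifts, while _i2c zero-extends
// by masking with 0xFFFF instead.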
2010 case Bytecodes::_i2s:
2011 a = pop();
2012 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
2013 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
2014 push( a );
2015 break;
2016 case Bytecodes::_i2c:
2017 a = pop();
2018 push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
2019 break;
2021 case Bytecodes::_i2f:
2022 a = pop();
2023 b = _gvn.transform( new (C, 2) ConvI2FNode(a) ) ;
2024 c = precision_rounding(b);
2025 push(c);
2026 break;
2028 case Bytecodes::_i2d:
2029 a = pop();
2030 b = _gvn.transform( new (C, 2) ConvI2DNode(a));
2031 push_pair(b);
2032 break;
2034 case Bytecodes::_iinc: // Increment local
2035 i = iter().get_index(); // Get local index
2036 set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2037 break;
2039 // Exit points of synchronized methods must have an unlock node
2040 case Bytecodes::_return:
2041 return_current(NULL);
2042 break;
2044 case Bytecodes::_ireturn:
2045 case Bytecodes::_areturn:
2046 case Bytecodes::_freturn:
2047 return_current(pop());
2048 break;
2049 case Bytecodes::_lreturn:
2050 return_current(pop_pair());
2051 break;
2052 case Bytecodes::_dreturn:
2053 return_current(pop_pair());
2054 break;
2056 case Bytecodes::_athrow:
2058 // A null exception oop throws a NullPointerException.
2058 do_null_check(peek(), T_OBJECT);
2059 if (stopped()) return;
2060 if (JvmtiExport::can_post_exceptions()) {
2061 // "Full-speed throwing" is not necessary here,
2062 // since we're notifying the VM on every throw.
2063 uncommon_trap(Deoptimization::Reason_unhandled,
2064 Deoptimization::Action_none);
2065 return;
2066 }
2067 // Hook the thrown exception directly to subsequent handlers.
2068 if (BailoutToInterpreterForThrows) {
2069 // Keep method interpreted from now on.
2070 uncommon_trap(Deoptimization::Reason_unhandled,
2071 Deoptimization::Action_make_not_compilable);
2072 return;
2073 }
2074 add_exception_state(make_exception_state(peek()));
2075 break;
2077 case Bytecodes::_goto: // fall through
2078 case Bytecodes::_goto_w: {
2079 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2081 // If this is a backwards branch in the bytecodes, add Safepoint
2082 maybe_add_safepoint(target_bci);
2084 // Update method data
2085 profile_taken_branch(target_bci);
2087 // Merge the current control into the target basic block
2088 merge(target_bci);
2090 // See if we can get some profile data and hand it off to the next block
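// The handoff is only valid when this goto is the target block's sole predecessor;
// otherwise the taken count from this branch's JumpData would understate the
// block's real frequency. scale_count() presumably rescales the raw profile count
// into this compilation's frequency units.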
2091 Block *target_block = block()->successor_for_bci(target_bci);
2092 if (target_block->pred_count() != 1) break;
2093 ciMethodData* methodData = method()->method_data();
2094 if (!methodData->is_mature()) break;
2095 ciProfileData* data = methodData->bci_to_data(bci());
2096 assert( data->is_JumpData(), "" );
2097 int taken = ((ciJumpData*)data)->taken();
2098 taken = method()->scale_count(taken);
2099 target_block->set_count(taken);
2100 break;
2101 }
2103 case Bytecodes::_ifnull:
2104 do_ifnull(BoolTest::eq);
2105 break;
2106 case Bytecodes::_ifnonnull:
2107 do_ifnull(BoolTest::ne);
2108 break;
2110 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2111 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2112 handle_if_acmp:
2113 // If this is a backwards branch in the bytecodes, add Safepoint
2114 maybe_add_safepoint(iter().get_dest());
2115 a = pop();
2116 b = pop();
2117 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2118 do_if(btest, c);
2119 break;
2121 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2122 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2123 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2124 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2125 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2126 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2127 handle_ifxx:
2128 // If this is a backwards branch in the bytecodes, add Safepoint
2129 maybe_add_safepoint(iter().get_dest());
2130 a = _gvn.intcon(0);
2131 b = pop();
2132 c = _gvn.transform( new (C, 3) CmpINode(b, a) );
2133 do_if(btest, c);
2134 break;
2136 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2137 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2138 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2139 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2140 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2141 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2142 handle_if_icmp:
2143 // If this is a backwards branch in the bytecodes, add Safepoint
2144 maybe_add_safepoint(iter().get_dest());
2145 a = pop();
2146 b = pop();
2147 c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
2148 do_if(btest, c);
2149 break;
2151 case Bytecodes::_tableswitch:
2152 do_tableswitch();
2153 break;
2155 case Bytecodes::_lookupswitch:
2156 do_lookupswitch();
2157 break;
2159 case Bytecodes::_invokestatic:
2160 case Bytecodes::_invokespecial:
2161 case Bytecodes::_invokevirtual:
2162 case Bytecodes::_invokeinterface:
2163 do_call();
2164 break;
2165 case Bytecodes::_checkcast:
2166 do_checkcast();
2167 break;
2168 case Bytecodes::_instanceof:
2169 do_instanceof();
2170 break;
2171 case Bytecodes::_anewarray:
2172 do_anewarray();
2173 break;
2174 case Bytecodes::_newarray:
2175 do_newarray((BasicType)iter().get_index());
2176 break;
2177 case Bytecodes::_multianewarray:
2178 do_multianewarray();
2179 break;
2180 case Bytecodes::_new:
2181 do_new();
2182 break;
2184 case Bytecodes::_jsr:
2185 case Bytecodes::_jsr_w:
2186 do_jsr();
2187 break;
2189 case Bytecodes::_ret:
2190 do_ret();
2191 break;
2194 case Bytecodes::_monitorenter:
2195 do_monitor_enter();
2196 break;
2198 case Bytecodes::_monitorexit:
2199 do_monitor_exit();
2200 break;
2202 case Bytecodes::_breakpoint:
2203 // Breakpoint set concurrently with compilation
2204 // %%% use an uncommon trap?
2205 C->record_failure("breakpoint in method");
2206 return;
2208 default:
2209 #ifndef PRODUCT
2210 map()->dump(99);
2211 #endif
2212 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2213 ShouldNotReachHere();
2214 }
2216 #ifndef PRODUCT
2217 IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2218 if(printer) {
2219 char buffer[256];
2220 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2221 bool old = printer->traverse_outs();
2222 printer->set_traverse_outs(true);
2223 printer->print_method(C, buffer, 3);
2224 printer->set_traverse_outs(old);
2225 }
2226 #endif
2227 }