Sun, 13 Apr 2008 17:43:42 -0400
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
Summary: Compressed oops in instances, arrays, and headers. Code contributors are coleenp, phh, never, swamyv
Reviewed-by: jmasa, kamg, acorn, tbell, kvn, rasbold
1 /*
2 * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_parse2.cpp.incl"
28 extern int explicit_null_checks_inserted,
29 explicit_null_checks_elided;
31 //---------------------------------array_load----------------------------------
32 void Parse::array_load(BasicType elem_type) {
33 const Type* elem = Type::TOP;
34 Node* adr = array_addressing(elem_type, 0, &elem);
35 if (stopped()) return; // guaranteed null or range check
36 _sp -= 2; // Pop array and index
37 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
38 Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
39 push(ld);
40 }
43 //--------------------------------array_store----------------------------------
44 void Parse::array_store(BasicType elem_type) {
45 Node* adr = array_addressing(elem_type, 1);
46 if (stopped()) return; // guaranteed null or range check
47 Node* val = pop();
48 _sp -= 2; // Pop array and index
49 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
50 store_to_memory(control(), adr, val, elem_type, adr_type);
51 }
54 //------------------------------array_addressing-------------------------------
55 // Pull array and index from the stack. Compute pointer-to-element.
56 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
57 Node *idx = peek(0+vals); // Get from stack without popping
58 Node *ary = peek(1+vals); // in case of exception
60 // Null check the array base, with correct stack contents
61 ary = do_null_check(ary, T_ARRAY);
62 // Compile-time detection of a null exception?
63 if (stopped()) return top();
65 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
66 const TypeInt* sizetype = arytype->size();
67 const Type* elemtype = arytype->elem();
69 if (UseUniqueSubclasses && result2 != NULL) {
70 const Type* el = elemtype;
71 if (elemtype->isa_narrowoop()) {
72 el = elemtype->is_narrowoop()->make_oopptr();
73 }
74 const TypeInstPtr* toop = el->isa_instptr();
75 if (toop) {
76 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
77 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
78 const Type* subklass = Type::get_const_type(toop->klass());
79 elemtype = subklass->join(el);
80 }
81 }
82 }
84 // Check for big class initializers with all constant offsets
85 // feeding into a known-size array.
86 const TypeInt* idxtype = _gvn.type(idx)->is_int();
87 // See if the highest idx value is less than the lowest array bound,
88 // and if the idx value cannot be negative:
89 bool need_range_check = true;
90 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
91 need_range_check = false;
92 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
93 }
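  // For instance, an array whose type is known to have size [10,10] indexed by
  // a value already narrowed to [0,9] (the common counted-loop case) satisfies
  // idxtype->_hi (9) < sizetype->_lo (10) with idxtype->_lo >= 0, so the
  // explicit range check below is skipped entirely.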
95 if (!arytype->klass()->is_loaded()) {
96 // Only fails for some -Xcomp runs
97 // The class is unloaded. We have to run this bytecode in the interpreter.
98 uncommon_trap(Deoptimization::Reason_unloaded,
99 Deoptimization::Action_reinterpret,
100 arytype->klass(), "!loaded array");
101 return top();
102 }
104 // Do the range check
105 if (GenerateRangeChecks && need_range_check) {
106 // Range is constant in array-oop, so we can use the original state of mem
107 Node* len = load_array_length(ary);
108 // Test length vs index (standard trick using unsigned compare)
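  // The unsigned compare folds both bounds into one test: a negative idx
  // reinterpreted as unsigned becomes a huge value (e.g. -1 -> 0xFFFFFFFF),
  // so "idx <u len" fails for idx < 0 as well as for idx >= len.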
109 Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
110 BoolTest::mask btest = BoolTest::lt;
111 Node* tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
112 // Branch to failure if out of bounds
113 { BuildCutout unless(this, tst, PROB_MAX);
114 if (C->allow_range_check_smearing()) {
115 // Do not use builtin_throw, since range checks are sometimes
116 // made more stringent by an optimistic transformation.
117 // This creates "tentative" range checks at this point,
118 // which are not guaranteed to throw exceptions.
119 // See IfNode::Ideal, is_range_check, adjust_check.
120 uncommon_trap(Deoptimization::Reason_range_check,
121 Deoptimization::Action_make_not_entrant,
122 NULL, "range_check");
123 } else {
124 // If we have already recompiled with the range-check-widening
125 // heroic optimization turned off, then we must really be throwing
126 // range check exceptions.
127 builtin_throw(Deoptimization::Reason_range_check, idx);
128 }
129 }
130 }
131 // Check for always knowing you are throwing a range-check exception
132 if (stopped()) return top();
134 Node* ptr = array_element_address( ary, idx, type, sizetype);
136 if (result2 != NULL) *result2 = elemtype;
137 return ptr;
138 }
141 // returns IfNode
142 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
143 Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
144 Node *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
145 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
146 return iff;
147 }
149 // return Region node
150 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
151 Node *region = new (C, 3) RegionNode(3); // 2 results
152 record_for_igvn(region);
153 region->init_req(1, iffalse);
154 region->init_req(2, iftrue );
155 _gvn.set_type(region, Type::CONTROL);
156 region = _gvn.transform(region);
157 set_control (region);
158 return region;
159 }
162 //------------------------------helper for tableswitch-------------------------
163 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
164 // True branch, use existing map info
165 { PreserveJVMState pjvms(this);
166 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
167 set_control( iftrue );
168 profile_switch_case(prof_table_index);
169 merge_new_path(dest_bci_if_true);
170 }
172 // False branch
173 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
174 set_control( iffalse );
175 }
177 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
178 // True branch, use existing map info
179 { PreserveJVMState pjvms(this);
180 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
181 set_control( iffalse );
182 profile_switch_case(prof_table_index);
183 merge_new_path(dest_bci_if_true);
184 }
186 // False branch
187 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
188 set_control( iftrue );
189 }
191 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
192 // False branch, use existing map and control()
193 profile_switch_case(prof_table_index);
194 merge_new_path(dest_bci);
195 }
198 extern "C" {
199 static int jint_cmp(const void *i, const void *j) {
200 int a = *(jint *)i;
201 int b = *(jint *)j;
202 return a > b ? 1 : a < b ? -1 : 0;
203 }
204 }
207 // Default value for methodData switch indexing. Must be a negative value to avoid
208 // conflict with any legal switch index.
209 #define NullTableIndex -1
211 class SwitchRange : public StackObj {
212 // a range of integers coupled with a bci destination
213 jint _lo; // inclusive lower limit
214 jint _hi; // inclusive upper limit
215 int _dest;
216 int _table_index; // index into method data table
218 public:
219 jint lo() const { return _lo; }
220 jint hi() const { return _hi; }
221 int dest() const { return _dest; }
222 int table_index() const { return _table_index; }
223 bool is_singleton() const { return _lo == _hi; }
225 void setRange(jint lo, jint hi, int dest, int table_index) {
226 assert(lo <= hi, "must be a non-empty range");
227 _lo = lo; _hi = hi; _dest = dest; _table_index = table_index;
228 }
229 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
230 assert(lo <= hi, "must be a non-empty range");
231 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
232 _hi = hi;
233 return true;
234 }
235 return false;
236 }
238 void set (jint value, int dest, int table_index) {
239 setRange(value, value, dest, table_index);
240 }
241 bool adjoin(jint value, int dest, int table_index) {
242 return adjoinRange(value, value, dest, table_index);
243 }
245 void print(ciEnv* env) {
246 if (is_singleton())
247 tty->print(" {%d}=>%d", lo(), dest());
248 else if (lo() == min_jint)
249 tty->print(" {..%d}=>%d", hi(), dest());
250 else if (hi() == max_jint)
251 tty->print(" {%d..}=>%d", lo(), dest());
252 else
253 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
254 }
255 };
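// A SwitchRange array is built so that adjacent cases with the same
// destination collapse into one entry; e.g. cases 1, 2 and 3 all branching
// to the same bci become a single {1..3}=>bci range via adjoinRange().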
258 //-------------------------------do_tableswitch--------------------------------
259 void Parse::do_tableswitch() {
260 Node* lookup = pop();
262 // Get information about tableswitch
263 int default_dest = iter().get_dest_table(0);
264 int lo_index = iter().get_int_table(1);
265 int hi_index = iter().get_int_table(2);
266 int len = hi_index - lo_index + 1;
268 if (len < 1) {
269 // If this is a backward branch, add safepoint
270 maybe_add_safepoint(default_dest);
271 merge(default_dest);
272 return;
273 }
275 // generate decision tree, using trichotomy when possible
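  // The ranges built below cover all of jint: e.g. a tableswitch with
  // lo_index=5, hi_index=7 yields {min_jint..4}=>default, one range per case
  // (merged where destinations repeat), then {8..max_jint}=>default, which is
  // why at most len+2 SwitchRanges are ever needed.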
276 int rnum = len+2;
277 bool makes_backward_branch = false;
278 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
279 int rp = -1;
280 if (lo_index != min_jint) {
281 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
282 }
283 for (int j = 0; j < len; j++) {
284 jint match_int = lo_index+j;
285 int dest = iter().get_dest_table(j+3);
286 makes_backward_branch |= (dest <= bci());
287 int table_index = method_data_update() ? j : NullTableIndex;
288 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
289 ranges[++rp].set(match_int, dest, table_index);
290 }
291 }
292 jint highest = lo_index+(len-1);
293 assert(ranges[rp].hi() == highest, "");
294 if (highest != max_jint
295 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
296 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
297 }
298 assert(rp < len+2, "not too many ranges");
300 // Safepoint in case a backward branch was observed
301 if( makes_backward_branch && UseLoopSafepoints )
302 add_safepoint();
304 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
305 }
308 //------------------------------do_lookupswitch--------------------------------
309 void Parse::do_lookupswitch() {
310 Node *lookup = pop(); // lookup value
311 // Get information about lookupswitch
312 int default_dest = iter().get_dest_table(0);
313 int len = iter().get_int_table(1);
315 if (len < 1) { // If this is a backward branch, add safepoint
316 maybe_add_safepoint(default_dest);
317 merge(default_dest);
318 return;
319 }
321 // generate decision tree, using trichotomy when possible
322 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
323 {
324 for( int j = 0; j < len; j++ ) {
325 table[j+j+0] = iter().get_int_table(2+j+j);
326 table[j+j+1] = iter().get_dest_table(2+j+j+1);
327 }
328 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
329 }
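  // The table holds flattened (match_value, dest) pairs; qsort with a width of
  // two jints and jint_cmp (which looks only at the first jint of each pair)
  // orders the pairs by match value so the ranges can be built in one pass.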
331 int rnum = len*2+1;
332 bool makes_backward_branch = false;
333 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
334 int rp = -1;
335 for( int j = 0; j < len; j++ ) {
336 jint match_int = table[j+j+0];
337 int dest = table[j+j+1];
338 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
339 int table_index = method_data_update() ? j : NullTableIndex;
340 makes_backward_branch |= (dest <= bci());
341 if( match_int != next_lo ) {
342 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
343 }
344 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
345 ranges[++rp].set(match_int, dest, table_index);
346 }
347 }
348 jint highest = table[2*(len-1)];
349 assert(ranges[rp].hi() == highest, "");
350 if( highest != max_jint
351 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
352 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
353 }
354 assert(rp < rnum, "not too many ranges");
356 // Safepoint in case a backward branch was observed
357 if( makes_backward_branch && UseLoopSafepoints )
358 add_safepoint();
360 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
361 }
363 //----------------------------create_jump_tables-------------------------------
364 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
365 // Are jump tables enabled?
366 if (!UseJumpTables) return false;
368 // Are jump tables supported?
369 if (!Matcher::has_match_rule(Op_Jump)) return false;
371 // Don't make jump table if profiling
372 if (method_data_update()) return false;
374 // Decide if a guard is needed to lop off big ranges at either (or
375 // both) end(s) of the input set. We'll call this the default target
376 // even though we can't be sure that it is the true "default".
378 bool needs_guard = false;
379 int default_dest;
380 int64 total_outlier_size = 0;
381 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
382 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
384 if (lo->dest() == hi->dest()) {
385 total_outlier_size = hi_size + lo_size;
386 default_dest = lo->dest();
387 } else if (lo_size > hi_size) {
388 total_outlier_size = lo_size;
389 default_dest = lo->dest();
390 } else {
391 total_outlier_size = hi_size;
392 default_dest = hi->dest();
393 }
395 // If a guard test will eliminate very sparse end ranges, then
396 // it is worth the cost of an extra jump.
397 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
398 needs_guard = true;
399 if (default_dest == lo->dest()) lo++;
400 if (default_dest == hi->dest()) hi--;
401 }
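  // The outermost ranges typically run out to min_jint/max_jint and both go to
  // the default target; trimming them here and re-routing out-of-range keys
  // through the single unsigned guard emitted below keeps the jump table sized
  // to the dense middle of the key space.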
403 // Find the total number of cases and ranges
404 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
405 int num_range = hi - lo + 1;
407 // Don't create table if: too large, too small, or too sparse.
408 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
409 return false;
410 if (num_cases > (MaxJumpTableSparseness * num_range))
411 return false;
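  // The sparseness test is purely arithmetic: e.g. if MaxJumpTableSparseness
  // were 5, a span of 100 key values (num_cases) covered by only 10 distinct
  // ranges (num_range) would be rejected, since 100 > 5 * 10.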
413 // Normalize table lookups to zero
414 int lowval = lo->lo();
415 key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
417 // Generate a guard to protect against input keyvals that aren't
418 // in the switch domain.
419 if (needs_guard) {
420 Node* size = _gvn.intcon(num_cases);
421 Node* cmp = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
422 Node* tst = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
423 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
424 jump_if_true_fork(iff, default_dest, NullTableIndex);
425 }
427 // Create an ideal node JumpTable that has projections
428 // of all possible ranges for a switch statement
429 // The key_val input must be converted to a pointer offset and scaled.
430 // Compare Parse::array_addressing above.
431 #ifdef _LP64
432 // Clean the 32-bit int into a real 64-bit offset.
433 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
434 const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
435 key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
436 #endif
437 // Scale the value by wordSize so we have a byte offset into the jump
438 // table, rather than a raw switch value
439 Node *shiftWord = _gvn.MakeConX(wordSize);
440 key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
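  // Roughly: a key of 102 with lowval=100 becomes 2 after the SubI above, and
  // on a 64-bit VM (wordSize == 8) scales to a byte offset of 16 into the
  // jump table that the matcher will emit.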
442 // Create the JumpNode
443 Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
445 // These are the switch destinations hanging off the jumpnode
446 int i = 0;
447 for (SwitchRange* r = lo; r <= hi; r++) {
448 for (int j = r->lo(); j <= r->hi(); j++, i++) {
449 Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
450 {
451 PreserveJVMState pjvms(this);
452 set_control(input);
453 jump_if_always_fork(r->dest(), r->table_index());
454 }
455 }
456 }
457 assert(i == num_cases, "miscount of cases");
458 stop_and_kill_map(); // no more uses for this JVMS
459 return true;
460 }
462 //----------------------------jump_switch_ranges-------------------------------
463 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
464 Block* switch_block = block();
466 if (switch_depth == 0) {
467 // Do special processing for the top-level call.
468 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
469 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
471 // Decrement pred-numbers for the unique set of nodes.
472 #ifdef ASSERT
473 // Ensure that the block's successors are a (duplicate-free) set.
474 int successors_counted = 0; // block occurrences in [hi..lo]
475 int unique_successors = switch_block->num_successors();
476 for (int i = 0; i < unique_successors; i++) {
477 Block* target = switch_block->successor_at(i);
479 // Check that the set of successors is the same in both places.
480 int successors_found = 0;
481 for (SwitchRange* p = lo; p <= hi; p++) {
482 if (p->dest() == target->start()) successors_found++;
483 }
484 assert(successors_found > 0, "successor must be known");
485 successors_counted += successors_found;
486 }
487 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
488 #endif
490 // Maybe prune the inputs, based on the type of key_val.
491 jint min_val = min_jint;
492 jint max_val = max_jint;
493 const TypeInt* ti = key_val->bottom_type()->isa_int();
494 if (ti != NULL) {
495 min_val = ti->_lo;
496 max_val = ti->_hi;
497 assert(min_val <= max_val, "invalid int type");
498 }
499 while (lo->hi() < min_val) lo++;
500 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
501 while (hi->lo() > max_val) hi--;
502 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
503 }
505 #ifndef PRODUCT
506 if (switch_depth == 0) {
507 _max_switch_depth = 0;
508 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
509 }
510 #endif
512 assert(lo <= hi, "must be a non-empty set of ranges");
513 if (lo == hi) {
514 jump_if_always_fork(lo->dest(), lo->table_index());
515 } else {
516 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
517 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
519 if (create_jump_tables(key_val, lo, hi)) return;
521 int nr = hi - lo + 1;
523 SwitchRange* mid = lo + nr/2;
524 // if there is an easy choice, pivot at a singleton:
525 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
527 assert(lo < mid && mid <= hi, "good pivot choice");
528 assert(nr != 2 || mid == hi, "should pick higher of 2");
529 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
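    // This is an ordinary binary search over the sorted ranges: with nr == 7
    // the pivot is lo+3. A singleton pivot gets an == test (plus an optional
    // <= test for the upper half), a non-singleton pivot gets a >= test, and
    // each half recurses with switch_depth+1.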
531 Node *test_val = _gvn.intcon(mid->lo());
533 if (mid->is_singleton()) {
534 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
535 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
537 // Special Case: If there are exactly three ranges, and the high
538 // and low range each go to the same place, omit the "gt" test,
539 // since it will not discriminate anything.
540 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
541 if (eq_test_only) {
542 assert(mid == hi-1, "");
543 }
545 // if there is a higher range, test for it and process it:
546 if (mid < hi && !eq_test_only) {
547 // two comparisons of same values--should enable 1 test for 2 branches
548 // Use BoolTest::le instead of BoolTest::gt
549 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
550 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
551 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
552 { PreserveJVMState pjvms(this);
553 set_control(iffalse);
554 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
555 }
556 set_control(iftrue);
557 }
559 } else {
560 // mid is a range, not a singleton, so treat mid..hi as a unit
561 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
563 // if there is a higher range, test for it and process it:
564 if (mid == hi) {
565 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
566 } else {
567 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
568 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
569 { PreserveJVMState pjvms(this);
570 set_control(iftrue);
571 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
572 }
573 set_control(iffalse);
574 }
575 }
577 // in any case, process the lower range
578 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
579 }
581 // Decrease pred_count for each successor after all is done.
582 if (switch_depth == 0) {
583 int unique_successors = switch_block->num_successors();
584 for (int i = 0; i < unique_successors; i++) {
585 Block* target = switch_block->successor_at(i);
586 // Throw away the pre-allocated path for each unique successor.
587 target->next_path_num();
588 }
589 }
591 #ifndef PRODUCT
592 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
593 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
594 SwitchRange* r;
595 int nsing = 0;
596 for( r = lo; r <= hi; r++ ) {
597 if( r->is_singleton() ) nsing++;
598 }
599 tty->print(">>> ");
600 _method->print_short_name();
601 tty->print_cr(" switch decision tree");
602 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
603 hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
604 if (_max_switch_depth > _est_switch_depth) {
605 tty->print_cr("******** BAD SWITCH DEPTH ********");
606 }
607 tty->print(" ");
608 for( r = lo; r <= hi; r++ ) {
609 r->print(env());
610 }
611 tty->print_cr("");
612 }
613 #endif
614 }
616 void Parse::modf() {
617 Node *f2 = pop();
618 Node *f1 = pop();
619 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
620 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
621 "frem", NULL, //no memory effects
622 f1, f2);
623 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
625 push(res);
626 }
628 void Parse::modd() {
629 Node *d2 = pop_pair();
630 Node *d1 = pop_pair();
631 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
632 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
633 "drem", NULL, //no memory effects
634 d1, top(), d2, top());
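  // In this calling convention a double occupies two argument slots, so each
  // operand is passed as a (value, top()) pair; likewise the second result
  // projection at Parms+1 is expected to be top (checked under ASSERT below).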
635 Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
637 #ifdef ASSERT
638 Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
639 assert(res_top == top(), "second value must be top");
640 #endif
642 push_pair(res_d);
643 }
645 void Parse::l2f() {
646 Node* f2 = pop();
647 Node* f1 = pop();
648 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
649 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
650 "l2f", NULL, //no memory effects
651 f1, f2);
652 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
654 push(res);
655 }
657 void Parse::do_irem() {
658 // Must keep both values on the expression-stack during null-check
659 do_null_check(peek(), T_INT);
660 // Compile-time detection of a null exception?
661 if (stopped()) return;
663 Node* b = pop();
664 Node* a = pop();
666 const Type *t = _gvn.type(b);
667 if (t != Type::TOP) {
668 const TypeInt *ti = t->is_int();
669 if (ti->is_con()) {
670 int divisor = ti->get_con();
671 // check for positive power of 2
672 if (divisor > 0 &&
673 (divisor & ~(divisor-1)) == divisor) {
674 // yes !
675 Node *mask = _gvn.intcon((divisor - 1));
676 // Sigh, must handle negative dividends
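          // Java's irem keeps the sign of the dividend, so a plain AND with
          // the mask is only correct for a >= 0. For example with divisor 8
          // (mask 7): a = 13 gives 13 & 7 = 5 on the fast path, while a = -13
          // goes through negate/and/negate: 13 & 7 = 5, then -5, matching
          // -13 % 8 in Java.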
677 Node *zero = _gvn.intcon(0);
678 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
679 Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
680 Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
681 Node *reg = jump_if_join(ift, iff);
682 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
683 // Negative path; negate/and/negate
684 Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
685 Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
686 Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
687 phi->init_req(1, negn);
688 // Fast positive case
689 Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
690 phi->init_req(2, andx);
691 // Push the merge
692 push( _gvn.transform(phi) );
693 return;
694 }
695 }
696 }
697 // Default case
698 push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
699 }
701 // Handle jsr and jsr_w bytecode
702 void Parse::do_jsr() {
703 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
705 // Store information about current state, tagged with new _jsr_bci
706 int return_bci = iter().next_bci();
707 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
709 // Update method data
710 profile_taken_branch(jsr_bci);
712 // The way we do things now, there is only one successor block
713 // for the jsr, because the target code is cloned by ciTypeFlow.
714 Block* target = successor_for_bci(jsr_bci);
716 // What got pushed?
717 const Type* ret_addr = target->peek();
718 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
720 // Effect of jsr on the stack
721 push(_gvn.makecon(ret_addr));
723 // Flow to the jsr.
724 merge(jsr_bci);
725 }
727 // Handle ret bytecode
728 void Parse::do_ret() {
729 // Find to whom we return.
730 #if 0 // %%%% MAKE THIS WORK
731 Node* con = local();
732 const TypePtr* tp = con->bottom_type()->isa_ptr();
733 assert(tp && tp->singleton(), "");
734 int return_bci = (int) tp->get_con();
735 merge(return_bci);
736 #else
737 assert(block()->num_successors() == 1, "a ret can only go one place now");
738 Block* target = block()->successor_at(0);
739 assert(!target->is_ready(), "our arrival must be expected");
740 profile_ret(target->flow()->start());
741 int pnum = target->next_path_num();
742 merge_common(target, pnum);
743 #endif
744 }
746 //--------------------------dynamic_branch_prediction--------------------------
747 // Try to gather dynamic branch prediction behavior. Return a probability
748 // of the branch being taken and set the "cnt" field. Returns -1.0
749 // (PROB_UNKNOWN) if we need to use static prediction for some reason.
750 float Parse::dynamic_branch_prediction(float &cnt) {
751 ResourceMark rm;
753 cnt = COUNT_UNKNOWN;
755 // Use MethodData information if it is available
756 // FIXME: free the ProfileData structure
757 ciMethodData* methodData = method()->method_data();
758 if (!methodData->is_mature()) return PROB_UNKNOWN;
759 ciProfileData* data = methodData->bci_to_data(bci());
760 if (!data->is_JumpData()) return PROB_UNKNOWN;
762 // get taken and not taken values
763 int taken = data->as_JumpData()->taken();
764 int not_taken = 0;
765 if (data->is_BranchData()) {
766 not_taken = data->as_BranchData()->not_taken();
767 }
769 // scale the counts to be commensurate with invocation counts:
770 taken = method()->scale_count(taken);
771 not_taken = method()->scale_count(not_taken);
773 // Give up if too few counts to be meaningful
774 if (taken + not_taken < 40) {
775 if (C->log() != NULL) {
776 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
777 }
778 return PROB_UNKNOWN;
779 }
781 // Compute frequency that we arrive here
782 int sum = taken + not_taken;
783 // Adjust, if this block is a cloned private block but the
784 // Jump counts are shared. Take the private counts for
785 // just this path instead of the shared counts.
786 if( block()->count() > 0 )
787 sum = block()->count();
788 cnt = (float)sum / (float)FreqCountInvocations;
790 // Pin probability to sane limits
791 float prob;
792 if( !taken )
793 prob = (0+PROB_MIN) / 2;
794 else if( !not_taken )
795 prob = (1+PROB_MAX) / 2;
796 else { // Compute probability of true path
797 prob = (float)taken / (float)(taken + not_taken);
798 if (prob > PROB_MAX) prob = PROB_MAX;
799 if (prob < PROB_MIN) prob = PROB_MIN;
800 }
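  // E.g. taken=30, not_taken=10 gives prob = 0.75. A profile of 40/0 takes the
  // !not_taken arm and yields (1+PROB_MAX)/2, halfway between PROB_MAX and
  // certainty, which the logging below reports as "always" rather than "max".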
802 assert((cnt > 0.0f) && (prob > 0.0f),
803 "Bad frequency assignment in if");
805 if (C->log() != NULL) {
806 const char* prob_str = NULL;
807 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
808 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
809 char prob_str_buf[30];
810 if (prob_str == NULL) {
811 sprintf(prob_str_buf, "%g", prob);
812 prob_str = prob_str_buf;
813 }
814 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
815 iter().get_dest(), taken, not_taken, cnt, prob_str);
816 }
817 return prob;
818 }
820 //-----------------------------branch_prediction-------------------------------
821 float Parse::branch_prediction(float& cnt,
822 BoolTest::mask btest,
823 int target_bci) {
824 float prob = dynamic_branch_prediction(cnt);
825 // If prob is unknown, switch to static prediction
826 if (prob != PROB_UNKNOWN) return prob;
828 prob = PROB_FAIR; // Set default value
829 if (btest == BoolTest::eq) // Exactly equal test?
830 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
831 else if (btest == BoolTest::ne)
832 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
834 // If this is a conditional test guarding a backwards branch,
835 // assume it's a loop-back edge. Make it a likely taken branch.
836 if (target_bci < bci()) {
837 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
838 // Since it's an OSR, we probably have profile data, but since
839 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
840 // Let's make a special check here for completely zero counts.
841 ciMethodData* methodData = method()->method_data();
842 if (!methodData->is_empty()) {
843 ciProfileData* data = methodData->bci_to_data(bci());
844 // Only stop for truly zero counts, which mean an unknown part
845 // of the OSR-ed method, and we want to deopt to gather more stats.
846 // If you have ANY counts, then this loop is simply 'cold' relative
847 // to the OSR loop.
848 if (data->as_BranchData()->taken() +
849 data->as_BranchData()->not_taken() == 0 ) {
850 // This is the only way to return PROB_UNKNOWN:
851 return PROB_UNKNOWN;
852 }
853 }
854 }
855 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
856 }
858 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
859 return prob;
860 }
862 // The magic constants are chosen so as to match the output of
863 // branch_prediction() when the profile reports a zero taken count.
864 // It is important to distinguish zero counts unambiguously, because
865 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
866 // very small but nonzero probabilities, which if confused with zero
867 // counts would keep the program recompiling indefinitely.
868 bool Parse::seems_never_taken(float prob) {
869 return prob < PROB_MIN;
870 }
872 inline void Parse::repush_if_args() {
873 #ifndef PRODUCT
874 if (PrintOpto && WizardMode) {
875 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
876 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
877 method()->print_name(); tty->cr();
878 }
879 #endif
880 int bc_depth = - Bytecodes::depth(iter().cur_bc());
881 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
882 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
883 assert(argument(0) != NULL, "must exist");
884 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
885 _sp += bc_depth;
886 }
888 //----------------------------------do_ifnull----------------------------------
889 void Parse::do_ifnull(BoolTest::mask btest) {
890 int target_bci = iter().get_dest();
892 Block* branch_block = successor_for_bci(target_bci);
893 Block* next_block = successor_for_bci(iter().next_bci());
895 float cnt;
896 float prob = branch_prediction(cnt, btest, target_bci);
897 if (prob == PROB_UNKNOWN) {
898 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
899 #ifndef PRODUCT
900 if (PrintOpto && Verbose)
901 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
902 #endif
903 repush_if_args(); // to gather stats on loop
904 // We need to mark this branch as taken so that if we recompile we will
905 // see that it is possible. In the tiered system the interpreter doesn't
906 // do profiling and by the time we get to the lower tier from the interpreter
907 // the path may be cold again. Make sure it doesn't look untaken
908 profile_taken_branch(target_bci, !ProfileInterpreter);
909 uncommon_trap(Deoptimization::Reason_unreached,
910 Deoptimization::Action_reinterpret,
911 NULL, "cold");
912 if (EliminateAutoBox) {
913 // Mark the successor blocks as parsed
914 branch_block->next_path_num();
915 next_block->next_path_num();
916 }
917 return;
918 }
920 // If this is a backwards branch in the bytecodes, add Safepoint
921 maybe_add_safepoint(target_bci);
923 explicit_null_checks_inserted++;
924 Node* a = null();
925 Node* b = pop();
926 Node* c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
928 // Make a cast-away-nullness that is control dependent on the test
929 const Type *t = _gvn.type(b);
930 const Type *t_not_null = t->join(TypePtr::NOTNULL);
931 Node *cast = new (C, 2) CastPPNode(b,t_not_null);
933 // Generate real control flow
934 Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );
936 // Sanity check the probability value
937 assert(prob > 0.0f,"Bad probability in Parser");
938 // Need xform to put node in hash table
939 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
940 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
941 // True branch
942 { PreserveJVMState pjvms(this);
943 Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
944 set_control(iftrue);
946 if (stopped()) { // Path is dead?
947 explicit_null_checks_elided++;
948 if (EliminateAutoBox) {
949 // Mark the successor block as parsed
950 branch_block->next_path_num();
951 }
952 } else { // Path is live.
953 // Update method data
954 profile_taken_branch(target_bci);
955 adjust_map_after_if(btest, c, prob, branch_block, next_block);
956 if (!stopped())
957 merge(target_bci);
958 }
959 }
961 // False branch
962 Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
963 set_control(iffalse);
965 if (stopped()) { // Path is dead?
966 explicit_null_checks_elided++;
967 if (EliminateAutoBox) {
968 // Mark the successor block as parsed
969 next_block->next_path_num();
970 }
971 } else { // Path is live.
972 // Update method data
973 profile_not_taken_branch();
974 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
975 next_block, branch_block);
976 }
977 }
979 //------------------------------------do_if------------------------------------
980 void Parse::do_if(BoolTest::mask btest, Node* c) {
981 int target_bci = iter().get_dest();
983 Block* branch_block = successor_for_bci(target_bci);
984 Block* next_block = successor_for_bci(iter().next_bci());
986 float cnt;
987 float prob = branch_prediction(cnt, btest, target_bci);
988 float untaken_prob = 1.0 - prob;
990 if (prob == PROB_UNKNOWN) {
991 #ifndef PRODUCT
992 if (PrintOpto && Verbose)
993 tty->print_cr("Never-taken backedge stops compilation at bci %d",bci());
994 #endif
995 repush_if_args(); // to gather stats on loop
996 // We need to mark this branch as taken so that if we recompile we will
997 // see that it is possible. In the tiered system the interpreter doesn't
998 // do profiling and by the time we get to the lower tier from the interpreter
999 // the path may be cold again. Make sure it doesn't look untaken
1000 profile_taken_branch(target_bci, !ProfileInterpreter);
1001 uncommon_trap(Deoptimization::Reason_unreached,
1002 Deoptimization::Action_reinterpret,
1003 NULL, "cold");
1004 if (EliminateAutoBox) {
1005 // Mark the successor blocks as parsed
1006 branch_block->next_path_num();
1007 next_block->next_path_num();
1008 }
1009 return;
1010 }
1012 // Sanity check the probability value
1013 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1015 bool taken_if_true = true;
1016 // Convert BoolTest to canonical form:
1017 if (!BoolTest(btest).is_canonical()) {
1018 btest = BoolTest(btest).negate();
1019 taken_if_true = false;
1020 // prob is NOT updated here; it remains the probability of the taken
1021 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1022 }
1023 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1025 Node* tst0 = new (C, 2) BoolNode(c, btest);
1026 Node* tst = _gvn.transform(tst0);
1027 BoolTest::mask taken_btest = BoolTest::illegal;
1028 BoolTest::mask untaken_btest = BoolTest::illegal;
1030 if (tst->is_Bool()) {
1031 // Refresh c from the transformed bool node, since it may be
1032 // simpler than the original c. Also re-canonicalize btest.
1033 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1034 // That can arise from statements like: if (x instanceof C) ...
1035 if (tst != tst0) {
1036 // Canonicalize one more time since transform can change it.
1037 btest = tst->as_Bool()->_test._test;
1038 if (!BoolTest(btest).is_canonical()) {
1039 // Reverse edges one more time...
1040 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1041 btest = tst->as_Bool()->_test._test;
1042 assert(BoolTest(btest).is_canonical(), "sanity");
1043 taken_if_true = !taken_if_true;
1044 }
1045 c = tst->in(1);
1046 }
1047 BoolTest::mask neg_btest = BoolTest(btest).negate();
1048 taken_btest = taken_if_true ? btest : neg_btest;
1049 untaken_btest = taken_if_true ? neg_btest : btest;
1050 }
1052 // Generate real control flow
1053 float true_prob = (taken_if_true ? prob : untaken_prob);
1054 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1055 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1056 Node* taken_branch = new (C, 1) IfTrueNode(iff);
1057 Node* untaken_branch = new (C, 1) IfFalseNode(iff);
1058 if (!taken_if_true) { // Finish conversion to canonical form
1059 Node* tmp = taken_branch;
1060 taken_branch = untaken_branch;
1061 untaken_branch = tmp;
1062 }
1064 // Branch is taken:
1065 { PreserveJVMState pjvms(this);
1066 taken_branch = _gvn.transform(taken_branch);
1067 set_control(taken_branch);
1069 if (stopped()) {
1070 if (EliminateAutoBox) {
1071 // Mark the successor block as parsed
1072 branch_block->next_path_num();
1073 }
1074 } else {
1075 // Update method data
1076 profile_taken_branch(target_bci);
1077 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1078 if (!stopped())
1079 merge(target_bci);
1080 }
1081 }
1083 untaken_branch = _gvn.transform(untaken_branch);
1084 set_control(untaken_branch);
1086 // Branch not taken.
1087 if (stopped()) {
1088 if (EliminateAutoBox) {
1089 // Mark the successor block as parsed
1090 next_block->next_path_num();
1091 }
1092 } else {
1093 // Update method data
1094 profile_not_taken_branch();
1095 adjust_map_after_if(untaken_btest, c, untaken_prob,
1096 next_block, branch_block);
1097 }
1098 }
1100 //----------------------------adjust_map_after_if------------------------------
1101 // Adjust the JVM state to reflect the result of taking this path.
1102 // Basically, it means inspecting the CmpNode controlling this
1103 // branch, seeing how it constrains a tested value, and then
1104 // deciding if it's worth our while to encode this constraint
1105 // as graph nodes in the current abstract interpretation map.
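// For example, on the taken side of "if (x == 7)" the int in the map can be
// replaced by a CastII pinned to this control with the joined constant type,
// and on the path where "p == null" fails the pointer can be replaced by a
// not-null cast, so downstream code sees the sharpened type.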
1106 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1107 Block* path, Block* other_path) {
1108 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1109 return; // nothing to do
1111 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1113 int cop = c->Opcode();
1114 if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) {
1115 // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.)
1116 //
1117 // If this might possibly turn into an implicit null check,
1118 // and the null has never yet been seen, we need to generate
1119 // an uncommon trap, so as to recompile instead of suffering
1120 // with very slow branches. (We'll get the slow branches if
1121 // the program ever changes phase and starts seeing nulls here.)
1122 //
1123 // The tests we worry about are of the form (p == null).
1124 // We do not simply inspect for a null constant, since a node may
1125 // optimize to 'null' later on.
1126 repush_if_args();
1127 // We need to mark this branch as taken so that if we recompile we will
1128 // see that it is possible. In the tiered system the interpreter doesn't
1129 // do profiling and by the time we get to the lower tier from the interpreter
1130 // the path may be cold again. Make sure it doesn't look untaken
1131 if (is_fallthrough) {
1132 profile_not_taken_branch(!ProfileInterpreter);
1133 } else {
1134 profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
1135 }
1136 uncommon_trap(Deoptimization::Reason_unreached,
1137 Deoptimization::Action_reinterpret,
1138 NULL,
1139 (is_fallthrough ? "taken always" : "taken never"));
1140 return;
1141 }
1143 Node* val = c->in(1);
1144 Node* con = c->in(2);
1145 const Type* tcon = _gvn.type(con);
1146 const Type* tval = _gvn.type(val);
1147 bool have_con = tcon->singleton();
1148 if (tval->singleton()) {
1149 if (!have_con) {
1150 // Swap, so constant is in con.
1151 con = val;
1152 tcon = tval;
1153 val = c->in(2);
1154 tval = _gvn.type(val);
1155 btest = BoolTest(btest).commute();
1156 have_con = true;
1157 } else {
1158 // Do we have two constants? Then leave well enough alone.
1159 have_con = false;
1160 }
1161 }
1162 if (!have_con) // remaining adjustments need a con
1163 return;
1166 int val_in_map = map()->find_edge(val);
1167 if (val_in_map < 0) return; // replace_in_map would be useless
1168 {
1169 JVMState* jvms = this->jvms();
1170 if (!(jvms->is_loc(val_in_map) ||
1171 jvms->is_stk(val_in_map)))
1172 return; // again, it would be useless
1173 }
1175 // Check for a comparison to a constant, and "know" that the compared
1176 // value is constrained on this path.
1177 assert(tcon->singleton(), "");
1178 ConstraintCastNode* ccast = NULL;
1179 Node* cast = NULL;
1181 switch (btest) {
1182 case BoolTest::eq: // Constant test?
1183 {
1184 const Type* tboth = tcon->join(tval);
1185 if (tboth == tval) break; // Nothing to gain.
1186 if (tcon->isa_int()) {
1187 ccast = new (C, 2) CastIINode(val, tboth);
1188 } else if (tcon == TypePtr::NULL_PTR) {
1189 // Cast to null, but keep the pointer identity temporarily live.
1190 ccast = new (C, 2) CastPPNode(val, tboth);
1191 } else {
1192 const TypeF* tf = tcon->isa_float_constant();
1193 const TypeD* td = tcon->isa_double_constant();
1194 // Exclude tests vs float/double 0 as these could be
1195 // either +0 or -0. Just because you are equal to +0
1196 // doesn't mean you ARE +0!
1197 if ((!tf || tf->_f != 0.0) &&
1198 (!td || td->_d != 0.0))
1199 cast = con; // Replace non-constant val by con.
1200 }
1201 }
1202 break;
1204 case BoolTest::ne:
1205 if (tcon == TypePtr::NULL_PTR) {
1206 cast = cast_not_null(val, false);
1207 }
1208 break;
1210 default:
1211 // (At this point we could record int range types with CastII.)
1212 break;
1213 }
1215 if (ccast != NULL) {
1216 const Type* tcc = ccast->as_Type()->type();
1217 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1218 // Delay transform() call to allow recovery of pre-cast value
1219 // at the control merge.
1220 ccast->set_req(0, control());
1221 _gvn.set_type_bottom(ccast);
1222 record_for_igvn(ccast);
1223 cast = ccast;
1224 }
1226 if (cast != NULL) { // Here's the payoff.
1227 replace_in_map(val, cast);
1228 }
1229 }
1232 //------------------------------do_one_bytecode--------------------------------
1233 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1234 void Parse::do_one_bytecode() {
1235 Node *a, *b, *c, *d; // Handy temps
1236 BoolTest::mask btest;
1237 int i;
1239 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1241 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1242 "out of nodes parsing method")) {
1243 return;
1244 }
1246 #ifdef ASSERT
1247 // for setting breakpoints
1248 if (TraceOptoParse) {
1249 tty->print(" @");
1250 dump_bci(bci());
1251 }
1252 #endif
1254 switch (bc()) {
1255 case Bytecodes::_nop:
1256 // do nothing
1257 break;
1258 case Bytecodes::_lconst_0:
1259 push_pair(longcon(0));
1260 break;
1262 case Bytecodes::_lconst_1:
1263 push_pair(longcon(1));
1264 break;
1266 case Bytecodes::_fconst_0:
1267 push(zerocon(T_FLOAT));
1268 break;
1270 case Bytecodes::_fconst_1:
1271 push(makecon(TypeF::ONE));
1272 break;
1274 case Bytecodes::_fconst_2:
1275 push(makecon(TypeF::make(2.0f)));
1276 break;
1278 case Bytecodes::_dconst_0:
1279 push_pair(zerocon(T_DOUBLE));
1280 break;
1282 case Bytecodes::_dconst_1:
1283 push_pair(makecon(TypeD::ONE));
1284 break;
1286 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1287 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1288 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1289 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1290 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1291 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1292 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1293 case Bytecodes::_bipush: push(intcon( iter().get_byte())); break;
1294 case Bytecodes::_sipush: push(intcon( iter().get_short())); break;
1295 case Bytecodes::_aconst_null: push(null()); break;
1296 case Bytecodes::_ldc:
1297 case Bytecodes::_ldc_w:
1298 case Bytecodes::_ldc2_w:
1299 // If the constant is unresolved, run this BC once in the interpreter.
1300 if (iter().is_unresolved_string()) {
1301 uncommon_trap(Deoptimization::make_trap_request
1302 (Deoptimization::Reason_unloaded,
1303 Deoptimization::Action_reinterpret,
1304 iter().get_constant_index()),
1305 NULL, "unresolved_string");
1306 break;
1307 } else {
1308 ciConstant constant = iter().get_constant();
1309 if (constant.basic_type() == T_OBJECT) {
1310 ciObject* c = constant.as_object();
1311 if (c->is_klass()) {
1312 // The constant returned for a klass is the ciKlass for the
1313 // entry. We want the java_mirror so get it.
1314 ciKlass* klass = c->as_klass();
1315 if (klass->is_loaded()) {
1316 constant = ciConstant(T_OBJECT, klass->java_mirror());
1317 } else {
1318 uncommon_trap(Deoptimization::make_trap_request
1319 (Deoptimization::Reason_unloaded,
1320 Deoptimization::Action_reinterpret,
1321 iter().get_constant_index()),
1322 NULL, "unresolved_klass");
1323 break;
1324 }
1325 }
1326 }
1327 push_constant(constant);
1328 }
1330 break;
1332 case Bytecodes::_aload_0:
1333 push( local(0) );
1334 break;
1335 case Bytecodes::_aload_1:
1336 push( local(1) );
1337 break;
1338 case Bytecodes::_aload_2:
1339 push( local(2) );
1340 break;
1341 case Bytecodes::_aload_3:
1342 push( local(3) );
1343 break;
1344 case Bytecodes::_aload:
1345 push( local(iter().get_index()) );
1346 break;
1348 case Bytecodes::_fload_0:
1349 case Bytecodes::_iload_0:
1350 push( local(0) );
1351 break;
1352 case Bytecodes::_fload_1:
1353 case Bytecodes::_iload_1:
1354 push( local(1) );
1355 break;
1356 case Bytecodes::_fload_2:
1357 case Bytecodes::_iload_2:
1358 push( local(2) );
1359 break;
1360 case Bytecodes::_fload_3:
1361 case Bytecodes::_iload_3:
1362 push( local(3) );
1363 break;
1364 case Bytecodes::_fload:
1365 case Bytecodes::_iload:
1366 push( local(iter().get_index()) );
1367 break;
1368 case Bytecodes::_lload_0:
1369 push_pair_local( 0 );
1370 break;
1371 case Bytecodes::_lload_1:
1372 push_pair_local( 1 );
1373 break;
1374 case Bytecodes::_lload_2:
1375 push_pair_local( 2 );
1376 break;
1377 case Bytecodes::_lload_3:
1378 push_pair_local( 3 );
1379 break;
1380 case Bytecodes::_lload:
1381 push_pair_local( iter().get_index() );
1382 break;
1384 case Bytecodes::_dload_0:
1385 push_pair_local(0);
1386 break;
1387 case Bytecodes::_dload_1:
1388 push_pair_local(1);
1389 break;
1390 case Bytecodes::_dload_2:
1391 push_pair_local(2);
1392 break;
1393 case Bytecodes::_dload_3:
1394 push_pair_local(3);
1395 break;
1396 case Bytecodes::_dload:
1397 push_pair_local(iter().get_index());
1398 break;
1399 case Bytecodes::_fstore_0:
1400 case Bytecodes::_istore_0:
1401 case Bytecodes::_astore_0:
1402 set_local( 0, pop() );
1403 break;
1404 case Bytecodes::_fstore_1:
1405 case Bytecodes::_istore_1:
1406 case Bytecodes::_astore_1:
1407 set_local( 1, pop() );
1408 break;
1409 case Bytecodes::_fstore_2:
1410 case Bytecodes::_istore_2:
1411 case Bytecodes::_astore_2:
1412 set_local( 2, pop() );
1413 break;
1414 case Bytecodes::_fstore_3:
1415 case Bytecodes::_istore_3:
1416 case Bytecodes::_astore_3:
1417 set_local( 3, pop() );
1418 break;
1419 case Bytecodes::_fstore:
1420 case Bytecodes::_istore:
1421 case Bytecodes::_astore:
1422 set_local( iter().get_index(), pop() );
1423 break;
1424 // long stores
1425 case Bytecodes::_lstore_0:
1426 set_pair_local( 0, pop_pair() );
1427 break;
1428 case Bytecodes::_lstore_1:
1429 set_pair_local( 1, pop_pair() );
1430 break;
1431 case Bytecodes::_lstore_2:
1432 set_pair_local( 2, pop_pair() );
1433 break;
1434 case Bytecodes::_lstore_3:
1435 set_pair_local( 3, pop_pair() );
1436 break;
1437 case Bytecodes::_lstore:
1438 set_pair_local( iter().get_index(), pop_pair() );
1439 break;
1441 // double stores
1442 case Bytecodes::_dstore_0:
1443 set_pair_local( 0, dstore_rounding(pop_pair()) );
1444 break;
1445 case Bytecodes::_dstore_1:
1446 set_pair_local( 1, dstore_rounding(pop_pair()) );
1447 break;
1448 case Bytecodes::_dstore_2:
1449 set_pair_local( 2, dstore_rounding(pop_pair()) );
1450 break;
1451 case Bytecodes::_dstore_3:
1452 set_pair_local( 3, dstore_rounding(pop_pair()) );
1453 break;
1454 case Bytecodes::_dstore:
1455 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1456 break;
1458 case Bytecodes::_pop: _sp -= 1; break;
1459 case Bytecodes::_pop2: _sp -= 2; break;
1460 case Bytecodes::_swap:
1461 a = pop();
1462 b = pop();
1463 push(a);
1464 push(b);
1465 break;
1466 case Bytecodes::_dup:
1467 a = pop();
1468 push(a);
1469 push(a);
1470 break;
1471 case Bytecodes::_dup_x1:
1472 a = pop();
1473 b = pop();
1474 push( a );
1475 push( b );
1476 push( a );
1477 break;
1478 case Bytecodes::_dup_x2:
1479 a = pop();
1480 b = pop();
1481 c = pop();
1482 push( a );
1483 push( c );
1484 push( b );
1485 push( a );
1486 break;
1487 case Bytecodes::_dup2:
1488 a = pop();
1489 b = pop();
1490 push( b );
1491 push( a );
1492 push( b );
1493 push( a );
1494 break;
1496 case Bytecodes::_dup2_x1:
1497 // before: .. c, b, a
1498 // after: .. b, a, c, b, a
1499 // not tested
1500 a = pop();
1501 b = pop();
1502 c = pop();
1503 push( b );
1504 push( a );
1505 push( c );
1506 push( b );
1507 push( a );
1508 break;
1509 case Bytecodes::_dup2_x2:
1510 // before: .. d, c, b, a
1511 // after: .. b, a, d, c, b, a
1512 // not tested
1513 a = pop();
1514 b = pop();
1515 c = pop();
1516 d = pop();
1517 push( b );
1518 push( a );
1519 push( d );
1520 push( c );
1521 push( b );
1522 push( a );
1523 break;
1525 case Bytecodes::_arraylength: {
1526 // Must do null-check with value on expression stack
1527 Node *ary = do_null_check(peek(), T_ARRAY);
1528 // Compile-time detection of a null exception?
1529 if (stopped()) return;
1530 a = pop();
1531 push(load_array_length(a));
1532 break;
1533 }
1535 case Bytecodes::_baload: array_load(T_BYTE); break;
1536 case Bytecodes::_caload: array_load(T_CHAR); break;
1537 case Bytecodes::_iaload: array_load(T_INT); break;
1538 case Bytecodes::_saload: array_load(T_SHORT); break;
1539 case Bytecodes::_faload: array_load(T_FLOAT); break;
1540 case Bytecodes::_aaload: array_load(T_OBJECT); break;
1541 case Bytecodes::_laload: {
1542 a = array_addressing(T_LONG, 0);
1543 if (stopped()) return; // guaranteed null or range check
1544 _sp -= 2; // Pop array and index
1545 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
1546 break;
1547 }
1548 case Bytecodes::_daload: {
1549 a = array_addressing(T_DOUBLE, 0);
1550 if (stopped()) return; // guaranteed null or range check
1551 _sp -= 2; // Pop array and index
1552 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
1553 break;
1554 }
1555 case Bytecodes::_bastore: array_store(T_BYTE); break;
1556 case Bytecodes::_castore: array_store(T_CHAR); break;
1557 case Bytecodes::_iastore: array_store(T_INT); break;
1558 case Bytecodes::_sastore: array_store(T_SHORT); break;
1559 case Bytecodes::_fastore: array_store(T_FLOAT); break;
1560 case Bytecodes::_aastore: {
1561 d = array_addressing(T_OBJECT, 1);
1562 if (stopped()) return; // guaranteed null or range check
1563 array_store_check();
1564 c = pop(); // Oop to store
1565 b = pop(); // index (already used)
1566 a = pop(); // the array itself
1567 const Type* elemtype = _gvn.type(a)->is_aryptr()->elem();
1568 const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1569 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
1570 break;
1571 }
1572 case Bytecodes::_lastore: {
1573 a = array_addressing(T_LONG, 2);
1574 if (stopped()) return; // guaranteed null or range check
1575 c = pop_pair();
1576 _sp -= 2; // Pop array and index
1577 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
1578 break;
1579 }
1580 case Bytecodes::_dastore: {
1581 a = array_addressing(T_DOUBLE, 2);
1582 if (stopped()) return; // guaranteed null or range check
1583 c = pop_pair();
1584 _sp -= 2; // Pop array and index
1585 c = dstore_rounding(c);
1586 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
1587 break;
1588 }
1589 case Bytecodes::_getfield:
1590 do_getfield();
1591 break;
1593 case Bytecodes::_getstatic:
1594 do_getstatic();
1595 break;
1597 case Bytecodes::_putfield:
1598 do_putfield();
1599 break;
1601 case Bytecodes::_putstatic:
1602 do_putstatic();
1603 break;
1605 case Bytecodes::_irem:
1606 do_irem();
1607 break;
1608 case Bytecodes::_idiv:
1609 // Must keep both values on the expression-stack during null-check
1610 do_null_check(peek(), T_INT);
1611 // Compile-time detection of a null exception?
1612 if (stopped()) return;
1613 b = pop();
1614 a = pop();
1615 push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
1616 break;
1617 case Bytecodes::_imul:
1618 b = pop(); a = pop();
1619 push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
1620 break;
1621 case Bytecodes::_iadd:
1622 b = pop(); a = pop();
1623 push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
1624 break;
1625 case Bytecodes::_ineg:
1626 a = pop();
1627 push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
1628 break;
1629 case Bytecodes::_isub:
1630 b = pop(); a = pop();
1631 push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
1632 break;
1633 case Bytecodes::_iand:
1634 b = pop(); a = pop();
1635 push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
1636 break;
1637 case Bytecodes::_ior:
1638 b = pop(); a = pop();
1639 push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
1640 break;
1641 case Bytecodes::_ixor:
1642 b = pop(); a = pop();
1643 push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
1644 break;
1645 case Bytecodes::_ishl:
1646 b = pop(); a = pop();
1647 push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
1648 break;
1649 case Bytecodes::_ishr:
1650 b = pop(); a = pop();
1651 push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
1652 break;
1653 case Bytecodes::_iushr:
1654 b = pop(); a = pop();
1655 push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
1656 break;
1658 case Bytecodes::_fneg:
1659 a = pop();
1660 b = _gvn.transform(new (C, 2) NegFNode (a));
1661 push(b);
1662 break;
1664 case Bytecodes::_fsub:
1665 b = pop();
1666 a = pop();
1667 c = _gvn.transform( new (C, 3) SubFNode(a,b) );
1668 d = precision_rounding(c);
1669 push( d );
1670 break;
1672 case Bytecodes::_fadd:
1673 b = pop();
1674 a = pop();
1675 c = _gvn.transform( new (C, 3) AddFNode(a,b) );
1676 d = precision_rounding(c);
1677 push( d );
1678 break;
1680 case Bytecodes::_fmul:
1681 b = pop();
1682 a = pop();
1683 c = _gvn.transform( new (C, 3) MulFNode(a,b) );
1684 d = precision_rounding(c);
1685 push( d );
1686 break;
1688 case Bytecodes::_fdiv:
1689 b = pop();
1690 a = pop();
1691 c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
1692 d = precision_rounding(c);
1693 push( d );
1694 break;
1696 case Bytecodes::_frem:
1697 if (Matcher::has_match_rule(Op_ModF)) {
1698 // Generate a ModF node.
1699 b = pop();
1700 a = pop();
1701 c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
1702 d = precision_rounding(c);
1703 push( d );
1704 }
1705 else {
1706 // Generate a call.
1707 modf();
1708 }
1709 break;
1711 case Bytecodes::_fcmpl:
1712 b = pop();
1713 a = pop();
1714 c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
1715 push(c);
1716 break;
1717 case Bytecodes::_fcmpg:
1718 b = pop();
1719 a = pop();
1721 // Same as fcmpl but need to flip the unordered case. Swap the inputs,
1722 // which negates the result sign except for unordered. Flip the unordered
1723 // as well by using CmpF3 which implements unordered-lesser instead of
1724 // unordered-greater semantics. Finally, commute the result bits. Result
1725 // is same as using a CmpF3Greater except we did it with CmpF3 alone.
1726 c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
1727 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1728 push(c);
1729 break;
1731 case Bytecodes::_f2i:
1732 a = pop();
1733 push(_gvn.transform(new (C, 2) ConvF2INode(a)));
1734 break;
1736 case Bytecodes::_d2i:
1737 a = pop_pair();
1738 b = _gvn.transform(new (C, 2) ConvD2INode(a));
1739 push( b );
1740 break;
1742 case Bytecodes::_f2d:
1743 a = pop();
1744 b = _gvn.transform( new (C, 2) ConvF2DNode(a));
1745 push_pair( b );
1746 break;
1748 case Bytecodes::_d2f:
1749 a = pop_pair();
1750 b = _gvn.transform( new (C, 2) ConvD2FNode(a));
1751 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1752 //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
1753 push( b );
1754 break;
1756 case Bytecodes::_l2f:
1757 if (Matcher::convL2FSupported()) {
1758 a = pop_pair();
1759 b = _gvn.transform( new (C, 2) ConvL2FNode(a));
1760 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1761 // Rather than storing the result into an FP register then pushing
1762 // out to memory to round, the machine instruction that implements
1763 // ConvL2F is responsible for rounding.
1764 // c = precision_rounding(b);
1765 c = _gvn.transform(b);
1766 push(c);
1767 } else {
1768 l2f();
1769 }
1770 break;
1772 case Bytecodes::_l2d:
1773 a = pop_pair();
1774 b = _gvn.transform( new (C, 2) ConvL2DNode(a));
1775 // For i486.ad, rounding is always necessary (see _l2f above).
1776 // c = dprecision_rounding(b);
1777 c = _gvn.transform(b);
1778 push_pair(c);
1779 break;
1781 case Bytecodes::_f2l:
1782 a = pop();
1783 b = _gvn.transform( new (C, 2) ConvF2LNode(a));
1784 push_pair(b);
1785 break;
1787 case Bytecodes::_d2l:
1788 a = pop_pair();
1789 b = _gvn.transform( new (C, 2) ConvD2LNode(a));
1790 push_pair(b);
1791 break;
1793 case Bytecodes::_dsub:
1794 b = pop_pair();
1795 a = pop_pair();
1796 c = _gvn.transform( new (C, 3) SubDNode(a,b) );
1797 d = dprecision_rounding(c);
1798 push_pair( d );
1799 break;
1801 case Bytecodes::_dadd:
1802 b = pop_pair();
1803 a = pop_pair();
1804 c = _gvn.transform( new (C, 3) AddDNode(a,b) );
1805 d = dprecision_rounding(c);
1806 push_pair( d );
1807 break;
1809 case Bytecodes::_dmul:
1810 b = pop_pair();
1811 a = pop_pair();
1812 c = _gvn.transform( new (C, 3) MulDNode(a,b) );
1813 d = dprecision_rounding(c);
1814 push_pair( d );
1815 break;
1817 case Bytecodes::_ddiv:
1818 b = pop_pair();
1819 a = pop_pair();
1820 c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
1821 d = dprecision_rounding(c);
1822 push_pair( d );
1823 break;
1825 case Bytecodes::_dneg:
1826 a = pop_pair();
1827 b = _gvn.transform(new (C, 2) NegDNode (a));
1828 push_pair(b);
1829 break;
1831 case Bytecodes::_drem:
1832 if (Matcher::has_match_rule(Op_ModD)) {
1833 // Generate a ModD node.
1834 b = pop_pair();
1835 a = pop_pair();
1836 // a % b
1838 c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
1839 d = dprecision_rounding(c);
1840 push_pair( d );
1841 }
1842 else {
1843 // Generate a call.
1844 modd();
1845 }
1846 break;
1848 case Bytecodes::_dcmpl:
1849 b = pop_pair();
1850 a = pop_pair();
1851 c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
1852 push(c);
1853 break;
1855 case Bytecodes::_dcmpg:
1856 b = pop_pair();
1857 a = pop_pair();
1858 // Same as dcmpl but need to flip the unordered case.
1859 // Commute the inputs, which negates the result sign except for unordered.
1860 // Flip the unordered as well by using CmpD3 which implements
1861 // unordered-lesser instead of unordered-greater semantics.
1862 // Finally, negate the result bits. Result is same as using a
1863 // CmpD3Greater except we did it with CmpD3 alone.
1864 c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
1865 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1866 push(c);
1867 break;
1870 // Note for longs -> lo word is on TOS, hi word is on TOS - 1
1871 case Bytecodes::_land:
1872 b = pop_pair();
1873 a = pop_pair();
1874 c = _gvn.transform( new (C, 3) AndLNode(a,b) );
1875 push_pair(c);
1876 break;
1877 case Bytecodes::_lor:
1878 b = pop_pair();
1879 a = pop_pair();
1880 c = _gvn.transform( new (C, 3) OrLNode(a,b) );
1881 push_pair(c);
1882 break;
1883 case Bytecodes::_lxor:
1884 b = pop_pair();
1885 a = pop_pair();
1886 c = _gvn.transform( new (C, 3) XorLNode(a,b) );
1887 push_pair(c);
1888 break;
1890 case Bytecodes::_lshl:
1891 b = pop(); // the shift count
1892 a = pop_pair(); // value to be shifted
1893 c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
1894 push_pair(c);
1895 break;
1896 case Bytecodes::_lshr:
1897 b = pop(); // the shift count
1898 a = pop_pair(); // value to be shifted
1899 c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
1900 push_pair(c);
1901 break;
1902 case Bytecodes::_lushr:
1903 b = pop(); // the shift count
1904 a = pop_pair(); // value to be shifted
1905 c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
1906 push_pair(c);
1907 break;
1908 case Bytecodes::_lmul:
1909 b = pop_pair();
1910 a = pop_pair();
1911 c = _gvn.transform( new (C, 3) MulLNode(a,b) );
1912 push_pair(c);
1913 break;
1915 case Bytecodes::_lrem:
1916 // Must keep both values on the expression-stack during null-check
1917 assert(peek(0) == top(), "long word order");
1918 do_null_check(peek(1), T_LONG);
1919 // Compile-time detect of null-exception?
1920 if (stopped()) return;
1921 b = pop_pair();
1922 a = pop_pair();
1923 c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
1924 push_pair(c);
1925 break;
1927 case Bytecodes::_ldiv:
1928 // Must keep both values on the expression-stack during null-check
1929 assert(peek(0) == top(), "long word order");
1930 do_null_check(peek(1), T_LONG);
1931 // Compile-time detect of null-exception?
1932 if (stopped()) return;
1933 b = pop_pair();
1934 a = pop_pair();
1935 c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
1936 push_pair(c);
1937 break;
1939 case Bytecodes::_ladd:
1940 b = pop_pair();
1941 a = pop_pair();
1942 c = _gvn.transform( new (C, 3) AddLNode(a,b) );
1943 push_pair(c);
1944 break;
1945 case Bytecodes::_lsub:
1946 b = pop_pair();
1947 a = pop_pair();
1948 c = _gvn.transform( new (C, 3) SubLNode(a,b) );
1949 push_pair(c);
1950 break;
1951 case Bytecodes::_lcmp:
1952 // Safepoints are now inserted _before_ branches. The long-compare
1953 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
1954 // slew of control flow. These are usually followed by a CmpI vs zero and
1955 // a branch; this pattern then optimizes to the obvious long-compare and
1956 // branch. However, if the branch is backwards there's a Safepoint
1957 // inserted. The inserted Safepoint captures the JVM state at the
1958 // pre-branch point, i.e. it captures the 3-way value. Thus if a
1959 // long-compare is used to control a loop the debug info will force
1960 // computation of the 3-way value, even though the generated code uses a
1961 // long-compare and branch. We try to rectify the situation by inserting
1962 // a SafePoint here and have it dominate and kill the safepoint added at a
1963 // following backwards branch. At this point the JVM state merely holds 2
1964 // longs but not the 3-way value.
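     // For example (illustration only): a test-at-the-bottom loop over longs,
     //   while (i < n) { ... }
     // compiles to lcmp followed by a backwards iflt. Without the dominating
     // SafePoint added here, the safepoint at that backwards branch would keep
     // the CmpL3 result alive purely for its debug info.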
1965 if( UseLoopSafepoints ) {
1966 switch( iter().next_bc() ) {
1967 case Bytecodes::_ifgt:
1968 case Bytecodes::_iflt:
1969 case Bytecodes::_ifge:
1970 case Bytecodes::_ifle:
1971 case Bytecodes::_ifne:
1972 case Bytecodes::_ifeq:
1973 // If this is a backwards branch in the bytecodes, add Safepoint
1974 maybe_add_safepoint(iter().next_get_dest());
1975 }
1976 }
1977 b = pop_pair();
1978 a = pop_pair();
1979 c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
1980 push(c);
1981 break;
1983 case Bytecodes::_lneg:
1984 a = pop_pair();
1985 b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
1986 push_pair(b);
1987 break;
1988 case Bytecodes::_l2i:
1989 a = pop_pair();
1990 push( _gvn.transform( new (C, 2) ConvL2INode(a)));
1991 break;
1992 case Bytecodes::_i2l:
1993 a = pop();
1994 b = _gvn.transform( new (C, 2) ConvI2LNode(a));
1995 push_pair(b);
1996 break;
1997 case Bytecodes::_i2b:
1998 // Sign extend
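     // e.g. (illustration) a = 0x00000181 (385): << 24 gives 0x81000000,
     // arithmetic >> 24 gives 0xFFFFFF81 == -127, i.e. (byte)385.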
1999 a = pop();
2000 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
2001 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
2002 push( a );
2003 break;
2004 case Bytecodes::_i2s:
2005 a = pop();
2006 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
2007 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
2008 push( a );
2009 break;
2010 case Bytecodes::_i2c:
2011 a = pop();
2012 push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
2013 break;
2015 case Bytecodes::_i2f:
2016 a = pop();
2017 b = _gvn.transform( new (C, 2) ConvI2FNode(a) ) ;
2018 c = precision_rounding(b);
2019 push (c); // push the precision-rounded value, as in the other float cases
2020 break;
2022 case Bytecodes::_i2d:
2023 a = pop();
2024 b = _gvn.transform( new (C, 2) ConvI2DNode(a));
2025 push_pair(b);
2026 break;
2028 case Bytecodes::_iinc: // Increment local
2029 i = iter().get_index(); // Get local index
2030 set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2031 break;
2033 // Exit points of synchronized methods must have an unlock node
2034 case Bytecodes::_return:
2035 return_current(NULL);
2036 break;
2038 case Bytecodes::_ireturn:
2039 case Bytecodes::_areturn:
2040 case Bytecodes::_freturn:
2041 return_current(pop());
2042 break;
2043 case Bytecodes::_lreturn:
2044 return_current(pop_pair());
2045 break;
2046 case Bytecodes::_dreturn:
2047 return_current(pop_pair());
2048 break;
2050 case Bytecodes::_athrow:
2051 // A null exception oop results in a NullPointerException
2052 do_null_check(peek(), T_OBJECT);
2053 if (stopped()) return;
2054 if (JvmtiExport::can_post_exceptions()) {
2055 // "Full-speed throwing" is not necessary here,
2056 // since we're notifying the VM on every throw.
2057 uncommon_trap(Deoptimization::Reason_unhandled,
2058 Deoptimization::Action_none);
2059 return;
2060 }
2061 // Hook the thrown exception directly to subsequent handlers.
2062 if (BailoutToInterpreterForThrows) {
2063 // Keep method interpreted from now on.
2064 uncommon_trap(Deoptimization::Reason_unhandled,
2065 Deoptimization::Action_make_not_compilable);
2066 return;
2067 }
2068 add_exception_state(make_exception_state(peek()));
2069 break;
2071 case Bytecodes::_goto: // fall through
2072 case Bytecodes::_goto_w: {
2073 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2075 // If this is a backwards branch in the bytecodes, add Safepoint
2076 maybe_add_safepoint(target_bci);
2078 // Update method data
2079 profile_taken_branch(target_bci);
2081 // Merge the current control into the target basic block
2082 merge(target_bci);
2084 // See if we can get some profile data and hand it off to the next block
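     // (This only applies when this branch is the target's sole predecessor,
     // so the scaled taken count of the jump is the target block's count.)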
2085 Block *target_block = block()->successor_for_bci(target_bci);
2086 if (target_block->pred_count() != 1) break;
2087 ciMethodData* methodData = method()->method_data();
2088 if (!methodData->is_mature()) break;
2089 ciProfileData* data = methodData->bci_to_data(bci());
2090 assert( data->is_JumpData(), "need JumpData for taken branch" );
2091 int taken = ((ciJumpData*)data)->taken();
2092 taken = method()->scale_count(taken);
2093 target_block->set_count(taken);
2094 break;
2095 }
2097 case Bytecodes::_ifnull:
2098 do_ifnull(BoolTest::eq);
2099 break;
2100 case Bytecodes::_ifnonnull:
2101 do_ifnull(BoolTest::ne);
2102 break;
2104 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2105 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2106 handle_if_acmp:
2107 // If this is a backwards branch in the bytecodes, add Safepoint
2108 maybe_add_safepoint(iter().get_dest());
2109 a = pop();
2110 b = pop();
2111 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2112 do_if(btest, c);
2113 break;
2115 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2116 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2117 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2118 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2119 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2120 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2121 handle_ifxx:
2122 // If this is a backwards branch in the bytecodes, add Safepoint
2123 maybe_add_safepoint(iter().get_dest());
2124 a = _gvn.intcon(0);
2125 b = pop();
2126 c = _gvn.transform( new (C, 3) CmpINode(b, a) );
2127 do_if(btest, c);
2128 break;
2130 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2131 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2132 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2133 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2134 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2135 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2136 handle_if_icmp:
2137 // If this is a backwards branch in the bytecodes, add Safepoint
2138 maybe_add_safepoint(iter().get_dest());
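     // a is value2 (TOS) and b is value1; the bytecode tests value1 <cond> value2,
     // hence the comparison is built as CmpI(b, a).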
2139 a = pop();
2140 b = pop();
2141 c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
2142 do_if(btest, c);
2143 break;
2145 case Bytecodes::_tableswitch:
2146 do_tableswitch();
2147 break;
2149 case Bytecodes::_lookupswitch:
2150 do_lookupswitch();
2151 break;
2153 case Bytecodes::_invokestatic:
2154 case Bytecodes::_invokespecial:
2155 case Bytecodes::_invokevirtual:
2156 case Bytecodes::_invokeinterface:
2157 do_call();
2158 break;
2159 case Bytecodes::_checkcast:
2160 do_checkcast();
2161 break;
2162 case Bytecodes::_instanceof:
2163 do_instanceof();
2164 break;
2165 case Bytecodes::_anewarray:
2166 do_anewarray();
2167 break;
2168 case Bytecodes::_newarray:
2169 do_newarray((BasicType)iter().get_index());
2170 break;
2171 case Bytecodes::_multianewarray:
2172 do_multianewarray();
2173 break;
2174 case Bytecodes::_new:
2175 do_new();
2176 break;
2178 case Bytecodes::_jsr:
2179 case Bytecodes::_jsr_w:
2180 do_jsr();
2181 break;
2183 case Bytecodes::_ret:
2184 do_ret();
2185 break;
2188 case Bytecodes::_monitorenter:
2189 do_monitor_enter();
2190 break;
2192 case Bytecodes::_monitorexit:
2193 do_monitor_exit();
2194 break;
2196 case Bytecodes::_breakpoint:
2197 // Breakpoint set concurrently to compile
2198 // %%% use an uncommon trap?
2199 C->record_failure("breakpoint in method");
2200 return;
2202 default:
2203 #ifndef PRODUCT
2204 map()->dump(99);
2205 #endif
2206 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2207 ShouldNotReachHere();
2208 }
2210 #ifndef PRODUCT
2211 IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2212 if(printer) {
2213 char buffer[256];
2214 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2215 bool old = printer->traverse_outs();
2216 printer->set_traverse_outs(true);
2217 printer->print_method(C, buffer, 3);
2218 printer->set_traverse_outs(old);
2219 }
2220 #endif
2221 }