Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
1 /*
2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compileLog.hpp"
30 #include "interpreter/linkResolver.hpp"
31 #include "memory/universe.inline.hpp"
32 #include "opto/addnode.hpp"
33 #include "opto/divnode.hpp"
34 #include "opto/idealGraphPrinter.hpp"
35 #include "opto/matcher.hpp"
36 #include "opto/memnode.hpp"
37 #include "opto/mulnode.hpp"
38 #include "opto/parse.hpp"
39 #include "opto/runtime.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/sharedRuntime.hpp"
43 extern int explicit_null_checks_inserted,
44 explicit_null_checks_elided;
46 //---------------------------------array_load----------------------------------
47 void Parse::array_load(BasicType elem_type) {
48 const Type* elem = Type::TOP;
49 Node* adr = array_addressing(elem_type, 0, &elem);
50 if (stopped()) return; // guaranteed null or range check
51 _sp -= 2; // Pop array and index
52 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
53 Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
54 push(ld);
55 }
58 //--------------------------------array_store----------------------------------
59 void Parse::array_store(BasicType elem_type) {
60 Node* adr = array_addressing(elem_type, 1);
61 if (stopped()) return; // guaranteed null or range check
62 Node* val = pop();
63 _sp -= 2; // Pop array and index
64 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
65 store_to_memory(control(), adr, val, elem_type, adr_type);
66 }
69 //------------------------------array_addressing-------------------------------
70 // Pull array and index from the stack. Compute pointer-to-element.
71 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
72 Node *idx = peek(0+vals); // Get from stack without popping
73 Node *ary = peek(1+vals); // in case of exception
75 // Null check the array base, with correct stack contents
76 ary = do_null_check(ary, T_ARRAY);
77 // Was a null exception detected at compile time?
78 if (stopped()) return top();
80 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
81 const TypeInt* sizetype = arytype->size();
82 const Type* elemtype = arytype->elem();
84 if (UseUniqueSubclasses && result2 != NULL) {
85 const Type* el = elemtype->make_ptr();
86 if (el && el->isa_instptr()) {
87 const TypeInstPtr* toop = el->is_instptr();
88 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
89 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
90 const Type* subklass = Type::get_const_type(toop->klass());
91 elemtype = subklass->join(el);
92 }
93 }
94 }
96 // Check for big class initializers with all constant offsets
97 // feeding into a known-size array.
98 const TypeInt* idxtype = _gvn.type(idx)->is_int();
99 // See if the highest idx value is less than the lowest array bound,
100 // and if the idx value cannot be negative:
101 bool need_range_check = true;
102 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
103 need_range_check = false;
104 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'");
105 }
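// For example, a constant index into a constant-length array, as in
//   int[] a = new int[10]; ... a[3] ...
// gives idxtype = int:3 and sizetype = int:10; since idxtype->_hi (3) is
// below sizetype->_lo (10) and idxtype->_lo is non-negative, the range
// check can be elided.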
107 if (!arytype->klass()->is_loaded()) {
108 // Only fails for some -Xcomp runs
109 // The class is unloaded. We have to run this bytecode in the interpreter.
110 uncommon_trap(Deoptimization::Reason_unloaded,
111 Deoptimization::Action_reinterpret,
112 arytype->klass(), "!loaded array");
113 return top();
114 }
116 // Do the range check
117 if (GenerateRangeChecks && need_range_check) {
118 Node* tst;
119 if (sizetype->_hi <= 0) {
120 // The greatest array bound is negative, so we can conclude that we're
121 // compiling unreachable code, but the unsigned compare trick used below
122 // only works with non-negative lengths. Instead, hack "tst" to be zero so
123 // the uncommon_trap path will always be taken.
124 tst = _gvn.intcon(0);
125 } else {
126 // Range is constant in array-oop, so we can use the original state of mem
127 Node* len = load_array_length(ary);
129 // Test length vs index (standard trick using unsigned compare)
130 Node* chk = _gvn.transform( new (C, 3) CmpUNode(idx, len) );
131 BoolTest::mask btest = BoolTest::lt;
132 tst = _gvn.transform( new (C, 2) BoolNode(chk, btest) );
133 }
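// To illustrate the unsigned trick with len == 10: idx == -1 becomes
// 0xFFFFFFFF when viewed unsigned, so "idx <u len" fails, and idx == 10
// fails as well; one CmpU thus subsumes both the idx < 0 and idx >= len
// tests, and either failure takes the cutout below.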
134 // Branch to failure if out of bounds
135 { BuildCutout unless(this, tst, PROB_MAX);
136 if (C->allow_range_check_smearing()) {
137 // Do not use builtin_throw, since range checks are sometimes
138 // made more stringent by an optimistic transformation.
139 // This creates "tentative" range checks at this point,
140 // which are not guaranteed to throw exceptions.
141 // See IfNode::Ideal, is_range_check, adjust_check.
142 uncommon_trap(Deoptimization::Reason_range_check,
143 Deoptimization::Action_make_not_entrant,
144 NULL, "range_check");
145 } else {
146 // If we have already recompiled with the range-check-widening
147 // heroic optimization turned off, then we must really be throwing
148 // range check exceptions.
149 builtin_throw(Deoptimization::Reason_range_check, idx);
150 }
151 }
152 }
153 // Check for the case where we statically know the range check always throws
154 if (stopped()) return top();
156 Node* ptr = array_element_address(ary, idx, type, sizetype);
158 if (result2 != NULL) *result2 = elemtype;
160 assert(ptr != top(), "top should go hand-in-hand with stopped");
162 return ptr;
163 }
166 // returns IfNode
167 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
168 Node *cmp = _gvn.transform( new (C, 3) CmpINode( a, b));
169 Node *tst = _gvn.transform( new (C, 2) BoolNode( cmp, mask));
170 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
171 return iff;
172 }
174 // return Region node
175 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) {
176 Node *region = new (C, 3) RegionNode(3); // 2 results
177 record_for_igvn(region);
178 region->init_req(1, iffalse);
179 region->init_req(2, iftrue );
180 _gvn.set_type(region, Type::CONTROL);
181 region = _gvn.transform(region);
182 set_control (region);
183 return region;
184 }
187 //------------------------------helper for tableswitch-------------------------
188 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
189 // True branch, use existing map info
190 { PreserveJVMState pjvms(this);
191 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
192 set_control( iftrue );
193 profile_switch_case(prof_table_index);
194 merge_new_path(dest_bci_if_true);
195 }
197 // False branch
198 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
199 set_control( iffalse );
200 }
202 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
203 // False branch (the jump destination), use existing map info
204 { PreserveJVMState pjvms(this);
205 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode (iff) );
206 set_control( iffalse );
207 profile_switch_case(prof_table_index);
208 merge_new_path(dest_bci_if_true);
209 }
211 // True branch (fall through)
212 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff) );
213 set_control( iftrue );
214 }
216 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
217 // Unconditional jump; use existing map and control()
218 profile_switch_case(prof_table_index);
219 merge_new_path(dest_bci);
220 }
223 extern "C" {
224 static int jint_cmp(const void *i, const void *j) {
225 int a = *(jint *)i;
226 int b = *(jint *)j;
227 return a > b ? 1 : a < b ? -1 : 0;
228 }
229 }
232 // Default value for methodData switch indexing. Must be a negative value to avoid
233 // conflict with any legal switch index.
234 #define NullTableIndex -1
236 class SwitchRange : public StackObj {
237 // a range of integers coupled with a bci destination
238 jint _lo; // inclusive lower limit
239 jint _hi; // inclusive upper limit
240 int _dest;
241 int _table_index; // index into method data table
243 public:
244 jint lo() const { return _lo; }
245 jint hi() const { return _hi; }
246 int dest() const { return _dest; }
247 int table_index() const { return _table_index; }
248 bool is_singleton() const { return _lo == _hi; }
250 void setRange(jint lo, jint hi, int dest, int table_index) {
251 assert(lo <= hi, "must be a non-empty range");
252 _lo = lo; _hi = hi; _dest = dest; _table_index = table_index;
253 }
254 bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
255 assert(lo <= hi, "must be a non-empty range");
256 if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
257 _hi = hi;
258 return true;
259 }
260 return false;
261 }
263 void set (jint value, int dest, int table_index) {
264 setRange(value, value, dest, table_index);
265 }
266 bool adjoin(jint value, int dest, int table_index) {
267 return adjoinRange(value, value, dest, table_index);
268 }
270 void print(ciEnv* env) {
271 if (is_singleton())
272 tty->print(" {%d}=>%d", lo(), dest());
273 else if (lo() == min_jint)
274 tty->print(" {..%d}=>%d", hi(), dest());
275 else if (hi() == max_jint)
276 tty->print(" {%d..}=>%d", lo(), dest());
277 else
278 tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
279 }
280 };
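// A sketch of how consecutive cases coalesce (table_index values assumed
// equal, e.g. all NullTableIndex when profiling is off):
//   SwitchRange r;
//   r.set(1, dest, NullTableIndex);     // {1}=>dest
//   r.adjoin(2, dest, NullTableIndex);  // grows to {1..2}=>dest
//   r.adjoin(3, dest, NullTableIndex);  // grows to {1..3}=>dest
// A differing dest or table_index makes adjoin fail, starting a new range.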
283 //-------------------------------do_tableswitch--------------------------------
284 void Parse::do_tableswitch() {
285 Node* lookup = pop();
287 // Get information about tableswitch
288 int default_dest = iter().get_dest_table(0);
289 int lo_index = iter().get_int_table(1);
290 int hi_index = iter().get_int_table(2);
291 int len = hi_index - lo_index + 1;
293 if (len < 1) {
294 // If this is a backward branch, add safepoint
295 maybe_add_safepoint(default_dest);
296 if (should_add_predicate(default_dest)){
297 _sp += 1; // set original stack for use by uncommon_trap
298 add_predicate();
299 _sp -= 1;
300 }
301 merge(default_dest);
302 return;
303 }
305 // generate decision tree, using trichotomy when possible
306 int rnum = len+2;
307 bool makes_backward_branch = false;
308 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
309 int rp = -1;
310 if (lo_index != min_jint) {
311 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
312 }
313 for (int j = 0; j < len; j++) {
314 jint match_int = lo_index+j;
315 int dest = iter().get_dest_table(j+3);
316 makes_backward_branch |= (dest <= bci());
317 int table_index = method_data_update() ? j : NullTableIndex;
318 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
319 ranges[++rp].set(match_int, dest, table_index);
320 }
321 }
322 jint highest = lo_index+(len-1);
323 assert(ranges[rp].hi() == highest, "");
324 if (highest != max_jint
325 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
326 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
327 }
328 assert(rp < len+2, "not too many ranges");
330 // Safepoint in case a backward branch is observed
331 if( makes_backward_branch && UseLoopSafepoints )
332 add_safepoint();
334 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
335 }
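// A concrete example of the range construction above, assuming profiling is
// off (all table indices NullTableIndex): for
//   tableswitch { 0 => A; 1 => A; 2 => B; default => D }
// with lo_index == 0 and hi_index == 2, the loop builds
//   {min_jint..-1}=>D, {0..1}=>A, {2}=>B, {3..max_jint}=>D
// so the ranges exhaust Type::INT, as jump_switch_ranges asserts at depth 0.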
338 //------------------------------do_lookupswitch--------------------------------
339 void Parse::do_lookupswitch() {
340 Node *lookup = pop(); // lookup value
341 // Get information about lookupswitch
342 int default_dest = iter().get_dest_table(0);
343 int len = iter().get_int_table(1);
345 if (len < 1) { // If this is a backward branch, add safepoint
346 maybe_add_safepoint(default_dest);
347 if (should_add_predicate(default_dest)){
348 _sp += 1; // set original stack for use by uncommon_trap
349 add_predicate();
350 _sp -= 1;
351 }
352 merge(default_dest);
353 return;
354 }
356 // generate decision tree, using trichotomy when possible
357 jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
358 {
359 for( int j = 0; j < len; j++ ) {
360 table[j+j+0] = iter().get_int_table(2+j+j);
361 table[j+j+1] = iter().get_dest_table(2+j+j+1);
362 }
363 qsort( table, len, 2*sizeof(table[0]), jint_cmp );
364 }
366 int rnum = len*2+1;
367 bool makes_backward_branch = false;
368 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
369 int rp = -1;
370 for( int j = 0; j < len; j++ ) {
371 jint match_int = table[j+j+0];
372 int dest = table[j+j+1];
373 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1;
374 int table_index = method_data_update() ? j : NullTableIndex;
375 makes_backward_branch |= (dest <= bci());
376 if( match_int != next_lo ) {
377 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
378 }
379 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
380 ranges[++rp].set(match_int, dest, table_index);
381 }
382 }
383 jint highest = table[2*(len-1)];
384 assert(ranges[rp].hi() == highest, "");
385 if( highest != max_jint
386 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
387 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
388 }
389 assert(rp < rnum, "not too many ranges");
391 // Safepoint in case a backward branch is observed
392 if( makes_backward_branch && UseLoopSafepoints )
393 add_safepoint();
395 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
396 }
398 //----------------------------create_jump_tables-------------------------------
399 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
400 // Are jump tables enabled?
401 if (!UseJumpTables) return false;
403 // Are jump tables supported?
404 if (!Matcher::has_match_rule(Op_Jump)) return false;
406 // Don't make a jump table if profiling
407 if (method_data_update()) return false;
409 // Decide if a guard is needed to lop off big ranges at either (or
410 // both) end(s) of the input set. We'll call this the default target
411 // even though we can't be sure that it is the true "default".
413 bool needs_guard = false;
414 int default_dest;
415 int64 total_outlier_size = 0;
416 int64 hi_size = ((int64)hi->hi()) - ((int64)hi->lo()) + 1;
417 int64 lo_size = ((int64)lo->hi()) - ((int64)lo->lo()) + 1;
419 if (lo->dest() == hi->dest()) {
420 total_outlier_size = hi_size + lo_size;
421 default_dest = lo->dest();
422 } else if (lo_size > hi_size) {
423 total_outlier_size = lo_size;
424 default_dest = lo->dest();
425 } else {
426 total_outlier_size = hi_size;
427 default_dest = hi->dest();
428 }
430 // If a guard test will eliminate very sparse end ranges, then
431 // it is worth the cost of an extra jump.
432 if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
433 needs_guard = true;
434 if (default_dest == lo->dest()) lo++;
435 if (default_dest == hi->dest()) hi--;
436 }
438 // Find the total number of cases and ranges
439 int64 num_cases = ((int64)hi->hi()) - ((int64)lo->lo()) + 1;
440 int num_range = hi - lo + 1;
442 // Don't create table if: too large, too small, or too sparse.
443 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
444 return false;
445 if (num_cases > (MaxJumpTableSparseness * num_range))
446 return false;
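// For instance, ranges {0..3}=>A, {4..999}=>D, {1000}=>B give
// num_cases == 1001 spread over num_range == 3; for any modest value of
// MaxJumpTableSparseness this fails the density test above, so the switch
// falls back to the binary decision tree instead of a table.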
448 // Normalize table lookups to zero
449 int lowval = lo->lo();
450 key_val = _gvn.transform( new (C, 3) SubINode(key_val, _gvn.intcon(lowval)) );
452 // Generate a guard to protect against input keyvals that aren't
453 // in the switch domain.
454 if (needs_guard) {
455 Node* size = _gvn.intcon(num_cases);
456 Node* cmp = _gvn.transform( new (C, 3) CmpUNode(key_val, size) );
457 Node* tst = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ge) );
458 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
459 jump_if_true_fork(iff, default_dest, NullTableIndex);
460 }
462 // Create an ideal JumpTable node that has projections
463 // for all possible ranges of the switch statement.
464 // The key_val input must be converted to a pointer offset and scaled.
465 // Compare Parse::array_addressing above.
466 #ifdef _LP64
467 // Clean the 32-bit int into a real 64-bit offset.
468 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
469 const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
470 key_val = _gvn.transform( new (C, 2) ConvI2LNode(key_val, lkeytype) );
471 #endif
472 // Scale the value by the word size so we have a byte offset into the table,
473 // rather than a raw switch value
474 Node *shiftWord = _gvn.MakeConX(wordSize);
475 key_val = _gvn.transform( new (C, 3) MulXNode( key_val, shiftWord));
477 // Create the JumpNode
478 Node* jtn = _gvn.transform( new (C, 2) JumpNode(control(), key_val, num_cases) );
480 // These are the switch destinations hanging off the jumpnode
481 int i = 0;
482 for (SwitchRange* r = lo; r <= hi; r++) {
483 for (int j = r->lo(); j <= r->hi(); j++, i++) {
484 Node* input = _gvn.transform(new (C, 1) JumpProjNode(jtn, i, r->dest(), j - lowval));
485 {
486 PreserveJVMState pjvms(this);
487 set_control(input);
488 jump_if_always_fork(r->dest(), r->table_index());
489 }
490 }
491 }
492 assert(i == num_cases, "miscount of cases");
493 stop_and_kill_map(); // no more uses for this JVMS
494 return true;
495 }
497 //----------------------------jump_switch_ranges-------------------------------
498 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
499 Block* switch_block = block();
501 if (switch_depth == 0) {
502 // Do special processing for the top-level call.
503 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
504 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
506 // Decrement pred-numbers for the unique set of nodes.
507 #ifdef ASSERT
508 // Ensure that the block's successors are a (duplicate-free) set.
509 int successors_counted = 0; // block occurrences in [lo..hi]
510 int unique_successors = switch_block->num_successors();
511 for (int i = 0; i < unique_successors; i++) {
512 Block* target = switch_block->successor_at(i);
514 // Check that the set of successors is the same in both places.
515 int successors_found = 0;
516 for (SwitchRange* p = lo; p <= hi; p++) {
517 if (p->dest() == target->start()) successors_found++;
518 }
519 assert(successors_found > 0, "successor must be known");
520 successors_counted += successors_found;
521 }
522 assert(successors_counted == (hi-lo)+1, "no unexpected successors");
523 #endif
525 // Maybe prune the inputs, based on the type of key_val.
526 jint min_val = min_jint;
527 jint max_val = max_jint;
528 const TypeInt* ti = key_val->bottom_type()->isa_int();
529 if (ti != NULL) {
530 min_val = ti->_lo;
531 max_val = ti->_hi;
532 assert(min_val <= max_val, "invalid int type");
533 }
534 while (lo->hi() < min_val) lo++;
535 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
536 while (hi->lo() > max_val) hi--;
537 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
538 }
540 #ifndef PRODUCT
541 if (switch_depth == 0) {
542 _max_switch_depth = 0;
543 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1;
544 }
545 #endif
547 assert(lo <= hi, "must be a non-empty set of ranges");
548 if (lo == hi) {
549 jump_if_always_fork(lo->dest(), lo->table_index());
550 } else {
551 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
552 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
554 if (create_jump_tables(key_val, lo, hi)) return;
556 int nr = hi - lo + 1;
558 SwitchRange* mid = lo + nr/2;
559 // if there is an easy choice, pivot at a singleton:
560 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--;
562 assert(lo < mid && mid <= hi, "good pivot choice");
563 assert(nr != 2 || mid == hi, "should pick higher of 2");
564 assert(nr != 3 || mid == hi-1, "should pick middle of 3");
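// To illustrate the pivot logic: with five ranges r0..r4, mid is r2. If r2
// is a singleton {k}, an ne-test peels the k case off first and, when a
// higher side exists, an le-test then separates {r0,r1} from {r3,r4};
// if r2 is a real range, a single ge-test against r2->lo() sends r2..r4
// down one side and r0..r1 down the other.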
566 Node *test_val = _gvn.intcon(mid->lo());
568 if (mid->is_singleton()) {
569 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
570 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
572 // Special Case: If there are exactly three ranges, and the high
573 // and low range each go to the same place, omit the "gt" test,
574 // since it will not discriminate anything.
575 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
576 if (eq_test_only) {
577 assert(mid == hi-1, "");
578 }
580 // if there is a higher range, test for it and process it:
581 if (mid < hi && !eq_test_only) {
582 // two comparisons of same values--should enable 1 test for 2 branches
583 // Use BoolTest::le instead of BoolTest::gt
584 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le);
585 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_le) );
586 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_le) );
587 { PreserveJVMState pjvms(this);
588 set_control(iffalse);
589 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
590 }
591 set_control(iftrue);
592 }
594 } else {
595 // mid is a range, not a singleton, so treat mid..hi as a unit
596 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
598 // if there is a higher range, test for it and process it:
599 if (mid == hi) {
600 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
601 } else {
602 Node *iftrue = _gvn.transform( new (C, 1) IfTrueNode(iff_ge) );
603 Node *iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff_ge) );
604 { PreserveJVMState pjvms(this);
605 set_control(iftrue);
606 jump_switch_ranges(key_val, mid, hi, switch_depth+1);
607 }
608 set_control(iffalse);
609 }
610 }
612 // in any case, process the lower range
613 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
614 }
616 // Decrease pred_count for each successor after all is done.
617 if (switch_depth == 0) {
618 int unique_successors = switch_block->num_successors();
619 for (int i = 0; i < unique_successors; i++) {
620 Block* target = switch_block->successor_at(i);
621 // Throw away the pre-allocated path for each unique successor.
622 target->next_path_num();
623 }
624 }
626 #ifndef PRODUCT
627 _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
628 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
629 SwitchRange* r;
630 int nsing = 0;
631 for( r = lo; r <= hi; r++ ) {
632 if( r->is_singleton() ) nsing++;
633 }
634 tty->print(">>> ");
635 _method->print_short_name();
636 tty->print_cr(" switch decision tree");
637 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d",
638 hi-lo+1, nsing, _max_switch_depth, _est_switch_depth);
639 if (_max_switch_depth > _est_switch_depth) {
640 tty->print_cr("******** BAD SWITCH DEPTH ********");
641 }
642 tty->print(" ");
643 for( r = lo; r <= hi; r++ ) {
644 r->print(env());
645 }
646 tty->print_cr("");
647 }
648 #endif
649 }
651 void Parse::modf() {
652 Node *f2 = pop();
653 Node *f1 = pop();
654 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(),
655 CAST_FROM_FN_PTR(address, SharedRuntime::frem),
656 "frem", NULL, //no memory effects
657 f1, f2);
658 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
660 push(res);
661 }
663 void Parse::modd() {
664 Node *d2 = pop_pair();
665 Node *d1 = pop_pair();
666 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(),
667 CAST_FROM_FN_PTR(address, SharedRuntime::drem),
668 "drem", NULL, //no memory effects
669 d1, top(), d2, top());
670 Node* res_d = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
672 #ifdef ASSERT
673 Node* res_top = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 1));
674 assert(res_top == top(), "second value must be top");
675 #endif
677 push_pair(res_d);
678 }
680 void Parse::l2f() {
681 Node* f2 = pop();
682 Node* f1 = pop();
683 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
684 CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
685 "l2f", NULL, //no memory effects
686 f1, f2);
687 Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms + 0));
689 push(res);
690 }
692 void Parse::do_irem() {
693 // Must keep both values on the expression-stack during null-check
694 do_null_check(peek(), T_INT);
695 // Was a null exception detected at compile time?
696 if (stopped()) return;
698 Node* b = pop();
699 Node* a = pop();
701 const Type *t = _gvn.type(b);
702 if (t != Type::TOP) {
703 const TypeInt *ti = t->is_int();
704 if (ti->is_con()) {
705 int divisor = ti->get_con();
706 // check for positive power of 2
707 if (divisor > 0 &&
708 (divisor & ~(divisor-1)) == divisor) {
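// The test isolates powers of two: for divisor == 8, ~(8-1) clears the
// low three bits and 8 & ~7 == 8; for divisor == 6, 6 & ~5 == 2 != 6,
// so non-powers-of-two are rejected.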
709 // yes !
710 Node *mask = _gvn.intcon((divisor - 1));
711 // Sigh, must handle negative dividends
712 Node *zero = _gvn.intcon(0);
713 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
714 Node *iff = _gvn.transform( new (C, 1) IfFalseNode(ifff) );
715 Node *ift = _gvn.transform( new (C, 1) IfTrueNode (ifff) );
716 Node *reg = jump_if_join(ift, iff);
717 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT);
718 // Negative path; negate/and/negate
719 Node *neg = _gvn.transform( new (C, 3) SubINode(zero, a) );
720 Node *andn= _gvn.transform( new (C, 3) AndINode(neg, mask) );
721 Node *negn= _gvn.transform( new (C, 3) SubINode(zero, andn) );
722 phi->init_req(1, negn);
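// Worked example: a == -13 with divisor == 8 (mask == 7):
// neg == 13, andn == 13 & 7 == 5, negn == -5, which matches the sign
// of the dividend as Java's irem requires (-13 % 8 == -5).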
723 // Fast positive case
724 Node *andx = _gvn.transform( new (C, 3) AndINode(a, mask) );
725 phi->init_req(2, andx);
726 // Push the merge
727 push( _gvn.transform(phi) );
728 return;
729 }
730 }
731 }
732 // Default case
733 push( _gvn.transform( new (C, 3) ModINode(control(),a,b) ) );
734 }
736 // Handle jsr and jsr_w bytecode
737 void Parse::do_jsr() {
738 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
740 // Store information about current state, tagged with new _jsr_bci
741 int return_bci = iter().next_bci();
742 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
744 // Update method data
745 profile_taken_branch(jsr_bci);
747 // The way we do things now, there is only one successor block
748 // for the jsr, because the target code is cloned by ciTypeFlow.
749 Block* target = successor_for_bci(jsr_bci);
751 // What got pushed?
752 const Type* ret_addr = target->peek();
753 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
755 // Effect of jsr on the stack
756 push(_gvn.makecon(ret_addr));
758 // Flow to the jsr.
759 if (should_add_predicate(jsr_bci)){
760 add_predicate();
761 }
762 merge(jsr_bci);
763 }
765 // Handle ret bytecode
766 void Parse::do_ret() {
767 // Find out to whom we return.
768 #if 0 // %%%% MAKE THIS WORK
769 Node* con = local();
770 const TypePtr* tp = con->bottom_type()->isa_ptr();
771 assert(tp && tp->singleton(), "");
772 int return_bci = (int) tp->get_con();
773 merge(return_bci);
774 #else
775 assert(block()->num_successors() == 1, "a ret can only go one place now");
776 Block* target = block()->successor_at(0);
777 assert(!target->is_ready(), "our arrival must be expected");
778 profile_ret(target->flow()->start());
779 int pnum = target->next_path_num();
780 merge_common(target, pnum);
781 #endif
782 }
784 //--------------------------dynamic_branch_prediction--------------------------
785 // Try to gather dynamic branch prediction behavior. Returns a probability
786 // of the branch being taken and sets the "cnt" field. Returns -1.0
787 // (PROB_UNKNOWN) if we need to use static prediction for some reason.
788 float Parse::dynamic_branch_prediction(float &cnt) {
789 ResourceMark rm;
791 cnt = COUNT_UNKNOWN;
793 // Use MethodData information if it is available
794 // FIXME: free the ProfileData structure
795 ciMethodData* methodData = method()->method_data();
796 if (!methodData->is_mature()) return PROB_UNKNOWN;
797 ciProfileData* data = methodData->bci_to_data(bci());
798 if (!data->is_JumpData()) return PROB_UNKNOWN;
800 // get taken and not taken values
801 int taken = data->as_JumpData()->taken();
802 int not_taken = 0;
803 if (data->is_BranchData()) {
804 not_taken = data->as_BranchData()->not_taken();
805 }
807 // scale the counts to be commensurate with invocation counts:
808 taken = method()->scale_count(taken);
809 not_taken = method()->scale_count(not_taken);
811 // Give up if too few counts to be meaningful
812 if (taken + not_taken < 40) {
813 if (C->log() != NULL) {
814 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
815 }
816 return PROB_UNKNOWN;
817 }
819 // Compute frequency that we arrive here
820 int sum = taken + not_taken;
821 // Adjust, if this block is a cloned private block but the
822 // jump counts are shared. Take the private counts for
823 // just this path instead of the shared counts.
824 if( block()->count() > 0 )
825 sum = block()->count();
826 cnt = (float)sum / (float)FreqCountInvocations;
828 // Pin probability to sane limits
829 float prob;
830 if( !taken )
831 prob = (0+PROB_MIN) / 2;
832 else if( !not_taken )
833 prob = (1+PROB_MAX) / 2;
834 else { // Compute probability of true path
835 prob = (float)taken / (float)(taken + not_taken);
836 if (prob > PROB_MAX) prob = PROB_MAX;
837 if (prob < PROB_MIN) prob = PROB_MIN;
838 }
840 assert((cnt > 0.0f) && (prob > 0.0f),
841 "Bad frequency assignment in if");
843 if (C->log() != NULL) {
844 const char* prob_str = NULL;
845 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always";
846 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never";
847 char prob_str_buf[30];
848 if (prob_str == NULL) {
849 sprintf(prob_str_buf, "%g", prob);
850 prob_str = prob_str_buf;
851 }
852 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
853 iter().get_dest(), taken, not_taken, cnt, prob_str);
854 }
855 return prob;
856 }
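// To make the arithmetic concrete: taken == 30, not_taken == 10 gives
// sum == 40 (just enough to be meaningful), prob == 30/40 == 0.75 and
// cnt == 40 / FreqCountInvocations. A zero count on either side instead
// yields the special near-0 or near-1 values above, which downstream code
// (see seems_never_taken) can distinguish from merely tiny probabilities.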
858 //-----------------------------branch_prediction-------------------------------
859 float Parse::branch_prediction(float& cnt,
860 BoolTest::mask btest,
861 int target_bci) {
862 float prob = dynamic_branch_prediction(cnt);
863 // If prob is unknown, switch to static prediction
864 if (prob != PROB_UNKNOWN) return prob;
866 prob = PROB_FAIR; // Set default value
867 if (btest == BoolTest::eq) // Exactly equal test?
868 prob = PROB_STATIC_INFREQUENT; // Assume it's relatively infrequent
869 else if (btest == BoolTest::ne)
870 prob = PROB_STATIC_FREQUENT; // Assume it's relatively frequent
872 // If this is a conditional test guarding a backwards branch,
873 // assume it's a loop-back edge. Make it a likely taken branch.
874 if (target_bci < bci()) {
875 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt
876 // Since it's an OSR, we probably have profile data, but since
877 // branch_prediction returned PROB_UNKNOWN, the counts are too small.
878 // Let's make a special check here for completely zero counts.
879 ciMethodData* methodData = method()->method_data();
880 if (!methodData->is_empty()) {
881 ciProfileData* data = methodData->bci_to_data(bci());
882 // Only stop for truly zero counts, which mean an unknown part
883 // of the OSR-ed method, and we want to deopt to gather more stats.
884 // If you have ANY counts, then this loop is simply 'cold' relative
885 // to the OSR loop.
886 if (data->as_BranchData()->taken() +
887 data->as_BranchData()->not_taken() == 0 ) {
888 // This is the only way to return PROB_UNKNOWN:
889 return PROB_UNKNOWN;
890 }
891 }
892 }
893 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch
894 }
896 assert(prob != PROB_UNKNOWN, "must have some guess at this point");
897 return prob;
898 }
900 // The magic constants are chosen so as to match the output of
901 // branch_prediction() when the profile reports a zero taken count.
902 // It is important to distinguish zero counts unambiguously, because
903 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
904 // very small but nonzero probabilities, which if confused with zero
905 // counts would keep the program recompiling indefinitely.
906 bool Parse::seems_never_taken(float prob) {
907 return prob < PROB_MIN;
908 }
910 // True if the comparison seems to be the kind that will not change its
911 // statistics from true to false. See comments in adjust_map_after_if.
912 // This question is only asked along paths which are already
913 // classified as untaken (by seems_never_taken), so really,
914 // if a path is never taken, its controlling comparison is
915 // already acting in a stable fashion. If the comparison
916 // seems stable, we will put an expensive uncommon trap
917 // on the untaken path. To be conservative, and to allow
918 // partially executed counted loops to be compiled fully,
919 // we will plant uncommon traps only after pointer comparisons.
920 bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) {
921 for (int depth = 4; depth > 0; depth--) {
922 // The following switch can find CmpP here over half the time for
923 // dynamic language code rich with type tests.
924 // Code using counted loops or array manipulations (typical
925 // of benchmarks) will have many (>80%) CmpI instructions.
926 switch (cmp->Opcode()) {
927 case Op_CmpP:
928 // A never-taken null check looks like CmpP/BoolTest::eq.
929 // These certainly should be closed off as uncommon traps.
930 if (btest == BoolTest::eq)
931 return true;
932 // A never-failed type check looks like CmpP/BoolTest::ne.
933 // Let's put traps on those, too, so that we don't have to compile
934 // unused paths with indeterminate dynamic type information.
935 if (ProfileDynamicTypes)
936 return true;
937 return false;
939 case Op_CmpI:
940 // A small minority (< 10%) of CmpP are masked as CmpI,
941 // as if by boolean conversion ((p == q? 1: 0) != 0).
942 // Detect that here, even if it hasn't optimized away yet.
943 // Specifically, this covers the 'instanceof' operator.
944 if (btest == BoolTest::ne || btest == BoolTest::eq) {
945 if (_gvn.type(cmp->in(2))->singleton() &&
946 cmp->in(1)->is_Phi()) {
947 PhiNode* phi = cmp->in(1)->as_Phi();
948 int true_path = phi->is_diamond_phi();
949 if (true_path > 0 &&
950 _gvn.type(phi->in(1))->singleton() &&
951 _gvn.type(phi->in(2))->singleton()) {
952 // phi->region->if_proj->ifnode->bool->cmp
953 BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool();
954 btest = bol->_test._test;
955 cmp = bol->in(1);
956 continue;
957 }
958 }
959 }
960 return false;
961 }
962 }
963 return false;
964 }
966 //-------------------------------repush_if_args--------------------------------
967 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
968 inline int Parse::repush_if_args() {
969 #ifndef PRODUCT
970 if (PrintOpto && WizardMode) {
971 tty->print("defending against excessive implicit null exceptions on %s @%d in ",
972 Bytecodes::name(iter().cur_bc()), iter().cur_bci());
973 method()->print_name(); tty->cr();
974 }
975 #endif
976 int bc_depth = - Bytecodes::depth(iter().cur_bc());
977 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
978 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms
979 assert(argument(0) != NULL, "must exist");
980 assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
981 _sp += bc_depth;
982 return bc_depth;
983 }
985 //----------------------------------do_ifnull----------------------------------
986 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
987 int target_bci = iter().get_dest();
989 Block* branch_block = successor_for_bci(target_bci);
990 Block* next_block = successor_for_bci(iter().next_bci());
992 float cnt;
993 float prob = branch_prediction(cnt, btest, target_bci);
994 if (prob == PROB_UNKNOWN) {
995 // (An earlier version of do_ifnull omitted this trap for OSR methods.)
996 #ifndef PRODUCT
997 if (PrintOpto && Verbose)
998 tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
999 #endif
1000 repush_if_args(); // to gather stats on loop
1001 // We need to mark this branch as taken so that if we recompile we will
1002 // see that it is possible. In the tiered system the interpreter doesn't
1003 // do profiling and by the time we get to the lower tier from the interpreter
1004 // the path may be cold again. Make sure it doesn't look untaken
1005 profile_taken_branch(target_bci, !ProfileInterpreter);
1006 uncommon_trap(Deoptimization::Reason_unreached,
1007 Deoptimization::Action_reinterpret,
1008 NULL, "cold");
1009 if (EliminateAutoBox) {
1010 // Mark the successor blocks as parsed
1011 branch_block->next_path_num();
1012 next_block->next_path_num();
1013 }
1014 return;
1015 }
1017 explicit_null_checks_inserted++;
1019 // Generate real control flow
1020 Node *tst = _gvn.transform( new (C, 2) BoolNode( c, btest ) );
1022 // Sanity check the probability value
1023 assert(prob > 0.0f,"Bad probability in Parser");
1024 // Need xform to put node in hash table
1025 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1026 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1027 // True branch
1028 { PreserveJVMState pjvms(this);
1029 Node* iftrue = _gvn.transform( new (C, 1) IfTrueNode (iff) );
1030 set_control(iftrue);
1032 if (stopped()) { // Path is dead?
1033 explicit_null_checks_elided++;
1034 if (EliminateAutoBox) {
1035 // Mark the successor block as parsed
1036 branch_block->next_path_num();
1037 }
1038 } else { // Path is live.
1039 // Update method data
1040 profile_taken_branch(target_bci);
1041 adjust_map_after_if(btest, c, prob, branch_block, next_block);
1042 if (!stopped()) {
1043 if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
1044 int nargs = repush_if_args(); // set original stack for uncommon_trap
1045 add_predicate();
1046 _sp -= nargs;
1047 }
1048 merge(target_bci);
1049 }
1050 }
1051 }
1053 // False branch
1054 Node* iffalse = _gvn.transform( new (C, 1) IfFalseNode(iff) );
1055 set_control(iffalse);
1057 if (stopped()) { // Path is dead?
1058 explicit_null_checks_elided++;
1059 if (EliminateAutoBox) {
1060 // Mark the successor block as parsed
1061 next_block->next_path_num();
1062 }
1063 } else { // Path is live.
1064 // Update method data
1065 profile_not_taken_branch();
1066 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob,
1067 next_block, branch_block);
1068 }
1069 }
1071 //------------------------------------do_if------------------------------------
1072 void Parse::do_if(BoolTest::mask btest, Node* c) {
1073 int target_bci = iter().get_dest();
1075 Block* branch_block = successor_for_bci(target_bci);
1076 Block* next_block = successor_for_bci(iter().next_bci());
1078 float cnt;
1079 float prob = branch_prediction(cnt, btest, target_bci);
1080 float untaken_prob = 1.0 - prob;
1082 if (prob == PROB_UNKNOWN) {
1083 #ifndef PRODUCT
1084 if (PrintOpto && Verbose)
1085 tty->print_cr("Never-taken edge stops compilation at bci %d",bci());
1086 #endif
1087 repush_if_args(); // to gather stats on loop
1088 // We need to mark this branch as taken so that if we recompile we will
1089 // see that it is possible. In the tiered system the interpreter doesn't
1090 // do profiling and by the time we get to the lower tier from the interpreter
1091 // the path may be cold again. Make sure it doesn't look untaken
1092 profile_taken_branch(target_bci, !ProfileInterpreter);
1093 uncommon_trap(Deoptimization::Reason_unreached,
1094 Deoptimization::Action_reinterpret,
1095 NULL, "cold");
1096 if (EliminateAutoBox) {
1097 // Mark the successor blocks as parsed
1098 branch_block->next_path_num();
1099 next_block->next_path_num();
1100 }
1101 return;
1102 }
1104 // Sanity check the probability value
1105 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1107 bool taken_if_true = true;
1108 // Convert BoolTest to canonical form:
1109 if (!BoolTest(btest).is_canonical()) {
1110 btest = BoolTest(btest).negate();
1111 taken_if_true = false;
1112 // prob is NOT updated here; it remains the probability of the taken
1113 // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1114 }
1115 assert(btest != BoolTest::eq, "!= is the only canonical exact test");
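// For example, if_icmpeq produces btest == BoolTest::eq, which is not
// canonical: it is negated to BoolTest::ne and taken_if_true is cleared,
// so the bytecode's jump target will be reached via the IfFalse projection
// while prob still describes the taken path.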
1117 Node* tst0 = new (C, 2) BoolNode(c, btest);
1118 Node* tst = _gvn.transform(tst0);
1119 BoolTest::mask taken_btest = BoolTest::illegal;
1120 BoolTest::mask untaken_btest = BoolTest::illegal;
1122 if (tst->is_Bool()) {
1123 // Refresh c from the transformed bool node, since it may be
1124 // simpler than the original c. Also re-canonicalize btest.
1125 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
1126 // That can arise from statements like: if (x instanceof C) ...
1127 if (tst != tst0) {
1128 // Canonicalize one more time since transform can change it.
1129 btest = tst->as_Bool()->_test._test;
1130 if (!BoolTest(btest).is_canonical()) {
1131 // Reverse edges one more time...
1132 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1133 btest = tst->as_Bool()->_test._test;
1134 assert(BoolTest(btest).is_canonical(), "sanity");
1135 taken_if_true = !taken_if_true;
1136 }
1137 c = tst->in(1);
1138 }
1139 BoolTest::mask neg_btest = BoolTest(btest).negate();
1140 taken_btest = taken_if_true ? btest : neg_btest;
1141 untaken_btest = taken_if_true ? neg_btest : btest;
1142 }
1144 // Generate real control flow
1145 float true_prob = (taken_if_true ? prob : untaken_prob);
1146 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1147 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1148 Node* taken_branch = new (C, 1) IfTrueNode(iff);
1149 Node* untaken_branch = new (C, 1) IfFalseNode(iff);
1150 if (!taken_if_true) { // Finish conversion to canonical form
1151 Node* tmp = taken_branch;
1152 taken_branch = untaken_branch;
1153 untaken_branch = tmp;
1154 }
1156 // Branch is taken:
1157 { PreserveJVMState pjvms(this);
1158 taken_branch = _gvn.transform(taken_branch);
1159 set_control(taken_branch);
1161 if (stopped()) {
1162 if (EliminateAutoBox) {
1163 // Mark the successor block as parsed
1164 branch_block->next_path_num();
1165 }
1166 } else {
1167 // Update method data
1168 profile_taken_branch(target_bci);
1169 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
1170 if (!stopped()) {
1171 if (should_add_predicate(target_bci)){ // add a predicate if it branches to a loop
1172 int nargs = repush_if_args(); // set original stack for the uncommon_trap
1173 add_predicate();
1174 _sp -= nargs;
1175 }
1176 merge(target_bci);
1177 }
1178 }
1179 }
1181 untaken_branch = _gvn.transform(untaken_branch);
1182 set_control(untaken_branch);
1184 // Branch not taken.
1185 if (stopped()) {
1186 if (EliminateAutoBox) {
1187 // Mark the successor block as parsed
1188 next_block->next_path_num();
1189 }
1190 } else {
1191 // Update method data
1192 profile_not_taken_branch();
1193 adjust_map_after_if(untaken_btest, c, untaken_prob,
1194 next_block, branch_block);
1195 }
1196 }
1198 //----------------------------adjust_map_after_if------------------------------
1199 // Adjust the JVM state to reflect the result of taking this path.
1200 // Basically, it means inspecting the CmpNode controlling this
1201 // branch, seeing how it constrains a tested value, and then
1202 // deciding if it's worth our while to encode this constraint
1203 // as graph nodes in the current abstract interpretation map.
1204 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
1205 Block* path, Block* other_path) {
1206 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal)
1207 return; // nothing to do
1209 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1211 if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) {
1212 // If this might possibly turn into an implicit null check,
1213 // and the null has never yet been seen, we need to generate
1214 // an uncommon trap, so as to recompile instead of suffering
1215 // with very slow branches. (We'll get the slow branches if
1216 // the program ever changes phase and starts seeing nulls here.)
1217 //
1218 // We do not inspect for a null constant, since a node may
1219 // optimize to 'null' later on.
1220 //
1221 // Null checks, and other tests which expect inequality,
1222 // show btest == BoolTest::eq along the non-taken branch.
1223 // On the other hand, type tests, must-be-null tests,
1224 // and other tests which expect pointer equality,
1225 // show btest == BoolTest::ne along the non-taken branch.
1226 // We prune both types of branches if they look unused.
1227 repush_if_args();
1228 // We need to mark this branch as taken so that if we recompile we will
1229 // see that it is possible. In the tiered system the interpreter doesn't
1230 // do profiling and by the time we get to the lower tier from the interpreter
1231 // the path may be cold again. Make sure it doesn't look untaken
1232 if (is_fallthrough) {
1233 profile_not_taken_branch(!ProfileInterpreter);
1234 } else {
1235 profile_taken_branch(iter().get_dest(), !ProfileInterpreter);
1236 }
1237 uncommon_trap(Deoptimization::Reason_unreached,
1238 Deoptimization::Action_reinterpret,
1239 NULL,
1240 (is_fallthrough ? "taken always" : "taken never"));
1241 return;
1242 }
1244 Node* val = c->in(1);
1245 Node* con = c->in(2);
1246 const Type* tcon = _gvn.type(con);
1247 const Type* tval = _gvn.type(val);
1248 bool have_con = tcon->singleton();
1249 if (tval->singleton()) {
1250 if (!have_con) {
1251 // Swap, so constant is in con.
1252 con = val;
1253 tcon = tval;
1254 val = c->in(2);
1255 tval = _gvn.type(val);
1256 btest = BoolTest(btest).commute();
1257 have_con = true;
1258 } else {
1259 // Do we have two constants? Then leave well enough alone.
1260 have_con = false;
1261 }
1262 }
1263 if (!have_con) // remaining adjustments need a con
1264 return;
1267 int val_in_map = map()->find_edge(val);
1268 if (val_in_map < 0) return; // replace_in_map would be useless
1269 {
1270 JVMState* jvms = this->jvms();
1271 if (!(jvms->is_loc(val_in_map) ||
1272 jvms->is_stk(val_in_map)))
1273 return; // again, it would be useless
1274 }
1276 // Check for a comparison to a constant, and "know" that the compared
1277 // value is constrained on this path.
1278 assert(tcon->singleton(), "");
1279 ConstraintCastNode* ccast = NULL;
1280 Node* cast = NULL;
1282 switch (btest) {
1283 case BoolTest::eq: // Constant test?
1284 {
1285 const Type* tboth = tcon->join(tval);
1286 if (tboth == tval) break; // Nothing to gain.
1287 if (tcon->isa_int()) {
1288 ccast = new (C, 2) CastIINode(val, tboth);
1289 } else if (tcon == TypePtr::NULL_PTR) {
1290 // Cast to null, but keep the pointer identity temporarily live.
1291 ccast = new (C, 2) CastPPNode(val, tboth);
1292 } else {
1293 const TypeF* tf = tcon->isa_float_constant();
1294 const TypeD* td = tcon->isa_double_constant();
1295 // Exclude tests vs float/double 0 as these could be
1296 // either +0 or -0. Just because you are equal to +0
1297 // doesn't mean you ARE +0!
1298 if ((!tf || tf->_f != 0.0) &&
1299 (!td || td->_d != 0.0))
1300 cast = con; // Replace non-constant val by con.
1301 }
1302 }
1303 break;
1305 case BoolTest::ne:
1306 if (tcon == TypePtr::NULL_PTR) {
1307 cast = cast_not_null(val, false);
1308 }
1309 break;
1311 default:
1312 // (At this point we could record int range types with CastII.)
1313 break;
1314 }
1316 if (ccast != NULL) {
1317 const Type* tcc = ccast->as_Type()->type();
1318 assert(tcc != tval && tcc->higher_equal(tval), "must improve");
1319 // Delay transform() call to allow recovery of pre-cast value
1320 // at the control merge.
1321 ccast->set_req(0, control());
1322 _gvn.set_type_bottom(ccast);
1323 record_for_igvn(ccast);
1324 cast = ccast;
1325 }
1327 if (cast != NULL) { // Here's the payoff.
1328 replace_in_map(val, cast);
1329 }
1330 }
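// The payoff in practice: after a guard such as
//   if (x == 7) { ... use x ... }
// the taken path replaces x in the map with a CastII whose type is the
// join of int:7 and x's prior type, so uses of x inside the block can
// fold to the constant 7.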
1333 //------------------------------do_one_bytecode--------------------------------
1334 // Parse this bytecode, and alter the Parser's JVM->Node mapping
1335 void Parse::do_one_bytecode() {
1336 Node *a, *b, *c, *d; // Handy temps
1337 BoolTest::mask btest;
1338 int i;
1340 assert(!has_exceptions(), "bytecode entry state must be clear of throws");
1342 if (C->check_node_count(NodeLimitFudgeFactor * 5,
1343 "out of nodes parsing method")) {
1344 return;
1345 }
1347 #ifdef ASSERT
1348 // for setting breakpoints
1349 if (TraceOptoParse) {
1350 tty->print(" @");
1351 dump_bci(bci());
1352 }
1353 #endif
1355 switch (bc()) {
1356 case Bytecodes::_nop:
1357 // do nothing
1358 break;
1359 case Bytecodes::_lconst_0:
1360 push_pair(longcon(0));
1361 break;
1363 case Bytecodes::_lconst_1:
1364 push_pair(longcon(1));
1365 break;
1367 case Bytecodes::_fconst_0:
1368 push(zerocon(T_FLOAT));
1369 break;
1371 case Bytecodes::_fconst_1:
1372 push(makecon(TypeF::ONE));
1373 break;
1375 case Bytecodes::_fconst_2:
1376 push(makecon(TypeF::make(2.0f)));
1377 break;
1379 case Bytecodes::_dconst_0:
1380 push_pair(zerocon(T_DOUBLE));
1381 break;
1383 case Bytecodes::_dconst_1:
1384 push_pair(makecon(TypeD::ONE));
1385 break;
1387 case Bytecodes::_iconst_m1:push(intcon(-1)); break;
1388 case Bytecodes::_iconst_0: push(intcon( 0)); break;
1389 case Bytecodes::_iconst_1: push(intcon( 1)); break;
1390 case Bytecodes::_iconst_2: push(intcon( 2)); break;
1391 case Bytecodes::_iconst_3: push(intcon( 3)); break;
1392 case Bytecodes::_iconst_4: push(intcon( 4)); break;
1393 case Bytecodes::_iconst_5: push(intcon( 5)); break;
1394 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break;
1395 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break;
1396 case Bytecodes::_aconst_null: push(null()); break;
1397 case Bytecodes::_ldc:
1398 case Bytecodes::_ldc_w:
1399 case Bytecodes::_ldc2_w:
1400 // If the constant is unresolved, run this BC once in the interpreter.
1401 {
1402 ciConstant constant = iter().get_constant();
1403 if (constant.basic_type() == T_OBJECT &&
1404 !constant.as_object()->is_loaded()) {
1405 int index = iter().get_constant_pool_index();
1406 constantTag tag = iter().get_constant_pool_tag(index);
1407 uncommon_trap(Deoptimization::make_trap_request
1408 (Deoptimization::Reason_unloaded,
1409 Deoptimization::Action_reinterpret,
1410 index),
1411 NULL, tag.internal_name());
1412 break;
1413 }
1414 assert(constant.basic_type() != T_OBJECT || !constant.as_object()->is_klass(),
1415 "must be java_mirror of klass");
1416 bool pushed = push_constant(constant, true);
1417 guarantee(pushed, "must be possible to push this constant");
1418 }
1420 break;
1422 case Bytecodes::_aload_0:
1423 push( local(0) );
1424 break;
1425 case Bytecodes::_aload_1:
1426 push( local(1) );
1427 break;
1428 case Bytecodes::_aload_2:
1429 push( local(2) );
1430 break;
1431 case Bytecodes::_aload_3:
1432 push( local(3) );
1433 break;
1434 case Bytecodes::_aload:
1435 push( local(iter().get_index()) );
1436 break;
1438 case Bytecodes::_fload_0:
1439 case Bytecodes::_iload_0:
1440 push( local(0) );
1441 break;
1442 case Bytecodes::_fload_1:
1443 case Bytecodes::_iload_1:
1444 push( local(1) );
1445 break;
1446 case Bytecodes::_fload_2:
1447 case Bytecodes::_iload_2:
1448 push( local(2) );
1449 break;
1450 case Bytecodes::_fload_3:
1451 case Bytecodes::_iload_3:
1452 push( local(3) );
1453 break;
1454 case Bytecodes::_fload:
1455 case Bytecodes::_iload:
1456 push( local(iter().get_index()) );
1457 break;
1458 case Bytecodes::_lload_0:
1459 push_pair_local( 0 );
1460 break;
1461 case Bytecodes::_lload_1:
1462 push_pair_local( 1 );
1463 break;
1464 case Bytecodes::_lload_2:
1465 push_pair_local( 2 );
1466 break;
1467 case Bytecodes::_lload_3:
1468 push_pair_local( 3 );
1469 break;
1470 case Bytecodes::_lload:
1471 push_pair_local( iter().get_index() );
1472 break;
1474 case Bytecodes::_dload_0:
1475 push_pair_local(0);
1476 break;
1477 case Bytecodes::_dload_1:
1478 push_pair_local(1);
1479 break;
1480 case Bytecodes::_dload_2:
1481 push_pair_local(2);
1482 break;
1483 case Bytecodes::_dload_3:
1484 push_pair_local(3);
1485 break;
1486 case Bytecodes::_dload:
1487 push_pair_local(iter().get_index());
1488 break;
1489 case Bytecodes::_fstore_0:
1490 case Bytecodes::_istore_0:
1491 case Bytecodes::_astore_0:
1492 set_local( 0, pop() );
1493 break;
1494 case Bytecodes::_fstore_1:
1495 case Bytecodes::_istore_1:
1496 case Bytecodes::_astore_1:
1497 set_local( 1, pop() );
1498 break;
1499 case Bytecodes::_fstore_2:
1500 case Bytecodes::_istore_2:
1501 case Bytecodes::_astore_2:
1502 set_local( 2, pop() );
1503 break;
1504 case Bytecodes::_fstore_3:
1505 case Bytecodes::_istore_3:
1506 case Bytecodes::_astore_3:
1507 set_local( 3, pop() );
1508 break;
1509 case Bytecodes::_fstore:
1510 case Bytecodes::_istore:
1511 case Bytecodes::_astore:
1512 set_local( iter().get_index(), pop() );
1513 break;
1514 // long stores
1515 case Bytecodes::_lstore_0:
1516 set_pair_local( 0, pop_pair() );
1517 break;
1518 case Bytecodes::_lstore_1:
1519 set_pair_local( 1, pop_pair() );
1520 break;
1521 case Bytecodes::_lstore_2:
1522 set_pair_local( 2, pop_pair() );
1523 break;
1524 case Bytecodes::_lstore_3:
1525 set_pair_local( 3, pop_pair() );
1526 break;
1527 case Bytecodes::_lstore:
1528 set_pair_local( iter().get_index(), pop_pair() );
1529 break;
1531 // double stores
1532 case Bytecodes::_dstore_0:
1533 set_pair_local( 0, dstore_rounding(pop_pair()) );
1534 break;
1535 case Bytecodes::_dstore_1:
1536 set_pair_local( 1, dstore_rounding(pop_pair()) );
1537 break;
1538 case Bytecodes::_dstore_2:
1539 set_pair_local( 2, dstore_rounding(pop_pair()) );
1540 break;
1541 case Bytecodes::_dstore_3:
1542 set_pair_local( 3, dstore_rounding(pop_pair()) );
1543 break;
1544 case Bytecodes::_dstore:
1545 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
1546 break;
1548 case Bytecodes::_pop: _sp -= 1; break;
1549 case Bytecodes::_pop2: _sp -= 2; break;
1550 case Bytecodes::_swap:
1551 a = pop();
1552 b = pop();
1553 push(a);
1554 push(b);
1555 break;
1556 case Bytecodes::_dup:
1557 a = pop();
1558 push(a);
1559 push(a);
1560 break;
1561 case Bytecodes::_dup_x1:
1562 a = pop();
1563 b = pop();
1564 push( a );
1565 push( b );
1566 push( a );
1567 break;
1568 case Bytecodes::_dup_x2:
1569 a = pop();
1570 b = pop();
1571 c = pop();
1572 push( a );
1573 push( c );
1574 push( b );
1575 push( a );
1576 break;
1577 case Bytecodes::_dup2:
1578 a = pop();
1579 b = pop();
1580 push( b );
1581 push( a );
1582 push( b );
1583 push( a );
1584 break;
1586 case Bytecodes::_dup2_x1:
1587 // before: .. c, b, a
1588 // after: .. b, a, c, b, a
1589 // not tested
1590 a = pop();
1591 b = pop();
1592 c = pop();
1593 push( b );
1594 push( a );
1595 push( c );
1596 push( b );
1597 push( a );
1598 break;
1599 case Bytecodes::_dup2_x2:
1600 // before: .. d, c, b, a
1601 // after: .. b, a, d, c, b, a
1602 // not tested
1603 a = pop();
1604 b = pop();
1605 c = pop();
1606 d = pop();
1607 push( b );
1608 push( a );
1609 push( d );
1610 push( c );
1611 push( b );
1612 push( a );
1613 break;
1615 case Bytecodes::_arraylength: {
1616 // Must do null-check with value on expression stack
1617 Node *ary = do_null_check(peek(), T_ARRAY);
1618 // Was a null exception detected at compile time?
1619 if (stopped()) return;
1620 a = pop();
1621 push(load_array_length(a));
1622 break;
1623 }
1625 case Bytecodes::_baload: array_load(T_BYTE); break;
1626 case Bytecodes::_caload: array_load(T_CHAR); break;
1627 case Bytecodes::_iaload: array_load(T_INT); break;
1628 case Bytecodes::_saload: array_load(T_SHORT); break;
1629 case Bytecodes::_faload: array_load(T_FLOAT); break;
1630 case Bytecodes::_aaload: array_load(T_OBJECT); break;
1631 case Bytecodes::_laload: {
1632 a = array_addressing(T_LONG, 0);
1633 if (stopped()) return; // guaranteed null or range check
1634 _sp -= 2; // Pop array and index
1635 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
1636 break;
1637 }
1638 case Bytecodes::_daload: {
1639 a = array_addressing(T_DOUBLE, 0);
1640 if (stopped()) return; // guaranteed null or range check
1641 _sp -= 2; // Pop array and index
1642 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
1643 break;
1644 }
1645 case Bytecodes::_bastore: array_store(T_BYTE); break;
1646 case Bytecodes::_castore: array_store(T_CHAR); break;
1647 case Bytecodes::_iastore: array_store(T_INT); break;
1648 case Bytecodes::_sastore: array_store(T_SHORT); break;
1649 case Bytecodes::_fastore: array_store(T_FLOAT); break;
1650 case Bytecodes::_aastore: {
1651 d = array_addressing(T_OBJECT, 1);
1652 if (stopped()) return; // guaranteed null or range check
1653 array_store_check();
1654 c = pop(); // Oop to store
1655 b = pop(); // index (already used)
1656 a = pop(); // the array itself
1657 const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
1658 const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
1659 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
1660 break;
1661 }
1662 case Bytecodes::_lastore: {
1663 a = array_addressing(T_LONG, 2);
1664 if (stopped()) return; // guaranteed null or range check
1665 c = pop_pair();
1666 _sp -= 2; // Pop array and index
1667 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
1668 break;
1669 }
1670 case Bytecodes::_dastore: {
1671 a = array_addressing(T_DOUBLE, 2);
1672 if (stopped()) return; // guaranteed null or range check
1673 c = pop_pair();
1674 _sp -= 2; // Pop array and index
1675 c = dstore_rounding(c);
1676 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
1677 break;
1678 }
1679 case Bytecodes::_getfield:
1680 do_getfield();
1681 break;
1683 case Bytecodes::_getstatic:
1684 do_getstatic();
1685 break;
1687 case Bytecodes::_putfield:
1688 do_putfield();
1689 break;
1691 case Bytecodes::_putstatic:
1692 do_putstatic();
1693 break;
1695 case Bytecodes::_irem:
1696 do_irem();
1697 break;
1698 case Bytecodes::_idiv:
1699 // Must keep both values on the expression-stack during null-check
1700 do_null_check(peek(), T_INT);
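// (Editorial note, our reading of the shared helper: with T_INT the "null
// check" above is really the divide-by-zero check on the divisor b; the
// same pattern recurs for _lrem/_ldiv below with T_LONG.)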
1701 // Compile-time detect of null-exception?
1702 if (stopped()) return;
1703 b = pop();
1704 a = pop();
1705 push( _gvn.transform( new (C, 3) DivINode(control(),a,b) ) );
1706 break;
1707 case Bytecodes::_imul:
1708 b = pop(); a = pop();
1709 push( _gvn.transform( new (C, 3) MulINode(a,b) ) );
1710 break;
1711 case Bytecodes::_iadd:
1712 b = pop(); a = pop();
1713 push( _gvn.transform( new (C, 3) AddINode(a,b) ) );
1714 break;
1715 case Bytecodes::_ineg:
1716 a = pop();
1717 push( _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),a)) );
1718 break;
1719 case Bytecodes::_isub:
1720 b = pop(); a = pop();
1721 push( _gvn.transform( new (C, 3) SubINode(a,b) ) );
1722 break;
1723 case Bytecodes::_iand:
1724 b = pop(); a = pop();
1725 push( _gvn.transform( new (C, 3) AndINode(a,b) ) );
1726 break;
1727 case Bytecodes::_ior:
1728 b = pop(); a = pop();
1729 push( _gvn.transform( new (C, 3) OrINode(a,b) ) );
1730 break;
1731 case Bytecodes::_ixor:
1732 b = pop(); a = pop();
1733 push( _gvn.transform( new (C, 3) XorINode(a,b) ) );
1734 break;
1735 case Bytecodes::_ishl:
1736 b = pop(); a = pop();
1737 push( _gvn.transform( new (C, 3) LShiftINode(a,b) ) );
1738 break;
1739 case Bytecodes::_ishr:
1740 b = pop(); a = pop();
1741 push( _gvn.transform( new (C, 3) RShiftINode(a,b) ) );
1742 break;
1743 case Bytecodes::_iushr:
1744 b = pop(); a = pop();
1745 push( _gvn.transform( new (C, 3) URShiftINode(a,b) ) );
1746 break;
1748 case Bytecodes::_fneg:
1749 a = pop();
1750 b = _gvn.transform(new (C, 2) NegFNode (a));
1751 push(b);
1752 break;
1754 case Bytecodes::_fsub:
1755 b = pop();
1756 a = pop();
1757 c = _gvn.transform( new (C, 3) SubFNode(a,b) );
1758 d = precision_rounding(c);
1759 push( d );
1760 break;
1762 case Bytecodes::_fadd:
1763 b = pop();
1764 a = pop();
1765 c = _gvn.transform( new (C, 3) AddFNode(a,b) );
1766 d = precision_rounding(c);
1767 push( d );
1768 break;
1770 case Bytecodes::_fmul:
1771 b = pop();
1772 a = pop();
1773 c = _gvn.transform( new (C, 3) MulFNode(a,b) );
1774 d = precision_rounding(c);
1775 push( d );
1776 break;
1778 case Bytecodes::_fdiv:
1779 b = pop();
1780 a = pop();
1781 c = _gvn.transform( new (C, 3) DivFNode(0,a,b) );
1782 d = precision_rounding(c);
1783 push( d );
1784 break;
1786 case Bytecodes::_frem:
1787 if (Matcher::has_match_rule(Op_ModF)) {
1788 // Generate a ModF node.
1789 b = pop();
1790 a = pop();
1791 c = _gvn.transform( new (C, 3) ModFNode(0,a,b) );
1792 d = precision_rounding(c);
1793 push( d );
1794 }
1795 else {
1796 // Generate a call.
1797 modf();
1798 }
1799 break;
1801 case Bytecodes::_fcmpl:
1802 b = pop();
1803 a = pop();
1804 c = _gvn.transform( new (C, 3) CmpF3Node( a, b));
1805 push(c);
1806 break;
1807 case Bytecodes::_fcmpg:
1808 b = pop();
1809 a = pop();
1811 // Same as fcmpl, but we need to flip the unordered case. Swap the inputs,
1812 // which negates the result sign except for unordered. Flip the unordered
1813 // case as well by using CmpF3, which implements unordered-lesser instead
1814 // of unordered-greater semantics. Finally, negate the result bits. The
1815 // result is the same as using a CmpF3Greater, except we did it with CmpF3 alone.
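// Editorial worked example (not in the original comment): let a = 1.0f and
// b = NaN. fcmpg must push +1 where fcmpl would push -1. CmpF3 returns -1
// when unordered, so after the negation below:
//   ordered inputs:  -CmpF3(b,a) = -(-cmp(a,b)) = cmp(a,b)   correct 3-way
//   unordered (NaN): -CmpF3(b,a) = -(-1)        = +1         fcmpg semantics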
1816 c = _gvn.transform( new (C, 3) CmpF3Node( b, a));
1817 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1818 push(c);
1819 break;
1821 case Bytecodes::_f2i:
1822 a = pop();
1823 push(_gvn.transform(new (C, 2) ConvF2INode(a)));
1824 break;
1826 case Bytecodes::_d2i:
1827 a = pop_pair();
1828 b = _gvn.transform(new (C, 2) ConvD2INode(a));
1829 push( b );
1830 break;
1832 case Bytecodes::_f2d:
1833 a = pop();
1834 b = _gvn.transform( new (C, 2) ConvF2DNode(a));
1835 push_pair( b );
1836 break;
1838 case Bytecodes::_d2f:
1839 a = pop_pair();
1840 b = _gvn.transform( new (C, 2) ConvD2FNode(a));
1841 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed)
1842 //b = _gvn.transform(new (C, 2) RoundFloatNode(0, b) );
1843 push( b );
1844 break;
1846 case Bytecodes::_l2f:
1847 if (Matcher::convL2FSupported()) {
1848 a = pop_pair();
1849 b = _gvn.transform( new (C, 2) ConvL2FNode(a));
1850 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits.
1851 // Rather than storing the result into an FP register then pushing
1852 // out to memory to round, the machine instruction that implements
1853 // ConvL2F is responsible for rounding.
1854 // c = precision_rounding(b);
1855 c = _gvn.transform(b);
1856 push(c);
1857 } else {
1858 l2f();
1859 }
1860 break;
1862 case Bytecodes::_l2d:
1863 a = pop_pair();
1864 b = _gvn.transform( new (C, 2) ConvL2DNode(a));
1865 // For i486.ad, rounding is always necessary (see _l2f above).
1866 // c = dprecision_rounding(b);
1867 c = _gvn.transform(b);
1868 push_pair(c);
1869 break;
1871 case Bytecodes::_f2l:
1872 a = pop();
1873 b = _gvn.transform( new (C, 2) ConvF2LNode(a));
1874 push_pair(b);
1875 break;
1877 case Bytecodes::_d2l:
1878 a = pop_pair();
1879 b = _gvn.transform( new (C, 2) ConvD2LNode(a));
1880 push_pair(b);
1881 break;
1883 case Bytecodes::_dsub:
1884 b = pop_pair();
1885 a = pop_pair();
1886 c = _gvn.transform( new (C, 3) SubDNode(a,b) );
1887 d = dprecision_rounding(c);
1888 push_pair( d );
1889 break;
1891 case Bytecodes::_dadd:
1892 b = pop_pair();
1893 a = pop_pair();
1894 c = _gvn.transform( new (C, 3) AddDNode(a,b) );
1895 d = dprecision_rounding(c);
1896 push_pair( d );
1897 break;
1899 case Bytecodes::_dmul:
1900 b = pop_pair();
1901 a = pop_pair();
1902 c = _gvn.transform( new (C, 3) MulDNode(a,b) );
1903 d = dprecision_rounding(c);
1904 push_pair( d );
1905 break;
1907 case Bytecodes::_ddiv:
1908 b = pop_pair();
1909 a = pop_pair();
1910 c = _gvn.transform( new (C, 3) DivDNode(0,a,b) );
1911 d = dprecision_rounding(c);
1912 push_pair( d );
1913 break;
1915 case Bytecodes::_dneg:
1916 a = pop_pair();
1917 b = _gvn.transform(new (C, 2) NegDNode (a));
1918 push_pair(b);
1919 break;
1921 case Bytecodes::_drem:
1922 if (Matcher::has_match_rule(Op_ModD)) {
1923 // Generate a ModD node.
1924 b = pop_pair();
1925 a = pop_pair();
1926 // a % b
1928 c = _gvn.transform( new (C, 3) ModDNode(0,a,b) );
1929 d = dprecision_rounding(c);
1930 push_pair( d );
1931 }
1932 else {
1933 // Generate a call.
1934 modd();
1935 }
1936 break;
1938 case Bytecodes::_dcmpl:
1939 b = pop_pair();
1940 a = pop_pair();
1941 c = _gvn.transform( new (C, 3) CmpD3Node( a, b));
1942 push(c);
1943 break;
1945 case Bytecodes::_dcmpg:
1946 b = pop_pair();
1947 a = pop_pair();
1948 // Same as dcmpl but need to flip the unordered case.
1949 // Commute the inputs, which negates the result sign except for unordered.
1950 // Flip the unordered as well by using CmpD3 which implements
1951 // unordered-lesser instead of unordered-greater semantics.
1952 // Finally, negate the result bits. Result is same as using a
1953 // CmpD3Greater except we did it with CmpD3 alone.
1954 c = _gvn.transform( new (C, 3) CmpD3Node( b, a));
1955 c = _gvn.transform( new (C, 3) SubINode(_gvn.intcon(0),c) );
1956 push(c);
1957 break;
1960 // Note for longs -> lo word is on TOS, hi word is on TOS - 1
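// (Editorial sketch of the pair convention this relies on, inferred from
// the asserts in _lrem/_ldiv below: push_pair(v) pushes the 64-bit node
// and then a top() placeholder, so peek(0) == top() and peek(1) is the
// actual value node; pop_pair() undoes both pushes.)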
1961 case Bytecodes::_land:
1962 b = pop_pair();
1963 a = pop_pair();
1964 c = _gvn.transform( new (C, 3) AndLNode(a,b) );
1965 push_pair(c);
1966 break;
1967 case Bytecodes::_lor:
1968 b = pop_pair();
1969 a = pop_pair();
1970 c = _gvn.transform( new (C, 3) OrLNode(a,b) );
1971 push_pair(c);
1972 break;
1973 case Bytecodes::_lxor:
1974 b = pop_pair();
1975 a = pop_pair();
1976 c = _gvn.transform( new (C, 3) XorLNode(a,b) );
1977 push_pair(c);
1978 break;
1980 case Bytecodes::_lshl:
1981 b = pop(); // the shift count
1982 a = pop_pair(); // value to be shifted
1983 c = _gvn.transform( new (C, 3) LShiftLNode(a,b) );
1984 push_pair(c);
1985 break;
1986 case Bytecodes::_lshr:
1987 b = pop(); // the shift count
1988 a = pop_pair(); // value to be shifted
1989 c = _gvn.transform( new (C, 3) RShiftLNode(a,b) );
1990 push_pair(c);
1991 break;
1992 case Bytecodes::_lushr:
1993 b = pop(); // the shift count
1994 a = pop_pair(); // value to be shifted
1995 c = _gvn.transform( new (C, 3) URShiftLNode(a,b) );
1996 push_pair(c);
1997 break;
1998 case Bytecodes::_lmul:
1999 b = pop_pair();
2000 a = pop_pair();
2001 c = _gvn.transform( new (C, 3) MulLNode(a,b) );
2002 push_pair(c);
2003 break;
2005 case Bytecodes::_lrem:
2006 // Must keep both values on the expression-stack during null-check
2007 assert(peek(0) == top(), "long word order");
2008 do_null_check(peek(1), T_LONG);
2009 // Compile-time detect of null-exception?
2010 if (stopped()) return;
2011 b = pop_pair();
2012 a = pop_pair();
2013 c = _gvn.transform( new (C, 3) ModLNode(control(),a,b) );
2014 push_pair(c);
2015 break;
2017 case Bytecodes::_ldiv:
2018 // Must keep both values on the expression-stack during null-check
2019 assert(peek(0) == top(), "long word order");
2020 do_null_check(peek(1), T_LONG);
2021 // Compile-time detect of null-exception?
2022 if (stopped()) return;
2023 b = pop_pair();
2024 a = pop_pair();
2025 c = _gvn.transform( new (C, 3) DivLNode(control(),a,b) );
2026 push_pair(c);
2027 break;
2029 case Bytecodes::_ladd:
2030 b = pop_pair();
2031 a = pop_pair();
2032 c = _gvn.transform( new (C, 3) AddLNode(a,b) );
2033 push_pair(c);
2034 break;
2035 case Bytecodes::_lsub:
2036 b = pop_pair();
2037 a = pop_pair();
2038 c = _gvn.transform( new (C, 3) SubLNode(a,b) );
2039 push_pair(c);
2040 break;
2041 case Bytecodes::_lcmp:
2042 // Safepoints are now inserted _before_ branches. The long-compare
2043 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
2044 // slew of control flow. These are usually followed by a CmpI vs zero and
2045 // a branch; this pattern then optimizes to the obvious long-compare and
2046 // branch. However, if the branch is backwards there's a Safepoint
2047 // inserted. The inserted Safepoint captures the JVM state at the
2048 // pre-branch point, i.e. it captures the 3-way value. Thus if a
2049 // long-compare is used to control a loop the debug info will force
2050 // computation of the 3-way value, even though the generated code uses a
2051 // long-compare and branch. We try to rectify the situation by inserting
2052 // a SafePoint here and have it dominate and kill the safepoint added at a
2053 // following backwards branch. At this point the JVM state merely holds 2
2054 // longs but not the 3-way value.
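// Illustrative bytecode pattern (an editorial assumption about typical
// javac output, not part of the original comment): a long-controlled loop
// bottom such as "do { ... } while (i < n)" ends in
//   lcmp
//   iflt loop_head   // backward branch; would otherwise carry the SafePoint
// so the peek-ahead below spots the if* and, for a backward target, plants
// the SafePoint here instead.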
2055 if( UseLoopSafepoints ) {
2056 switch( iter().next_bc() ) {
2057 case Bytecodes::_ifgt:
2058 case Bytecodes::_iflt:
2059 case Bytecodes::_ifge:
2060 case Bytecodes::_ifle:
2061 case Bytecodes::_ifne:
2062 case Bytecodes::_ifeq:
2063 // If this is a backwards branch in the bytecodes, add Safepoint
2064 maybe_add_safepoint(iter().next_get_dest());
default: break; // no safepoint needed when no conditional branch follows
2065 }
2066 }
2067 b = pop_pair();
2068 a = pop_pair();
2069 c = _gvn.transform( new (C, 3) CmpL3Node( a, b ));
2070 push(c);
2071 break;
2073 case Bytecodes::_lneg:
2074 a = pop_pair();
2075 b = _gvn.transform( new (C, 3) SubLNode(longcon(0),a));
2076 push_pair(b);
2077 break;
2078 case Bytecodes::_l2i:
2079 a = pop_pair();
2080 push( _gvn.transform( new (C, 2) ConvL2INode(a)));
2081 break;
2082 case Bytecodes::_i2l:
2083 a = pop();
2084 b = _gvn.transform( new (C, 2) ConvI2LNode(a));
2085 push_pair(b);
2086 break;
2087 case Bytecodes::_i2b:
2088 // Sign extend
2089 a = pop();
2090 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(24)) );
2091 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(24)) );
2092 push( a );
2093 break;
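// Editorial worked example for the shift-based narrowing above (not in the
// original source): with a = 0x000001FF,
//   a << 24 == 0xFF000000
//   a >> 24 == 0xFFFFFFFF == -1   (arithmetic shift copies the sign bit)
// matching (int)(byte)0x1FF; _i2s below is identical with 16-bit shifts.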
2094 case Bytecodes::_i2s:
2095 a = pop();
2096 a = _gvn.transform( new (C, 3) LShiftINode(a,_gvn.intcon(16)) );
2097 a = _gvn.transform( new (C, 3) RShiftINode(a,_gvn.intcon(16)) );
2098 push( a );
2099 break;
2100 case Bytecodes::_i2c:
2101 a = pop();
2102 push( _gvn.transform( new (C, 3) AndINode(a,_gvn.intcon(0xFFFF)) ) );
2103 break;
2105 case Bytecodes::_i2f:
2106 a = pop();
2107 b = _gvn.transform( new (C, 2) ConvI2FNode(a) );
2108 c = precision_rounding(b);
2109 push(c);
2110 break;
2112 case Bytecodes::_i2d:
2113 a = pop();
2114 b = _gvn.transform( new (C, 2) ConvI2DNode(a));
2115 push_pair(b);
2116 break;
2118 case Bytecodes::_iinc: // Increment local
2119 i = iter().get_index(); // Get local index
2120 set_local( i, _gvn.transform( new (C, 3) AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
2121 break;
2123 // Exit points of synchronized methods must have an unlock node
2124 case Bytecodes::_return:
2125 return_current(NULL);
2126 break;
2128 case Bytecodes::_ireturn:
2129 case Bytecodes::_areturn:
2130 case Bytecodes::_freturn:
2131 return_current(pop());
2132 break;
2133 case Bytecodes::_lreturn:
2134 return_current(pop_pair());
2135 break;
2136 case Bytecodes::_dreturn:
2137 return_current(pop_pair());
2138 break;
2140 case Bytecodes::_athrow:
2141 // a null exception oop results in a NullPointerException
2142 do_null_check(peek(), T_OBJECT);
2143 if (stopped()) return;
2144 // Hook the thrown exception directly to subsequent handlers.
2145 if (BailoutToInterpreterForThrows) {
2146 // Keep method interpreted from now on.
2147 uncommon_trap(Deoptimization::Reason_unhandled,
2148 Deoptimization::Action_make_not_compilable);
2149 return;
2150 }
2151 if (env()->jvmti_can_post_on_exceptions()) {
2152 // check if we must post exception events, take uncommon trap if so (with must_throw = false)
2153 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
2154 }
2155 // Here if either can_post_on_exceptions or should_post_on_exceptions is false
2156 add_exception_state(make_exception_state(peek()));
2157 break;
2159 case Bytecodes::_goto: // fall through
2160 case Bytecodes::_goto_w: {
2161 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
2163 // If this is a backwards branch in the bytecodes, add Safepoint
2164 maybe_add_safepoint(target_bci);
2166 // Update method data
2167 profile_taken_branch(target_bci);
2169 // Add loop predicate if it goes to a loop
2170 if (should_add_predicate(target_bci)) {
2171 add_predicate();
2172 }
2173 // Merge the current control into the target basic block
2174 merge(target_bci);
2176 // See if we can get some profile data and hand it off to the next block
2177 Block *target_block = block()->successor_for_bci(target_bci);
2178 if (target_block->pred_count() != 1) break;
2179 ciMethodData* methodData = method()->method_data();
2180 if (!methodData->is_mature()) break;
2181 ciProfileData* data = methodData->bci_to_data(bci());
2182 assert(data->is_JumpData(), "need JumpData for taken branch");
2183 int taken = ((ciJumpData*)data)->taken();
2184 taken = method()->scale_count(taken);
2185 target_block->set_count(taken);
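// (Editorial note, an assumption from the name: scale_count() normalizes
// the raw taken count against the method's invocation profile so the
// frequency handed to the target block is comparable across compilations.)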
2186 break;
2187 }
2189 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
2190 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2191 handle_if_null:
2192 // If this is a backwards branch in the bytecodes, add Safepoint
2193 maybe_add_safepoint(iter().get_dest());
2194 a = null();
2195 b = pop();
2196 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2197 do_ifnull(btest, c);
2198 break;
2200 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2201 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2202 handle_if_acmp:
2203 // If this is a backwards branch in the bytecodes, add Safepoint
2204 maybe_add_safepoint(iter().get_dest());
2205 a = pop();
2206 b = pop();
2207 c = _gvn.transform( new (C, 3) CmpPNode(b, a) );
2208 do_if(btest, c);
2209 break;
2211 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2212 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2213 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2214 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2215 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2216 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2217 handle_ifxx:
2218 // If this is a backwards branch in the bytecodes, add Safepoint
2219 maybe_add_safepoint(iter().get_dest());
2220 a = _gvn.intcon(0);
2221 b = pop();
2222 c = _gvn.transform( new (C, 3) CmpINode(b, a) );
2223 do_if(btest, c);
2224 break;
2226 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2227 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2228 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2229 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
2230 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
2231 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
2232 handle_if_icmp:
2233 // If this is a backwards branch in the bytecodes, add Safepoint
2234 maybe_add_safepoint(iter().get_dest());
2235 a = pop();
2236 b = pop();
2237 c = _gvn.transform( new (C, 3) CmpINode( b, a ) );
2238 do_if(btest, c);
2239 break;
2241 case Bytecodes::_tableswitch:
2242 do_tableswitch();
2243 break;
2245 case Bytecodes::_lookupswitch:
2246 do_lookupswitch();
2247 break;
2249 case Bytecodes::_invokestatic:
2250 case Bytecodes::_invokedynamic:
2251 case Bytecodes::_invokespecial:
2252 case Bytecodes::_invokevirtual:
2253 case Bytecodes::_invokeinterface:
2254 do_call();
2255 break;
2256 case Bytecodes::_checkcast:
2257 do_checkcast();
2258 break;
2259 case Bytecodes::_instanceof:
2260 do_instanceof();
2261 break;
2262 case Bytecodes::_anewarray:
2263 do_anewarray();
2264 break;
2265 case Bytecodes::_newarray:
2266 do_newarray((BasicType)iter().get_index());
2267 break;
2268 case Bytecodes::_multianewarray:
2269 do_multianewarray();
2270 break;
2271 case Bytecodes::_new:
2272 do_new();
2273 break;
2275 case Bytecodes::_jsr:
2276 case Bytecodes::_jsr_w:
2277 do_jsr();
2278 break;
2280 case Bytecodes::_ret:
2281 do_ret();
2282 break;
2285 case Bytecodes::_monitorenter:
2286 do_monitor_enter();
2287 break;
2289 case Bytecodes::_monitorexit:
2290 do_monitor_exit();
2291 break;
2293 case Bytecodes::_breakpoint:
2294 // Breakpoint set concurrently to compile
2295 // %%% use an uncommon trap?
2296 C->record_failure("breakpoint in method");
2297 return;
2299 default:
2300 #ifndef PRODUCT
2301 map()->dump(99);
2302 #endif
2303 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) );
2304 ShouldNotReachHere();
2305 }
2307 #ifndef PRODUCT
2308 IdealGraphPrinter *printer = IdealGraphPrinter::printer();
2309 if (printer) {
2310 char buffer[256];
2311 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
2312 bool old = printer->traverse_outs();
2313 printer->set_traverse_outs(true);
2314 printer->print_method(C, buffer, 4);
2315 printer->set_traverse_outs(old);
2316 }
2317 #endif
2318 }