Thu, 29 Sep 2011 23:09:54 -0700
7096639: Tiered: Incorrect counter overflow handling for inlined methods
Summary: Enable invocation events for inlinees
Reviewed-by: kvn
/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif
void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
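//
// Illustrative trace (a sketch): for the three-way cycle
//
//  r1 := r2
//  r2 := r3
//  r3 := r1
//
// the depth-first walk in move() below detects the cycle when it revisits
// its starting node and, for one traversal order, emits
//
//  temp := r3
//  r3   := r1
//  r1   := r2
//  r2   := temp
//
// so a single scratch register resolves any one cycle; move() asserts that
// at most one cycle exists per connected component.
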
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}
PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}
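
// Usage sketch (illustrative; mirrors move_to_phi() later in this file):
//
//   PhiResolver resolver(this, max_vregs);
//   resolver.move(operand_for_instruction(cur_val), operand_for_instruction(phi));
//   ...   // one move() call per phi input produced by this block
//   // ~PhiResolver() then emits the recorded moves in a cycle-free order
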
//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}
void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}


void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}


ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}
//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}
void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}
void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}
//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}
// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}
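
// Note: belowEqual/aboveEqual are unsigned conditions, so the single compare
// against the array length also catches a negative index (which appears as a
// very large unsigned value) and sends it to the RangeCheckStub.
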
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}

void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
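            // e.g. a multiply by the constant 8 is emitted as a left shift by 3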
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}
void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}

void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // set up registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static Value maxvalue(IfOp* ifop) {
  switch (ifop->cond()) {
    case If::eql: return NULL;
    case If::neq: return NULL;
    case If::lss: // x <  y ? x : y
    case If::leq: // x <= y ? x : y
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

    case If::gtr: // x > y ? y : x
    case If::geq: // x >= y ? y : x
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;
  }
  // unreachable for well-formed conditions; keeps value-returning
  // control flow well defined
  ShouldNotReachHere();
  return NULL;
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for (int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      // x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
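
// Illustrative example (a sketch): for
//
//   int[] a = new int[n];
//   System.arraycopy(b, 0, a, 0, a.length);
//
// length is the ArrayLength of the destination, so length_positive_check and
// dst_null_check are dropped, and with dst_pos a constant zero the
// dst_range_check is dropped too; if the exact array types match, the
// type_check is elided as well.
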
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}


void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_OBJECT);
    __ oop2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
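//
// Illustrative example (a sketch): if block B1 ends with value v on TOS and
// its successor B2 starts with a phi p merging v with another input, then
// move_to_phi() below records "operand(p) := operand(v)" at the end of B1
// and lets the PhiResolver order the moves and break any cycles.
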
// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}

LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
    case T_BYTE:
    case T_BOOLEAN:
      reg = rlock_byte(type);
      break;
    default:
      reg = rlock(x);
      break;
  }

  set_result(x, reg);
  return reg;
}

//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}

void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}

// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
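  // (with G1, this keeps the returned referent alive for a concurrent SATB
  // marking cycle, even though no write to the field has occurred)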
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                               klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}

// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}

//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}

void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}

// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
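//
// For example (a sketch): two uses of the same integer constant within one
// block share a single register; load_constant(LIR_Const*) below dedups by
// comparing bit patterns against the block-local _constants list, and
// block_do_epilog() truncates that list so the register does not leak into
// other blocks.
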
LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
        case T_INT:
        case T_FLOAT:
          if (c->as_jint_bits() != other->as_jint_bits()) continue;
          break;
        case T_LONG:
        case T_DOUBLE:
          if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
          if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
          break;
        case T_OBJECT:
          if (c->as_jobject() != other->as_jobject()) continue;
          break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
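
  // (addr ^ new_val) >> LogOfHRGrainBytes is zero iff addr and new_val lie
  // in the same heap region; only cross-region oop stores need remembered
  // set entries, so same-region stores fall through without taking the stub.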
  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // SERIALGC
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if (((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
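  // Dirty the card covering addr: the card index is (addr >> card_shift) and
  // a dirty card is encoded as the byte value 0.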
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}

//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory
// refs that happen BEFORE the write float down to after the write.  It's OK
// for non-volatile memory refs that happen after the volatile write to float
// up before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
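//
// Illustrative shape of what do_StoreField() below emits for a volatile oop
// field on an MP system (a sketch; the exact ops depend on GC and platform):
//
//   membar_release
//   pre_barrier(field_addr)      // G1 SATB only
//   store value -> field_addr
//   post_barrier(obj, value)     // card mark / G1 remembered set
//   membar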
1638 void LIRGenerator::do_StoreField(StoreField* x) {
1639 bool needs_patching = x->needs_patching();
1640 bool is_volatile = x->field()->is_volatile();
1641 BasicType field_type = x->field_type();
1642 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1644 CodeEmitInfo* info = NULL;
1645 if (needs_patching) {
1646 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1647 info = state_for(x, x->state_before());
1648 } else if (x->needs_null_check()) {
1649 NullCheck* nc = x->explicit_null_check();
1650 if (nc == NULL) {
1651 info = state_for(x);
1652 } else {
1653 info = state_for(nc);
1654 }
1655 }
1658 LIRItem object(x->obj(), this);
1659 LIRItem value(x->value(), this);
1661 object.load_item();
1663 if (is_volatile || needs_patching) {
1664 // load item if field is volatile (fewer special cases for volatiles)
1665 // load item if field not initialized
1666 // load item if field not constant
1667 // because of code patching we cannot inline constants
1668 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1669 value.load_byte_item();
1670 } else {
1671 value.load_item();
1672 }
1673 } else {
1674 value.load_for_store(field_type);
1675 }
1677 set_no_result(x);
1679 #ifndef PRODUCT
1680 if (PrintNotLoaded && needs_patching) {
1681 tty->print_cr(" ###class not loaded at store_%s bci %d",
1682 x->is_static() ? "static" : "field", x->printable_bci());
1683 }
1684 #endif
1686 if (x->needs_null_check() &&
1687 (needs_patching ||
1688 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1689 // emit an explicit null check because the offset is too large
1690 __ null_check(object.result(), new CodeEmitInfo(info));
1691 }
1693 LIR_Address* address;
1694 if (needs_patching) {
1695 // we need to patch the offset in the instruction so don't allow
1696 // generate_address to try to be smart about emitting the -1.
1697 // Otherwise the patching code won't know how to find the
1698 // instruction to patch.
1699 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1700 } else {
1701 address = generate_address(object.result(), x->offset(), field_type);
1702 }
1704 if (is_volatile && os::is_MP()) {
1705 __ membar_release();
1706 }
1708 if (is_oop) {
1709 // Do the pre-write barrier, if any.
1710 pre_barrier(LIR_OprFact::address(address),
1711 LIR_OprFact::illegalOpr /* pre_val */,
1712 true /* do_load*/,
1713 needs_patching,
1714 (info ? new CodeEmitInfo(info) : NULL));
1715 }
1717 if (is_volatile && !needs_patching) {
1718 volatile_field_store(value.result(), address, info);
1719 } else {
1720 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1721 __ store(value.result(), address, info, patch_code);
1722 }
1724 if (is_oop) {
1725     // Storing an oop into the object, so mark the card for the object header
1726 post_barrier(object.result(), value.result());
1727 }
1729 if (is_volatile && os::is_MP()) {
1730 __ membar();
1731 }
1732 }
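// For an oop field, the sequence emitted above is, schematically:
//
//   [membar_release] -> pre_barrier(addr) -> store -> post_barrier(obj) -> [membar]
//
// where the bracketed barriers appear only for volatile fields on MP systems.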
1735 void LIRGenerator::do_LoadField(LoadField* x) {
1736 bool needs_patching = x->needs_patching();
1737 bool is_volatile = x->field()->is_volatile();
1738 BasicType field_type = x->field_type();
1740 CodeEmitInfo* info = NULL;
1741 if (needs_patching) {
1742 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1743 info = state_for(x, x->state_before());
1744 } else if (x->needs_null_check()) {
1745 NullCheck* nc = x->explicit_null_check();
1746 if (nc == NULL) {
1747 info = state_for(x);
1748 } else {
1749 info = state_for(nc);
1750 }
1751 }
1753 LIRItem object(x->obj(), this);
1755 object.load_item();
1757 #ifndef PRODUCT
1758 if (PrintNotLoaded && needs_patching) {
1759 tty->print_cr(" ###class not loaded at load_%s bci %d",
1760 x->is_static() ? "static" : "field", x->printable_bci());
1761 }
1762 #endif
1764 if (x->needs_null_check() &&
1765 (needs_patching ||
1766 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1767 // emit an explicit null check because the offset is too large
1768 __ null_check(object.result(), new CodeEmitInfo(info));
1769 }
1771 LIR_Opr reg = rlock_result(x, field_type);
1772 LIR_Address* address;
1773 if (needs_patching) {
1774 // we need to patch the offset in the instruction so don't allow
1775 // generate_address to try to be smart about emitting the -1.
1776 // Otherwise the patching code won't know how to find the
1777 // instruction to patch.
1778 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1779 } else {
1780 address = generate_address(object.result(), x->offset(), field_type);
1781 }
1783 if (is_volatile && !needs_patching) {
1784 volatile_field_load(address, reg, info);
1785 } else {
1786 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1787 __ load(address, reg, info, patch_code);
1788 }
1790 if (is_volatile && os::is_MP()) {
1791 __ membar_acquire();
1792 }
1793 }
1796 //------------------------java.nio.Buffer.checkIndex------------------------
1798 // int java.nio.Buffer.checkIndex(int)
1799 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1800 // NOTE: by the time we are in checkIndex() we are guaranteed that
1801 // the buffer is non-null (because checkIndex is package-private and
1802 // only called from within other methods in the buffer).
1803 assert(x->number_of_arguments() == 2, "wrong type");
1804 LIRItem buf (x->argument_at(0), this);
1805 LIRItem index(x->argument_at(1), this);
1806 buf.load_item();
1807 index.load_item();
1809 LIR_Opr result = rlock_result(x);
1810 if (GenerateRangeChecks) {
1811 CodeEmitInfo* info = state_for(x);
1812 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1813 if (index.result()->is_constant()) {
1814 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1815 __ branch(lir_cond_belowEqual, T_INT, stub);
1816 } else {
1817 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1818 java_nio_Buffer::limit_offset(), T_INT, info);
1819 __ branch(lir_cond_aboveEqual, T_INT, stub);
1820 }
1821 __ move(index.result(), result);
1822 } else {
1823 // Just load the index into the result register
1824 __ move(index.result(), result);
1825 }
1826 }
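// The range check above amounts to the following (a sketch; the real code
// branches to a shared RangeCheckStub instead of throwing inline):
//
//   if ((juint)index >= (juint)buf.limit) goto range_check_stub;
//   result = index;
//
// The unsigned comparison also catches negative indices.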
1829 //------------------------array access--------------------------------------
1832 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1833 LIRItem array(x->array(), this);
1834 array.load_item();
1835 LIR_Opr reg = rlock_result(x);
1837 CodeEmitInfo* info = NULL;
1838 if (x->needs_null_check()) {
1839 NullCheck* nc = x->explicit_null_check();
1840 if (nc == NULL) {
1841 info = state_for(x);
1842 } else {
1843 info = state_for(nc);
1844 }
1845 }
1846 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1847 }
1850 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1851 bool use_length = x->length() != NULL;
1852 LIRItem array(x->array(), this);
1853 LIRItem index(x->index(), this);
1854 LIRItem length(this);
1855 bool needs_range_check = true;
1857 if (use_length) {
1858 needs_range_check = x->compute_needs_range_check();
1859 if (needs_range_check) {
1860 length.set_instruction(x->length());
1861 length.load_item();
1862 }
1863 }
1865 array.load_item();
1866 if (index.is_constant() && can_inline_as_constant(x->index())) {
1867 // let it be a constant
1868 index.dont_load_item();
1869 } else {
1870 index.load_item();
1871 }
1873 CodeEmitInfo* range_check_info = state_for(x);
1874 CodeEmitInfo* null_check_info = NULL;
1875 if (x->needs_null_check()) {
1876 NullCheck* nc = x->explicit_null_check();
1877 if (nc != NULL) {
1878 null_check_info = state_for(nc);
1879 } else {
1880 null_check_info = range_check_info;
1881 }
1882 }
1884 // emit array address setup early so it schedules better
1885 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1887 if (GenerateRangeChecks && needs_range_check) {
1888 if (use_length) {
1889 // TODO: use a (modified) version of array_range_check that does not require a
1890 // constant length to be loaded to a register
1891 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1892 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1893 } else {
1894 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1895 // The range check performs the null check, so clear it out for the load
1896 null_check_info = NULL;
1897 }
1898 }
1900 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1901 }
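// Schematically (a sketch), the indexed load above is:
//
//   if ((juint)index >= (juint)length) goto range_check_stub;  // length from x->length() or the array header
//   result = array[index];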
1904 void LIRGenerator::do_NullCheck(NullCheck* x) {
1905 if (x->can_trap()) {
1906 LIRItem value(x->obj(), this);
1907 value.load_item();
1908 CodeEmitInfo* info = state_for(x);
1909 __ null_check(value.result(), info);
1910 }
1911 }
1914 void LIRGenerator::do_Throw(Throw* x) {
1915 LIRItem exception(x->exception(), this);
1916 exception.load_item();
1917 set_no_result(x);
1918 LIR_Opr exception_opr = exception.result();
1919 CodeEmitInfo* info = state_for(x, x->state());
1921 #ifndef PRODUCT
1922 if (PrintC1Statistics) {
1923 increment_counter(Runtime1::throw_count_address(), T_INT);
1924 }
1925 #endif
1927 // check if the instruction has an xhandler in any of the nested scopes
1928 bool unwind = false;
1929 if (info->exception_handlers()->length() == 0) {
1930 // this throw is not inside an xhandler
1931 unwind = true;
1932 } else {
1933 // get some idea of the throw type
1934 bool type_is_exact = true;
1935 ciType* throw_type = x->exception()->exact_type();
1936 if (throw_type == NULL) {
1937 type_is_exact = false;
1938 throw_type = x->exception()->declared_type();
1939 }
1940 if (throw_type != NULL && throw_type->is_instance_klass()) {
1941 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1942 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1943 }
1944 }
1946 // do null check before moving exception oop into fixed register
1947 // to avoid a fixed interval with an oop during the null check.
1948 // Use a copy of the CodeEmitInfo because debug information is
1949 // different for null_check and throw.
1950 if (GenerateCompilerNullChecks &&
1951 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1952 // if the exception object wasn't created using new then it might be null.
1953 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1954 }
1956 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1957 // we need to go through the exception lookup path to get JVMTI
1958 // notification done
1959 unwind = false;
1960 }
1962 // move exception oop into fixed register
1963 __ move(exception_opr, exceptionOopOpr());
1965 if (unwind) {
1966 __ unwind_exception(exceptionOopOpr());
1967 } else {
1968 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
1969 }
1970 }
1973 void LIRGenerator::do_RoundFP(RoundFP* x) {
1974 LIRItem input(x->input(), this);
1975 input.load_item();
1976 LIR_Opr input_opr = input.result();
1977 assert(input_opr->is_register(), "why round if value is not in a register?");
1978 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1979 if (input_opr->is_single_fpu()) {
1980 set_result(x, round_item(input_opr)); // This code path not currently taken
1981 } else {
1982 LIR_Opr result = new_register(T_DOUBLE);
1983 set_vreg_flag(result, must_start_in_memory);
1984 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1985 set_result(x, result);
1986 }
1987 }
1989 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
1990 LIRItem base(x->base(), this);
1991 LIRItem idx(this);
1993 base.load_item();
1994 if (x->has_index()) {
1995 idx.set_instruction(x->index());
1996 idx.load_nonconstant();
1997 }
1999 LIR_Opr reg = rlock_result(x, x->basic_type());
2001 int log2_scale = 0;
2002 if (x->has_index()) {
2003 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2004 log2_scale = x->log2_scale();
2005 }
2007 assert(!x->has_index() || idx.value() == x->index(), "should match");
2009 LIR_Opr base_op = base.result();
2010 #ifndef _LP64
2011 if (x->base()->type()->tag() == longTag) {
2012 base_op = new_register(T_INT);
2013 __ convert(Bytecodes::_l2i, base.result(), base_op);
2014 } else {
2015 assert(x->base()->type()->tag() == intTag, "must be");
2016 }
2017 #endif
2019 BasicType dst_type = x->basic_type();
2020 LIR_Opr index_op = idx.result();
2022 LIR_Address* addr;
2023 if (index_op->is_constant()) {
2024 assert(log2_scale == 0, "must not have a scale");
2025 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2026 } else {
2027 #ifdef X86
2028 #ifdef _LP64
2029 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2030 LIR_Opr tmp = new_pointer_register();
2031 __ convert(Bytecodes::_i2l, index_op, tmp);
2032 index_op = tmp;
2033 }
2034 #endif
2035 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2036 #elif defined(ARM)
2037 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2038 #else
2039 if (index_op->is_illegal() || log2_scale == 0) {
2040 #ifdef _LP64
2041 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2042 LIR_Opr tmp = new_pointer_register();
2043 __ convert(Bytecodes::_i2l, index_op, tmp);
2044 index_op = tmp;
2045 }
2046 #endif
2047 addr = new LIR_Address(base_op, index_op, dst_type);
2048 } else {
2049 LIR_Opr tmp = new_pointer_register();
2050 __ shift_left(index_op, log2_scale, tmp);
2051 addr = new LIR_Address(base_op, tmp, dst_type);
2052 }
2053 #endif
2054 }
2056 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2057 __ unaligned_move(addr, reg);
2058 } else {
2059 if (dst_type == T_OBJECT && x->is_wide()) {
2060 __ move_wide(addr, reg);
2061 } else {
2062 __ move(addr, reg);
2063 }
2064 }
2065 }
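// In all variants above, the effective address is (sketch):
//
//   addr = base + (index << log2_scale)   // register index
//   addr = base + index_constant          // constant index (log2_scale must be 0)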
2068 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2069 int log2_scale = 0;
2070 BasicType type = x->basic_type();
2072 if (x->has_index()) {
2073 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2074 log2_scale = x->log2_scale();
2075 }
2077 LIRItem base(x->base(), this);
2078 LIRItem value(x->value(), this);
2079 LIRItem idx(this);
2081 base.load_item();
2082 if (x->has_index()) {
2083 idx.set_instruction(x->index());
2084 idx.load_item();
2085 }
2087 if (type == T_BYTE || type == T_BOOLEAN) {
2088 value.load_byte_item();
2089 } else {
2090 value.load_item();
2091 }
2093 set_no_result(x);
2095 LIR_Opr base_op = base.result();
2096 #ifndef _LP64
2097 if (x->base()->type()->tag() == longTag) {
2098 base_op = new_register(T_INT);
2099 __ convert(Bytecodes::_l2i, base.result(), base_op);
2100 } else {
2101 assert(x->base()->type()->tag() == intTag, "must be");
2102 }
2103 #endif
2105 LIR_Opr index_op = idx.result();
2106 if (log2_scale != 0) {
2107 // temporary fix (platform dependent code without shift on Intel would be better)
2108 index_op = new_pointer_register();
2109 #ifdef _LP64
2110     if (idx.result()->type() == T_INT) {
2111 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2112 } else {
2113 #endif
2114 // TODO: ARM also allows embedded shift in the address
2115 __ move(idx.result(), index_op);
2116 #ifdef _LP64
2117 }
2118 #endif
2119 __ shift_left(index_op, log2_scale, index_op);
2120 }
2121 #ifdef _LP64
2122   else if (!index_op->is_illegal() && index_op->type() == T_INT) {
2123 LIR_Opr tmp = new_pointer_register();
2124 __ convert(Bytecodes::_i2l, index_op, tmp);
2125 index_op = tmp;
2126 }
2127 #endif
2129 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2130 __ move(value.result(), addr);
2131 }
2134 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2135 BasicType type = x->basic_type();
2136 LIRItem src(x->object(), this);
2137 LIRItem off(x->offset(), this);
2139 off.load_item();
2140 src.load_item();
2142 LIR_Opr reg = rlock_result(x, x->basic_type());
2144 get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
2146 #ifndef SERIALGC
2147 // We might be reading the value of the referent field of a
2148 // Reference object in order to attach it back to the live
2149 // object graph. If G1 is enabled then we need to record
2150 // the value that is being returned in an SATB log buffer.
2151 //
2152 // We need to generate code similar to the following...
2153 //
2154 // if (offset == java_lang_ref_Reference::referent_offset) {
2155 // if (src != NULL) {
2156 // if (klass(src)->reference_type() != REF_NONE) {
2157 // pre_barrier(..., reg, ...);
2158 // }
2159 // }
2160 // }
2161 //
2162 // The first non-constant check of either the offset or
2163 // the src operand will be done here; the remainder
2164 // will take place in the generated code stub.
2166 if (UseG1GC && type == T_OBJECT) {
2167 bool gen_code_stub = true; // Assume we need to generate the slow code stub.
2168 bool gen_offset_check = true; // Assume the code stub has to generate the offset guard.
2169 bool gen_source_check = true; // Assume the code stub has to check the src object for null.
2171 if (off.is_constant()) {
2172 jlong off_con = (off.type()->is_int() ?
2173 (jlong) off.get_jint_constant() :
2174 off.get_jlong_constant());
2177 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2178 // The constant offset is something other than referent_offset.
2179 // We can skip generating/checking the remaining guards and
2180 // skip generation of the code stub.
2181 gen_code_stub = false;
2182 } else {
2183 // The constant offset is the same as referent_offset -
2184 // we do not need to generate a runtime offset check.
2185 gen_offset_check = false;
2186 }
2187 }
2189     // We don't need to generate a stub if the source object is an array
2190 if (gen_code_stub && src.type()->is_array()) {
2191 gen_code_stub = false;
2192 }
2194 if (gen_code_stub) {
2195 // We still need to continue with the checks.
2196 if (src.is_constant()) {
2197 ciObject* src_con = src.get_jobject_constant();
2199 if (src_con->is_null_object()) {
2200 // The constant src object is null - We can skip
2201 // generating the code stub.
2202 gen_code_stub = false;
2203 } else {
2204 // Non-null constant source object. We still have to generate
2205 // the slow stub - but we don't need to generate the runtime
2206 // null object check.
2207 gen_source_check = false;
2208 }
2209 }
2210 }
2212 if (gen_code_stub) {
2213       // Temporaries.
2214 LIR_Opr src_klass = new_register(T_OBJECT);
2216 // Get the thread pointer for the pre-barrier
2217 LIR_Opr thread = getThreadPointer();
2219 CodeStub* stub;
2221       // We can generate one runtime check here. Let's start with
2222 // the offset check.
2223 if (gen_offset_check) {
2224 // if (offset == referent_offset) -> slow code stub
2225 // If offset is an int then we can do the comparison with the
2226 // referent_offset constant; otherwise we need to move
2227 // referent_offset into a temporary register and generate
2228 // a reg-reg compare.
2230 LIR_Opr referent_off;
2232 if (off.type()->is_int()) {
2233 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2234 } else {
2235 assert(off.type()->is_long(), "what else?");
2236 referent_off = new_register(T_LONG);
2237 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2238 }
2240 __ cmp(lir_cond_equal, off.result(), referent_off);
2242 // Optionally generate "src == null" check.
2243 stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
2244 src_klass, thread,
2245 gen_source_check);
2247 __ branch(lir_cond_equal, as_BasicType(off.type()), stub);
2248 } else {
2249 if (gen_source_check) {
2250 // offset is a const and equals referent offset
2251 // if (source != null) -> slow code stub
2252 __ cmp(lir_cond_notEqual, src.result(), LIR_OprFact::oopConst(NULL));
2254           // Since we are generating the "src == null" guard here,
2255           // the stub does not need to repeat that check.
2256 stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
2257 src_klass, thread,
2258 false);
2260 __ branch(lir_cond_notEqual, T_OBJECT, stub);
2261 } else {
2262 // We have statically determined that offset == referent_offset
2263 // && src != null so we unconditionally branch to code stub
2264 // to perform the guards and record reg in the SATB log buffer.
2266 stub = new G1UnsafeGetObjSATBBarrierStub(reg, src.result(),
2267 src_klass, thread,
2268 false);
2270 __ branch(lir_cond_always, T_ILLEGAL, stub);
2271 }
2272 }
2274 // Continuation point
2275 __ branch_destination(stub->continuation());
2276 }
2277 }
2278 #endif // SERIALGC
2280 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2281 }
2284 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2285 BasicType type = x->basic_type();
2286 LIRItem src(x->object(), this);
2287 LIRItem off(x->offset(), this);
2288 LIRItem data(x->value(), this);
2290 src.load_item();
2291 if (type == T_BOOLEAN || type == T_BYTE) {
2292 data.load_byte_item();
2293 } else {
2294 data.load_item();
2295 }
2296 off.load_item();
2298 set_no_result(x);
2300 if (x->is_volatile() && os::is_MP()) __ membar_release();
2301 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2302 if (x->is_volatile() && os::is_MP()) __ membar();
2303 }
2306 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2307 LIRItem src(x->object(), this);
2308 LIRItem off(x->offset(), this);
2310 src.load_item();
2311 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2312 // let it be a constant
2313 off.dont_load_item();
2314 } else {
2315 off.load_item();
2316 }
2318 set_no_result(x);
2320 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2321 __ prefetch(addr, is_store);
2322 }
2325 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2326 do_UnsafePrefetch(x, false);
2327 }
2330 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2331 do_UnsafePrefetch(x, true);
2332 }
2335 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2336 int lng = x->length();
2338 for (int i = 0; i < lng; i++) {
2339 SwitchRange* one_range = x->at(i);
2340 int low_key = one_range->low_key();
2341 int high_key = one_range->high_key();
2342 BlockBegin* dest = one_range->sux();
2343 if (low_key == high_key) {
2344 __ cmp(lir_cond_equal, value, low_key);
2345 __ branch(lir_cond_equal, T_INT, dest);
2346 } else if (high_key - low_key == 1) {
2347 __ cmp(lir_cond_equal, value, low_key);
2348 __ branch(lir_cond_equal, T_INT, dest);
2349 __ cmp(lir_cond_equal, value, high_key);
2350 __ branch(lir_cond_equal, T_INT, dest);
2351 } else {
2352 LabelObj* L = new LabelObj();
2353 __ cmp(lir_cond_less, value, low_key);
2354 __ branch(lir_cond_less, L->label());
2355 __ cmp(lir_cond_lessEqual, value, high_key);
2356 __ branch(lir_cond_lessEqual, T_INT, dest);
2357 __ branch_destination(L->label());
2358 }
2359 }
2360 __ jump(default_sux);
2361 }
2364 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2365 SwitchRangeList* res = new SwitchRangeList();
2366 int len = x->length();
2367 if (len > 0) {
2368 BlockBegin* sux = x->sux_at(0);
2369 int key = x->lo_key();
2370 BlockBegin* default_sux = x->default_sux();
2371 SwitchRange* range = new SwitchRange(key, sux);
2372 for (int i = 0; i < len; i++, key++) {
2373 BlockBegin* new_sux = x->sux_at(i);
2374 if (sux == new_sux) {
2375 // still in same range
2376 range->set_high_key(key);
2377 } else {
2378 // skip tests which explicitly dispatch to the default
2379 if (sux != default_sux) {
2380 res->append(range);
2381 }
2382 range = new SwitchRange(key, new_sux);
2383 }
2384 sux = new_sux;
2385 }
2386 if (res->length() == 0 || res->last() != range) res->append(range);
2387 }
2388 return res;
2389 }
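// For example (illustrative only), a tableswitch with lo_key == 0 and
// successors {A, A, B, default, C} yields the ranges [0,1]->A, [2,2]->B
// and [4,4]->C; the dispatch at key 3 is dropped because do_SwitchRanges
// already ends with an unconditional jump to the default successor.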
2392 // we expect the keys to be sorted by increasing value
2393 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2394 SwitchRangeList* res = new SwitchRangeList();
2395 int len = x->length();
2396 if (len > 0) {
2397 BlockBegin* default_sux = x->default_sux();
2398 int key = x->key_at(0);
2399 BlockBegin* sux = x->sux_at(0);
2400 SwitchRange* range = new SwitchRange(key, sux);
2401 for (int i = 1; i < len; i++) {
2402 int new_key = x->key_at(i);
2403 BlockBegin* new_sux = x->sux_at(i);
2404 if (key+1 == new_key && sux == new_sux) {
2405 // still in same range
2406 range->set_high_key(new_key);
2407 } else {
2408 // skip tests which explicitly dispatch to the default
2409 if (range->sux() != default_sux) {
2410 res->append(range);
2411 }
2412 range = new SwitchRange(new_key, new_sux);
2413 }
2414 key = new_key;
2415 sux = new_sux;
2416 }
2417 if (res->length() == 0 || res->last() != range) res->append(range);
2418 }
2419 return res;
2420 }
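// For example (illustrative only), a lookupswitch with the sorted pairs
// {1->A, 2->A, 3->A, 10->B} yields the ranges [1,3]->A and [10,10]->B,
// since consecutive keys sharing a successor are merged into one range.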
2423 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2424 LIRItem tag(x->tag(), this);
2425 tag.load_item();
2426 set_no_result(x);
2428 if (x->is_safepoint()) {
2429 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2430 }
2432 // move values into phi locations
2433 move_to_phi(x->state());
2435 int lo_key = x->lo_key();
2436 int hi_key = x->hi_key();
2437 int len = x->length();
2438 LIR_Opr value = tag.result();
2439 if (UseTableRanges) {
2440 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2441 } else {
2442 for (int i = 0; i < len; i++) {
2443 __ cmp(lir_cond_equal, value, i + lo_key);
2444 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2445 }
2446 __ jump(x->default_sux());
2447 }
2448 }
2451 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2452 LIRItem tag(x->tag(), this);
2453 tag.load_item();
2454 set_no_result(x);
2456 if (x->is_safepoint()) {
2457 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2458 }
2460 // move values into phi locations
2461 move_to_phi(x->state());
2463 LIR_Opr value = tag.result();
2464 if (UseTableRanges) {
2465 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2466 } else {
2467 int len = x->length();
2468 for (int i = 0; i < len; i++) {
2469 __ cmp(lir_cond_equal, value, x->key_at(i));
2470 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2471 }
2472 __ jump(x->default_sux());
2473 }
2474 }
2477 void LIRGenerator::do_Goto(Goto* x) {
2478 set_no_result(x);
2480 if (block()->next()->as_OsrEntry()) {
2481 // need to free up storage used for OSR entry point
2482 LIR_Opr osrBuffer = block()->next()->operand();
2483 BasicTypeList signature;
2484 signature.append(T_INT);
2485 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2486 __ move(osrBuffer, cc->args()->at(0));
2487 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2488 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2489 }
2491 if (x->is_safepoint()) {
2492 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2494 // increment backedge counter if needed
2495 CodeEmitInfo* info = state_for(x, state);
2496 increment_backedge_counter(info, info->stack()->bci());
2497 CodeEmitInfo* safepoint_info = state_for(x, state);
2498 __ safepoint(safepoint_poll_register(), safepoint_info);
2499 }
2501   // Gotos can be folded Ifs; handle this case.
2502 if (x->should_profile()) {
2503 ciMethod* method = x->profiled_method();
2504 assert(method != NULL, "method should be set if branch is profiled");
2505 ciMethodData* md = method->method_data_or_null();
2506 assert(md != NULL, "Sanity");
2507 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2508 assert(data != NULL, "must have profiling data");
2509 int offset;
2510 if (x->direction() == Goto::taken) {
2511 assert(data->is_BranchData(), "need BranchData for two-way branches");
2512 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2513 } else if (x->direction() == Goto::not_taken) {
2514 assert(data->is_BranchData(), "need BranchData for two-way branches");
2515 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2516 } else {
2517 assert(data->is_JumpData(), "need JumpData for branches");
2518 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2519 }
2520 LIR_Opr md_reg = new_register(T_OBJECT);
2521 __ oop2reg(md->constant_encoding(), md_reg);
2523 increment_counter(new LIR_Address(md_reg, offset,
2524 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2525 }
2527 // emit phi-instruction move after safepoint since this simplifies
2528   // describing the state at the safepoint.
2529 move_to_phi(x->state());
2531 __ jump(x->default_sux());
2532 }
2535 void LIRGenerator::do_Base(Base* x) {
2536 __ std_entry(LIR_OprFact::illegalOpr);
2537 // Emit moves from physical registers / stack slots to virtual registers
2538 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2539 IRScope* irScope = compilation()->hir()->top_scope();
2540 int java_index = 0;
2541 for (int i = 0; i < args->length(); i++) {
2542 LIR_Opr src = args->at(i);
2543 assert(!src->is_illegal(), "check");
2544 BasicType t = src->type();
2546 // Types which are smaller than int are passed as int, so
2547     // correct the type that is passed.
2548 switch (t) {
2549 case T_BYTE:
2550 case T_BOOLEAN:
2551 case T_SHORT:
2552 case T_CHAR:
2553 t = T_INT;
2554 break;
2555 }
2557 LIR_Opr dest = new_register(t);
2558 __ move(src, dest);
2560 // Assign new location to Local instruction for this local
2561 Local* local = x->state()->local_at(java_index)->as_Local();
2562 assert(local != NULL, "Locals for incoming arguments must have been created");
2563 #ifndef __SOFTFP__
2564     // The Java calling convention passes double as long and float as int.
2565 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2566 #endif // __SOFTFP__
2567 local->set_operand(dest);
2568 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2569 java_index += type2size[t];
2570 }
2572 if (compilation()->env()->dtrace_method_probes()) {
2573 BasicTypeList signature;
2574 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2575 signature.append(T_OBJECT); // methodOop
2576 LIR_OprList* args = new LIR_OprList();
2577 args->append(getThreadPointer());
2578 LIR_Opr meth = new_register(T_OBJECT);
2579 __ oop2reg(method()->constant_encoding(), meth);
2580 args->append(meth);
2581 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2582 }
2584 if (method()->is_synchronized()) {
2585 LIR_Opr obj;
2586 if (method()->is_static()) {
2587 obj = new_register(T_OBJECT);
2588 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2589 } else {
2590 Local* receiver = x->state()->local_at(0)->as_Local();
2591 assert(receiver != NULL, "must already exist");
2592 obj = receiver->operand();
2593 }
2594 assert(obj->is_valid(), "must be valid");
2596 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2597 LIR_Opr lock = new_register(T_INT);
2598 __ load_stack_address_monitor(0, lock);
2600 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2601 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2603       // receiver is guaranteed non-NULL, so we don't need a CodeEmitInfo
2604 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2605 }
2606 }
2608 // increment invocation counters if needed
2609 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2610 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2611 increment_invocation_counter(info);
2612 }
2614 // all blocks with a successor must end with an unconditional jump
2615 // to the successor even if they are consecutive
2616 __ jump(x->default_sux());
2617 }
2620 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2621   // construct our frame and model the production of the incoming pointer
2622   // to the OSR buffer.
2623 __ osr_entry(LIR_Assembler::osrBufferPointer());
2624 LIR_Opr result = rlock_result(x);
2625 __ move(LIR_Assembler::osrBufferPointer(), result);
2626 }
2629 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2630 int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
2631 for (; i < args->length(); i++) {
2632 LIRItem* param = args->at(i);
2633 LIR_Opr loc = arg_list->at(i);
2634 if (loc->is_register()) {
2635 param->load_item_force(loc);
2636 } else {
2637 LIR_Address* addr = loc->as_address_ptr();
2638 param->load_for_store(addr->type());
2639 if (addr->type() == T_OBJECT) {
2640 __ move_wide(param->result(), addr);
2641 } else
2642 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2643 __ unaligned_move(param->result(), addr);
2644 } else {
2645 __ move(param->result(), addr);
2646 }
2647 }
2648 }
2650 if (x->has_receiver()) {
2651 LIRItem* receiver = args->at(0);
2652 LIR_Opr loc = arg_list->at(0);
2653 if (loc->is_register()) {
2654 receiver->load_item_force(loc);
2655 } else {
2656 assert(loc->is_address(), "just checking");
2657 receiver->load_for_store(T_OBJECT);
2658 __ move_wide(receiver->result(), loc->as_address_ptr());
2659 }
2660 }
2661 }
2664 // Visits all arguments, returns appropriate items without loading them
2665 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2666 LIRItemList* argument_items = new LIRItemList();
2667 if (x->has_receiver()) {
2668 LIRItem* receiver = new LIRItem(x->receiver(), this);
2669 argument_items->append(receiver);
2670 }
2671 if (x->is_invokedynamic()) {
2672 // Insert a dummy for the synthetic MethodHandle argument.
2673 argument_items->append(NULL);
2674 }
2675 int idx = x->has_receiver() ? 1 : 0;
2676 for (int i = 0; i < x->number_of_arguments(); i++) {
2677 LIRItem* param = new LIRItem(x->argument_at(i), this);
2678 argument_items->append(param);
2679 idx += (param->type()->is_double_word() ? 2 : 1);
2680 }
2681 return argument_items;
2682 }
2685 // The invoke with receiver has following phases:
2686 // a) traverse and load/lock receiver;
2687 // b) traverse all arguments -> item-array (invoke_visit_argument)
2688 // c) push receiver on stack
2689 // d) load each of the items and push on stack
2690 // e) unlock receiver
2691 // f) move receiver into receiver-register %o0
2692 // g) lock result registers and emit call operation
2693 //
2694 // Before issuing a call, we must spill-save all values on stack
2695 // that are in caller-save registers. "spill-save" moves those registers
2696 // either into a free callee-save register or spills them if no free
2697 // callee-save register is available.
2698 //
2699 // The problem is where to invoke spill-save.
2700 // - if invoked between e) and f), we may lock callee save
2701 // register in "spill-save" that destroys the receiver register
2702 // before f) is executed
2703 // - if we rearrange f) to be earlier, by loading %o0, it
2704 // may destroy a value on the stack that is currently in %o0
2705 // and is waiting to be spilled
2706 // - if we keep the receiver locked while doing spill-save,
2707 // we cannot spill it as it is spill-locked
2708 //
2709 void LIRGenerator::do_Invoke(Invoke* x) {
2710 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2712 LIR_OprList* arg_list = cc->args();
2713 LIRItemList* args = invoke_visit_arguments(x);
2714 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2716 // setup result register
2717 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2718 if (x->type() != voidType) {
2719 result_register = result_register_for(x->type());
2720 }
2722 CodeEmitInfo* info = state_for(x, x->state());
2724 // invokedynamics can deoptimize.
2725 CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
2727 invoke_load_arguments(x, args, arg_list);
2729 if (x->has_receiver()) {
2730 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2731 receiver = args->at(0)->result();
2732 }
2734 // emit invoke code
2735 bool optimized = x->target_is_loaded() && x->target_is_final();
2736 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2738 // JSR 292
2739 // Preserve the SP over MethodHandle call sites.
2740 ciMethod* target = x->target();
2741 if (target->is_method_handle_invoke()) {
2742 info->set_is_method_handle_invoke(true);
2743 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2744 }
2746 switch (x->code()) {
2747 case Bytecodes::_invokestatic:
2748 __ call_static(target, result_register,
2749 SharedRuntime::get_resolve_static_call_stub(),
2750 arg_list, info);
2751 break;
2752 case Bytecodes::_invokespecial:
2753 case Bytecodes::_invokevirtual:
2754 case Bytecodes::_invokeinterface:
2755       // for a final target we still produce an inline cache, in order
2756       // to be able to call it in mixed mode
2757 if (x->code() == Bytecodes::_invokespecial || optimized) {
2758 __ call_opt_virtual(target, receiver, result_register,
2759 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2760 arg_list, info);
2761 } else if (x->vtable_index() < 0) {
2762 __ call_icvirtual(target, receiver, result_register,
2763 SharedRuntime::get_resolve_virtual_call_stub(),
2764 arg_list, info);
2765 } else {
2766 int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2767 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2768 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2769 }
2770 break;
2771 case Bytecodes::_invokedynamic: {
2772 ciBytecodeStream bcs(x->scope()->method());
2773 bcs.force_bci(x->state()->bci());
2774 assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2775 ciCPCache* cpcache = bcs.get_cpcache();
2777 // Get CallSite offset from constant pool cache pointer.
2778 int index = bcs.get_method_index();
2779 size_t call_site_offset = cpcache->get_f1_offset(index);
2781 // If this invokedynamic call site hasn't been executed yet in
2782 // the interpreter, the CallSite object in the constant pool
2783 // cache is still null and we need to deoptimize.
2784 if (cpcache->is_f1_null_at(index)) {
2785 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
2786 // clone all handlers. This is handled transparently in other
2787 // places by the CodeEmitInfo cloning logic but is handled
2788 // specially here because a stub isn't being used.
2789 x->set_exception_handlers(new XHandlers(x->exception_handlers()));
2791 DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2792 __ jump(deopt_stub);
2793 }
2795 // Use the receiver register for the synthetic MethodHandle
2796 // argument.
2797 receiver = LIR_Assembler::receiverOpr();
2798 LIR_Opr tmp = new_register(objectType);
2800 // Load CallSite object from constant pool cache.
2801 __ oop2reg(cpcache->constant_encoding(), tmp);
2802 __ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
2804 // Load target MethodHandle from CallSite object.
2805 __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2807 __ call_dynamic(target, receiver, result_register,
2808 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2809 arg_list, info);
2810 break;
2811 }
2812 default:
2813 ShouldNotReachHere();
2814 break;
2815 }
2817 // JSR 292
2818 // Restore the SP after MethodHandle call sites.
2819 if (target->is_method_handle_invoke()) {
2820 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2821 }
2823 if (x->type()->is_float() || x->type()->is_double()) {
2824 // Force rounding of results from non-strictfp when in strictfp
2825 // scope (or when we don't know the strictness of the callee, to
2826 // be safe.)
2827 if (method()->is_strict()) {
2828 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2829 result_register = round_item(result_register);
2830 }
2831 }
2832 }
2834 if (result_register->is_valid()) {
2835 LIR_Opr result = rlock_result(x);
2836 __ move(result_register, result);
2837 }
2838 }
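// Dispatch selection above, in summary:
//
//   invokestatic                           -> call_static
//   invokespecial, or loaded final target  -> call_opt_virtual
//   virtual/interface, no vtable index     -> call_icvirtual (inline cache)
//   virtual with a known vtable index      -> call_virtual at the computed vtable offset
//   invokedynamic                          -> call_dynamic on the CallSite target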
2841 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2842 assert(x->number_of_arguments() == 1, "wrong type");
2843 LIRItem value (x->argument_at(0), this);
2844 LIR_Opr reg = rlock_result(x);
2845 value.load_item();
2846 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2847 __ move(tmp, reg);
2848 }
2852 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2853 void LIRGenerator::do_IfOp(IfOp* x) {
2854 #ifdef ASSERT
2855 {
2856 ValueTag xtag = x->x()->type()->tag();
2857 ValueTag ttag = x->tval()->type()->tag();
2858 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2859 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2860 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2861 }
2862 #endif
2864 LIRItem left(x->x(), this);
2865 LIRItem right(x->y(), this);
2866 left.load_item();
2867 if (can_inline_as_constant(right.value())) {
2868 right.dont_load_item();
2869 } else {
2870 right.load_item();
2871 }
2873 LIRItem t_val(x->tval(), this);
2874 LIRItem f_val(x->fval(), this);
2875 t_val.dont_load_item();
2876 f_val.dont_load_item();
2877 LIR_Opr reg = rlock_result(x);
2879 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2880 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2881 }
2884 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2885 switch (x->id()) {
2886 case vmIntrinsics::_intBitsToFloat :
2887 case vmIntrinsics::_doubleToRawLongBits :
2888 case vmIntrinsics::_longBitsToDouble :
2889 case vmIntrinsics::_floatToRawIntBits : {
2890 do_FPIntrinsics(x);
2891 break;
2892 }
2894 case vmIntrinsics::_currentTimeMillis: {
2895 assert(x->number_of_arguments() == 0, "wrong type");
2896 LIR_Opr reg = result_register_for(x->type());
2897 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
2898 reg, new LIR_OprList());
2899 LIR_Opr result = rlock_result(x);
2900 __ move(reg, result);
2901 break;
2902 }
2904 case vmIntrinsics::_nanoTime: {
2905 assert(x->number_of_arguments() == 0, "wrong type");
2906 LIR_Opr reg = result_register_for(x->type());
2907 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
2908 reg, new LIR_OprList());
2909 LIR_Opr result = rlock_result(x);
2910 __ move(reg, result);
2911 break;
2912 }
2914 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2915 case vmIntrinsics::_getClass: do_getClass(x); break;
2916 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2918 case vmIntrinsics::_dlog: // fall through
2919 case vmIntrinsics::_dlog10: // fall through
2920 case vmIntrinsics::_dabs: // fall through
2921 case vmIntrinsics::_dsqrt: // fall through
2922 case vmIntrinsics::_dtan: // fall through
2923 case vmIntrinsics::_dsin : // fall through
2924 case vmIntrinsics::_dcos : do_MathIntrinsic(x); break;
2925 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
2927 // java.nio.Buffer.checkIndex
2928 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
2930 case vmIntrinsics::_compareAndSwapObject:
2931 do_CompareAndSwap(x, objectType);
2932 break;
2933 case vmIntrinsics::_compareAndSwapInt:
2934 do_CompareAndSwap(x, intType);
2935 break;
2936 case vmIntrinsics::_compareAndSwapLong:
2937 do_CompareAndSwap(x, longType);
2938 break;
2940 // sun.misc.AtomicLongCSImpl.attemptUpdate
2941 case vmIntrinsics::_attemptUpdate:
2942 do_AttemptUpdate(x);
2943 break;
2945 case vmIntrinsics::_Reference_get:
2946 do_Reference_get(x);
2947 break;
2949 default: ShouldNotReachHere(); break;
2950 }
2951 }
2953 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
2954 // Need recv in a temporary register so it interferes with the other temporaries
2955 LIR_Opr recv = LIR_OprFact::illegalOpr;
2956 LIR_Opr mdo = new_register(T_OBJECT);
2957 // tmp is used to hold the counters on SPARC
2958 LIR_Opr tmp = new_pointer_register();
2959 if (x->recv() != NULL) {
2960 LIRItem value(x->recv(), this);
2961 value.load_item();
2962 recv = new_register(T_OBJECT);
2963 __ move(value.result(), recv);
2964 }
2965 __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
2966 }
2968 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
2969   // We can safely ignore accessors here, since c2 will inline them anyway;
2970   // accessors are also always mature.
2971 if (!x->inlinee()->is_accessor()) {
2972 CodeEmitInfo* info = state_for(x, x->state(), true);
2973 // Notify the runtime very infrequently only to take care of counter overflows
2974 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
2975 }
2976 }
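// With a notification frequency of (1 << Tier23InlineeNotifyFreqLog) - 1,
// the inlinee's counter triggers a runtime notification roughly once every
// 2^Tier23InlineeNotifyFreqLog invocations, so counter overflows are seen
// even for inlined methods (see the sketch after increment_event_counter_impl).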
2978 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
2979 int freq_log;
2980 int level = compilation()->env()->comp_level();
2981 if (level == CompLevel_limited_profile) {
2982 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
2983 } else if (level == CompLevel_full_profile) {
2984 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
2985 } else {
2986 ShouldNotReachHere();
2987 }
2988 // Increment the appropriate invocation/backedge counter and notify the runtime.
2989 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
2990 }
2992 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
2993 ciMethod *method, int frequency,
2994 int bci, bool backedge, bool notify) {
2995   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
2996 int level = _compilation->env()->comp_level();
2997 assert(level > CompLevel_simple, "Shouldn't be here");
2999 int offset = -1;
3000 LIR_Opr counter_holder = new_register(T_OBJECT);
3001 LIR_Opr meth;
3002 if (level == CompLevel_limited_profile) {
3003 offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
3004 methodOopDesc::invocation_counter_offset());
3005 __ oop2reg(method->constant_encoding(), counter_holder);
3006 meth = counter_holder;
3007 } else if (level == CompLevel_full_profile) {
3008 offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
3009 methodDataOopDesc::invocation_counter_offset());
3010 ciMethodData* md = method->method_data_or_null();
3011 assert(md != NULL, "Sanity");
3012 __ oop2reg(md->constant_encoding(), counter_holder);
3013 meth = new_register(T_OBJECT);
3014 __ oop2reg(method->constant_encoding(), meth);
3015 } else {
3016 ShouldNotReachHere();
3017 }
3018 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3019 LIR_Opr result = new_register(T_INT);
3020 __ load(counter, result);
3021 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3022 __ store(result, counter);
3023 if (notify) {
3024 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3025 __ logical_and(result, mask, result);
3026 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3027     // The bci for info can point to the cmp; for ifs we want the if bci
3028 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3029 __ branch(lir_cond_equal, T_INT, overflow);
3030 __ branch_destination(overflow->continuation());
3031 }
3032 }
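// The counter update and notify test above amount to (a sketch; 'counter'
// stands for the raw invocation/backedge counter word):
//
//   counter += InvocationCounter::count_increment;
//   if ((counter & (frequency << InvocationCounter::count_shift)) == 0)
//     goto CounterOverflowStub;  // notify the runtime
//
// With frequency == 2^n - 1 the masked test fires once every 2^n
// increments, which is why the assert above requires that shape.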
3034 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3035 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3036 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3038 if (x->pass_thread()) {
3039 signature->append(T_ADDRESS);
3040 args->append(getThreadPointer());
3041 }
3043 for (int i = 0; i < x->number_of_arguments(); i++) {
3044 Value a = x->argument_at(i);
3045 LIRItem* item = new LIRItem(a, this);
3046 item->load_item();
3047 args->append(item->result());
3048 signature->append(as_BasicType(a->type()));
3049 }
3051 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3052 if (x->type() == voidType) {
3053 set_no_result(x);
3054 } else {
3055 __ move(result, rlock_result(x));
3056 }
3057 }
3059 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3060 LIRItemList args(1);
3061 LIRItem value(arg1, this);
3062 args.append(&value);
3063 BasicTypeList signature;
3064 signature.append(as_BasicType(arg1->type()));
3066 return call_runtime(&signature, &args, entry, result_type, info);
3067 }
3070 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3071 LIRItemList args(2);
3072 LIRItem value1(arg1, this);
3073 LIRItem value2(arg2, this);
3074 args.append(&value1);
3075 args.append(&value2);
3076 BasicTypeList signature;
3077 signature.append(as_BasicType(arg1->type()));
3078 signature.append(as_BasicType(arg2->type()));
3080 return call_runtime(&signature, &args, entry, result_type, info);
3081 }
3084 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3085 address entry, ValueType* result_type, CodeEmitInfo* info) {
3086 // get a result register
3087 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3088 LIR_Opr result = LIR_OprFact::illegalOpr;
3089 if (result_type->tag() != voidTag) {
3090 result = new_register(result_type);
3091 phys_reg = result_register_for(result_type);
3092 }
3094 // move the arguments into the correct location
3095 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3096 assert(cc->length() == args->length(), "argument mismatch");
3097 for (int i = 0; i < args->length(); i++) {
3098 LIR_Opr arg = args->at(i);
3099 LIR_Opr loc = cc->at(i);
3100 if (loc->is_register()) {
3101 __ move(arg, loc);
3102 } else {
3103 LIR_Address* addr = loc->as_address_ptr();
3104 // if (!can_store_as_constant(arg)) {
3105 // LIR_Opr tmp = new_register(arg->type());
3106 // __ move(arg, tmp);
3107 // arg = tmp;
3108 // }
3109 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3110 __ unaligned_move(arg, addr);
3111 } else {
3112 __ move(arg, addr);
3113 }
3114 }
3115 }
3117 if (info) {
3118 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3119 } else {
3120 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3121 }
3122 if (result->is_valid()) {
3123 __ move(phys_reg, result);
3124 }
3125 return result;
3126 }
3129 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3130 address entry, ValueType* result_type, CodeEmitInfo* info) {
3131 // get a result register
3132 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3133 LIR_Opr result = LIR_OprFact::illegalOpr;
3134 if (result_type->tag() != voidTag) {
3135 result = new_register(result_type);
3136 phys_reg = result_register_for(result_type);
3137 }
3139 // move the arguments into the correct location
3140 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3142 assert(cc->length() == args->length(), "argument mismatch");
3143 for (int i = 0; i < args->length(); i++) {
3144 LIRItem* arg = args->at(i);
3145 LIR_Opr loc = cc->at(i);
3146 if (loc->is_register()) {
3147 arg->load_item_force(loc);
3148 } else {
3149 LIR_Address* addr = loc->as_address_ptr();
3150 arg->load_for_store(addr->type());
3151 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3152 __ unaligned_move(arg->result(), addr);
3153 } else {
3154 __ move(arg->result(), addr);
3155 }
3156 }
3157 }
3159 if (info) {
3160 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3161 } else {
3162 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3163 }
3164 if (result->is_valid()) {
3165 __ move(phys_reg, result);
3166 }
3167 return result;
3168 }