Thu, 07 Oct 2010 08:06:06 -0700
6983240: guarantee((Solaris::min_stack_allowed >= (StackYellowPages+StackRedPages...) wrong
Summary: min_stack_allowed is a compile-time constant and Stack*Pages are settable
Reviewed-by: dholmes, kvn
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_x86.cpp.incl"

#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
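  // comiss/comisd set PF (unordered) when the input is NaN and CF when
  // the input compares below zero; the x87 path above leaves the same
  // flags by copying the FPU status word into EFLAGS (fnstsw_ax/sahf).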
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
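  // pass the method and bci as stack parameters because all registers
  // must be preserved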
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
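  // calling convention for the Runtime1 new_instance stubs: klass in rdx,
  // new object returned in rax (see the asserts below)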
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}

// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}

// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
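  // a method without FPU code can use the cheaper _nofpu stub variant,
  // which does not have to save and restore the FPU state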
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
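//
// The patch record is disguised as a "movl rax, imm32" instruction so the
// disassembly stays readable; the runtime locates it at a fixed offset
// (_patch_info_offset) before the return address of the call into the
// patching stub.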

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    if (_obj == tmp) tmp = rbx;
    __ push(tmp);
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
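
  // 0xB8 is the movl opcode; the four bytes that follow pose as its
  // imm32 operand but actually hold a pad byte, the offset back to the
  // being_initialized entry, the bytes to skip, and the bytes to copy.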
  __ a_byte(0xB8);
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
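
  // redirect the (now copied-out) patch site to this stub, so that the
  // first execution of the site enters the patching code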
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id) {
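    // the site now contains a jump rather than the original oop-bearing
    // movl, so the oop relocation at _pc_start must be disabled until
    // patching restores the instruction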
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address)_pc_start, relocInfo::oop_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}

void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_array_store_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
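  // the first call goes through the resolve stub; once the static call is
  // resolved the call site is patched to invoke the arraycopy method directly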
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {

  // At this point we know that marking is in progress
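  // The SATB pre-barrier needs the value the field held before the store:
  // load it, and if it is non-NULL pass it to the slow-path runtime stub.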

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);

  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);

}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
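  // a NULL new value can never create a cross-region reference, so the
  // card only has to be dirtied for non-NULL stores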
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}

#endif // SERIALGC
/////////////////////////////////////////////////////////////////////////////

#undef __