Wed, 29 Mar 2017 09:41:51 +0800
#4662 TieredCompilation is turned off.
TieredCompilation is not supported yet.
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_mips.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_mips.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

#define __ ce->masm()->
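
// Shorthand used throughout this file: __ expands to ce->masm()->, so
// "__ instr(...)" emits code through the LIR assembler's MacroAssembler.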

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
}
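
// A note on the branch patterns used throughout this file: MIPS has
// architectural branch delay slots, so every call/branch emitted here is
// followed by __ delayed()->nop() to fill the slot. b_far() is used instead
// of a plain branch wherever the target may lie outside the +/-128KB range
// of the 16-bit branch displacement.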

#ifdef TIERED
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b_far(_continuation);
  __ delayed()->nop();
}
#endif // TIERED

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
#ifdef OPT_RANGECHECK
  if (_throw_pc != -1) {
    ce->compilation()->null_check_table()->append(_throw_pc, __ offset());
  }
#endif
  __ bind(_entry);
  // Pass the array index on the stack because all registers must be
  // preserved for the runtime call.
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { // Fu: 2013-08-14
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) { // Fu: 2013-08-14
  tty->print_cr("PredicateFailedStub::emit_code not implemented yet!");
  Unimplemented();
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

// T4 (A4 on LP64) is used as the klass register and V0 as the result
// register; this MUST agree with Runtime1::generate_code_for.
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  // FIXME: should the klass really be passed in A4? (aoqi)
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif

  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

// T2 is used as the length register, T4 (A4 on LP64) as the klass register,
// and V0 as the result register; this MUST agree with
// Runtime1::generate_code_for.
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == T2, "length must be in T2");
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  // FIXME: should the klass really be passed in A4? (aoqi)
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif

  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == T2, "length must be in T2");
#ifndef _LP64
  assert(_klass_reg->as_register() == T4, "klass_reg must be in T4");
#else
  // FIXME: should the klass really be passed in A4? (aoqi)
  assert(_klass_reg->as_register() == A4, "klass_reg must be in A4");
#endif
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == V0, "result must be in V0");
  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}
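
// The monitorenter/monitorexit runtime stubs come in *_nofpu variants,
// selected below when the compiled method contains no FPU code; those
// variants skip saving and restoring the FPU register state around the call.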

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->is_single_cpu() ? _lock_reg->as_register() : _lock_reg->as_register_lo(), 0);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b_far(_continuation);
  __ delayed()->nop();
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by the fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();

  __ b_far(_continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
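
// A rough sketch of the stub that emit_code() below lays out (illustrative
// only, not authoritative):
//
//   being_initialized_entry:
//     <copy of the patched instructions, padded with nops>
//     <being-initialized check: branch to call_patch, or jump back
//      into the main code                         (load_klass only)>
//     <patch record word describing the pieces above>
//   call_patch:
//     <call into the patching runtime stub>
//     <branch back to the patch site, plus nop padding for deopt>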

int PatchingStub::_patch_info_offset = -NativeCall::instruction_size;
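// The runtime locates the patch record at a fixed negative offset from the
// return address of the call into the patching stub; the assert on
// _patch_info_offset in emit_code() below keeps this value in sync.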

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // On x86 the patched instruction is 5-7 bytes long, so the patch site must
  // be forced onto a double-word boundary to keep it from spanning a cache
  // line: it is nearly impossible to invalidate the icaches of other
  // processors, and with aggressive prefetch there is no telling what code
  // they might have fetched. On MIPS every instruction is 4 bytes wide and
  // naturally aligned, so this port currently does no alignment here.

  // The NativeJump support is not finished; it is unclear what, if anything,
  // is needed here. FIXME
  //masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // Static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load-klass instruction for use by the
    // being-initialized case
    address start = __ pc();
    jobject o = NULL;
    int oop_index = __ oop_recorder()->allocate_oop_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ relocate(rspec);
#ifndef _LP64
    //by_css
    __ lui(_obj, Assembler::split_high((int)o));
    __ addiu(_obj, _obj, Assembler::split_low((int)o));
#else
    // This should be the same code as in jobject2reg_with_patching.
    __ li48(_obj, (long)o);
#endif
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    assert((_bytes_to_copy & 3) == 0, "change this code");
    address start = __ pc();
    for (int i = 0; i < _bytes_to_copy; i += 4) {
      __ emit_int32(*(int*)(_pc_start + i));
      // zero the patched site so it reads as nops (@jerome)
      *(int*)(_pc_start + i) = 0;
    }
    while ((intx)__ pc() - (intx)start < NativeCall::instruction_size) {
      __ nop();
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != NOREG, "must be a valid register");
#ifndef OPT_THREAD
    // FIXME: does T8 need to be saved here?
    Register thread = T8;
    __ get_thread(thread);
#else
    Register thread = TREG;
#endif
    __ ld(AT, _obj, in_bytes(InstanceKlass::init_thread_offset()));
    __ bne(thread, AT, call_patch);
    __ delayed()->nop();

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b_far(_patch_site_continuation);
    __ delayed()->nop();
    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but for alignment we
  // need 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + patch_info_size;

#ifdef _LP64
  /* Jin: On MIPS64 these values are much larger than on x86 and may not fit
   * in a byte, e.g.:
   *   - bytes_to_skip = 0x190;
   *   - _bytes_to_copy = 0x20;
   *   - being_initialized_entry_offset = 0x1b0;
   *
   * To minimize changes to the shared code, the values are divided by 4 when
   * generated (each is a multiple of the 4-byte instruction size), so they
   * fit into the byte fields of the packed record below.
   *
   * See Runtime1::patch_code() (share/vm/c1/c1_Runtime1.cpp, around line 918).
   */
  being_initialized_entry_offset /= 4;
  _bytes_to_copy /= 4;
  bytes_to_skip /= 4;
#endif
  // Patch record layout, one byte per field:
  // being_initialized_entry offset | bytes to skip | bytes to copy
  assert((char)being_initialized_entry_offset == being_initialized_entry_offset, "just check");
  assert((char)bytes_to_skip == bytes_to_skip, "just check");
  assert((char)_bytes_to_copy == _bytes_to_copy, "just check");
  __ emit_int32(being_initialized_entry_offset << 8 | (bytes_to_skip << 16) | (_bytes_to_copy << 24));
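  // The packed record above decodes on the runtime side roughly as follows
  // (a sketch only; the authoritative decode is in Runtime1::patch_code, and
  // the names used here are hypothetical):
  //   int record    = *(int*)patch_record_address;   // hypothetical name
  //   int entry_off = (record >>  8) & 0xFF;  // being_initialized_entry_offset
  //   int skip      = (record >> 16) & 0xFF;  // bytes_to_skip
  //   int copy      = (record >> 24) & 0xFF;  // _bytes_to_copy
  // On LP64 each field was pre-divided by 4 above and is scaled back up there.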

  address patch_info_pc = __ pc();
#ifdef _LP64
  assert(patch_info_pc - end_of_patch == bytes_to_skip * 4, "incorrect patch info");
#else
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
#endif

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
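  // From this point on, any thread reaching the original patch site at
  // _pc_start is redirected into this stub by the unconditional jump just
  // planted over it.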
  address target = NULL;
  switch (_id) {
    case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:   target = Runtime1::entry_for(Runtime1::load_klass_patching_id);   break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
#ifndef _LP64
  //by_css
  __ lui(T9, Assembler::split_high((int)target));
  __ addiu(T9, T9, Assembler::split_low((int)target));
#else
  __ li48(T9, (long)target);
#endif
  __ jalr(T9);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ b_far(_patch_site_entry);
  __ delayed()->nop();
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + NativeCall::instruction_size; j += 4) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, relocInfo::oop_type, relocInfo::none);
  }
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

// FIXME: it is not clear which register should be used here; A1 is assumed.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

/*
ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}

void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ sp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_array_store_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}
*/

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);
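
  // java_calling_convention has now assigned each of the five arguments
  // either a register or a stack slot (as a VMRegPair) under the Java
  // calling convention; register arguments are already in place (asserted
  // below) and stack arguments are stored explicitly.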

  // push parameters
  // (src, src_pos, dest, dest_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sw(r[i], SP, st_off);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());

#ifndef PRODUCT
  // increment Runtime1::_arraycopy_slowcase_cnt
#ifndef _LP64
  __ lui(T8, Assembler::split_high((int)&Runtime1::_arraycopy_slowcase_cnt));
  __ lw(AT, T8, Assembler::split_low((int)&Runtime1::_arraycopy_slowcase_cnt));
  __ addiu(AT, AT, 1);
  __ sw(AT, T8, Assembler::split_low((int)&Runtime1::_arraycopy_slowcase_cnt));
#else
  __ li(T8, (long)&Runtime1::_arraycopy_slowcase_cnt);
  __ lw(AT, T8, 0);
  __ daddiu(AT, AT, 1);
  __ sw(AT, T8, 0);
#endif
#endif

  __ b_far(_continuation);
  __ delayed()->nop();
}

/////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

/*
jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}
*/

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  Unimplemented();
}

#endif // SERIALGC
/////////////////////////////////////////////////////////////////////////////

#undef __