// Wed, 29 Mar 2017 09:41:51 +0800
// #4662: TieredCompilation is turned off.
// TieredCompilation is not supported yet.
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_mips.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
// Profiling/safepoint support

#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
#endif

// Profiling/safepoint support:
// for profiling, acting on another frame; walks sender frames if valid.
// frame profile_find_Java_sender_frame(JavaThread *thread);
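
// safe_for_sender checks that this frame's sp and fp lie within the thread's
// stack and that the code blob found for _pc looks walkable, so profiling and
// safepoint code can act on another thread's frames without crashing.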
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  bool sp_safe = (sp != NULL &&
                  (sp <= thread->stack_base()) &&
                  (sp >= thread->stack_base() - thread->stack_size()));
  bool fp_safe = (fp != NULL &&
                  (fp <= thread->stack_base()) &&
                  (fp >= thread->stack_base() - thread->stack_size()));
  if (sp_safe && fp_safe) {
    CodeBlob *cb = CodeCache::find_blob_unsafe(_pc);
    // First check if the frame is complete and the tester is reliable.
    // Unfortunately we can only check frame completeness for runtime stubs
    // and nmethods; other generic buffer blobs are more problematic, so we
    // just assume they are ok. Adapter blobs never have a complete frame
    // and are never ok.
    if (cb != NULL && !cb->is_frame_complete_at(_pc)) {
      if (cb->is_nmethod() || cb->is_adapter_blob() || cb->is_runtime_stub()) {
        return false;
      }
    }
    return true;
  }
  // Note: fp == NULL is not really a prerequisite for this to be safe to
  // walk for c2. However we've modified the code such that if we get
  // a failure with fp != NULL we then retry with fp == NULL.
  // This is basically to mimic what a last_frame would look like if
  // c2 had generated it.
  if (sp_safe && fp == NULL) {
    CodeBlob *cb = CodeCache::find_blob_unsafe(_pc);
    // The frame must be complete if fp == NULL, as fp == NULL is only sensible
    // if we are looking at an nmethod, and frame completeness assures us of that.
    if (cb != NULL && cb->is_frame_complete_at(_pc) && cb->is_compiled_by_c2()) {
      return true;
    }
  }
  return false;
}
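
// Patch the return-address slot of this frame so that the caller resumes at
// 'pc', then update the frame's deoptimization state according to the code
// blob that owns the new pc.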
void frame::patch_pc(Thread* thread, address pc) {
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  (intptr_t)&((address *)_sp)[-1], (intptr_t)((address *)_sp)[-1], (intptr_t)pc);
  }

  RegisterMap map((JavaThread *)thread, false);
  frame check = ((JavaThread *)thread)->last_frame();
  if (id() != check.id()) {
    while (id() != check.sender(&map).id()) {
      check = check.sender(&map);
    }
    if (check.is_interpreted_frame())
      *(check.fp() + 1) = (intptr_t)pc;
    else
      ((address *)_sp)[-1] = pc;
  }

  _cb = CodeCache::find_blob(pc);
  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
    address orig = (((nmethod*)_cb)->get_original_pc(this));
    assert(orig == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}

bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

int frame::frame_size(RegisterMap* map) const {
  frame sender = this->sender(map);
  return sender.sp() - sp();
}

intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}

// sender_sp
#ifdef CC_INTERP
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // QQQ why does this specialized method exist if frame::sender_sp() does the same thing?
  // seems odd, and if we always know interpreted vs. non then sender_sp() is really
  // doing too much work.
  return get_interpreterState()->sender_sp();
}

// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return get_interpreterState()->monitor_base();
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  return (BasicObjectLock*) get_interpreterState()->stack_base();
}

#else // CC_INTERP

intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}

void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  int_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}

// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert((intptr_t) fp() > (intptr_t) result, "monitor end must be below the frame pointer");
  assert((intptr_t) sp() <= (intptr_t) result, "monitor end must be at or above the stack pointer");
  return result;
}

void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
  *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}
#endif // CC_INTERP
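
// An entry frame is created by the call stub when C code calls into Java;
// its sender is the last Java frame recorded in the JavaFrameAnchor of the
// entry frame's call wrapper.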
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  if (jfa->last_Java_pc() != NULL) {
    frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
    return fr;
  }
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
  return fr;
}
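
// The sender of an interpreter frame is reconstructed from three values kept
// in the frame itself: the saved sender sp, the saved frame pointer (link())
// and the saved return address (sender_pc()).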
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // sp is the raw sp from the sender after adapter or interpreter extension
  //intptr_t* sp = (intptr_t*) addr_at(sender_sp_offset);
  intptr_t* sp = (intptr_t*) at(interpreter_frame_sender_sp_offset);

  // This is the sp before any possible extension (adapter/locals).
  //intptr_t* unextended_sp = interpreter_frame_sender_sp();

  // The interpreter and compiler(s) always save FP (EBP/RBP on x86) in a known
  // location on entry. We must record where that location is so that if FP was
  // live on callout from c2 we can find the saved copy no matter what it called.

  // Since the interpreter always saves FP, if we record where it is then we
  // don't have to always save FP on entry and exit to c2 compiled code;
  // on entry will be enough.
#ifdef COMPILER2
  //FIXME aoqi
  if (map->update_map()) {
    map->set_location(FP->as_VMReg(), (address) addr_at(link_offset));
  }
#endif /* COMPILER2 */
  //return frame(sp, unextended_sp, link(), sender_pc());
  return frame(sp, link(), sender_pc());
}

//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument. And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif

//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
void frame::adjust_unextended_sp() {
  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP. The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:

  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
  if (sender_nm != NULL) {
    // If the sender PC is a deoptimization point, get the original
    // PC. For MethodHandle call sites the unextended_sp is stored in
    // saved_fp.
    if (sender_nm->is_deopt_mh_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
      _unextended_sp = _fp;
    }
    else if (sender_nm->is_deopt_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
    }
    else if (sender_nm->is_method_handle_return(_pc)) {
      _unextended_sp = _fp;
    }
  }
}
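
// For a compiled frame the sender's sp is found by adding the code blob's
// fixed frame size to this frame's sp; the sender's pc and saved fp are then
// loaded from fixed offsets below that sp.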
//------------------------------sender_for_compiled_frame-----------------------
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  const bool c1_compiled = _cb->is_compiled_by_c1();
  // frame owned by optimizing compiler
  intptr_t* sender_sp = NULL;
  bool native = _cb->is_nmethod() && ((nmethod*)_cb)->is_native_method();

  assert(_cb->frame_size() >= 0, "must have non-negative frame size");
  // FIXME: this may be wrong here; does MIPS keep the return address and link address on the stack?

  sender_sp = sp() + _cb->frame_size();
#ifdef ASSERT
  if (c1_compiled && native) {
    assert(sender_sp == fp() + frame::sender_sp_offset, "incorrect frame size");
  }
#endif // ASSERT
  // On Intel the return_address is always the word on the stack.
  // In the compiler, fp points to the sender's fp, but in the interpreter fp points
  // to the return address, so getting the sender of a compiled frame is not the same
  // as for an interpreter frame. We hard code this here temporarily.
  // spark
  address sender_pc = (address) *(sender_sp-1);

  intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }
    // Since the prolog does the save and restore of FP there is no oopmap
    // for it, so we must fill in its location as if there were an oopmap entry,
    // since if our caller was compiled code there could be live jvm state in it.
    // map->set_location(ebp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
    map->set_location(FP->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
  }
  assert(sender_sp != sp(), "must have changed");
  return frame(sender_sp, saved_fp, sender_pc);
}
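
// Dispatch to the appropriate sender computation: entry frames, interpreter
// frames and compiled frames each have their own layout; anything else is
// treated as a native C frame whose sender is described by sender_sp()/link().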
frame frame::sender(RegisterMap* map) const {
  // Default is that we don't have to follow them. The sender_for_xxx will
  // update it accordingly.
  map->set_include_argument_oops(false);

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()), "Must be the same");

  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }
  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
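
// Used during deoptimization checks: an unpacked interpreter frame's fp is the
// given fp adjusted downwards by the space needed for the non-parameter locals.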
bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
  assert(is_interpreted_frame(), "must be interpreter frame");
  Method* method = interpreter_frame_method();
  // When unpacking an optimized frame the frame pointer is
  // adjusted with:
  int diff = (method->max_locals() - method->size_of_parameters()) *
             Interpreter::stackElementWords;
  tty->print_cr("adjust fp in deopt fp = " INTPTR_FORMAT, (intptr_t)(fp - diff));
  return _fp == (fp - diff);
}

void frame::pd_gc_epilog() {
  // nothing done here now
}
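
// Perform basic sanity checks on an interpreter frame found during stack
// walking: word alignment of sp/fp, a plausible frame extent, and validity of
// the method, constant pool cache and locals pointers stored in the frame.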
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// QQQ
#ifdef CC_INTERP
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems.
  if (fp() <= sp()) {  // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!m->is_valid_method()) return false;

  // stack frames shouldn't be much larger than max_stack elements

  //if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) {
  if (fp() - sp() > 4096) {  // stack frames shouldn't be large.
    return false;
  }

  // validate bci/bcx

  intptr_t bcx = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }

  // validate ConstantPoolCache*

  ConstantPoolCache* cp = *interpreter_frame_cache_addr();

  // if (cp == NULL || !cp->is_metadata()) return false;
  if (cp == NULL || !cp->is_metaspace_object()) return false;

  // validate locals

  address locals = (address) *interpreter_frame_locals_addr();

  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be misled at this point

#endif // CC_INTERP
  return true;
}
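
// Extract the result of an interpreted method call for JVMTI/debugging.
// For native methods the value has been pushed onto the native stack before
// the method_exit callout; otherwise it is read from the interpreter's
// top-of-stack address.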
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
#ifdef CC_INTERP
  // Needed for JVMTI. The result should always be in the interpreterState object
  assert(false, "NYI");
  interpreterState istate = get_interpreterState();
#endif // CC_INTERP
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result.
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // QQQ seems like this code is equivalent on the two platforms
#ifdef AMD64
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
#ifdef CC_INTERP
        obj = istate->_oop_temp;
#else
        obj = (oop) at(interpreter_frame_oop_temp_offset);
#endif // CC_INTERP
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
#ifdef AMD64
      value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        jdouble d = *(jdouble*)tos_addr;  // Result was in ST0 so need to convert to jfloat
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
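
// Address of the expression-stack slot at the given offset from the
// interpreter's top of stack.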
intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  return &interpreter_frame_tos_address()[index];
}

#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
#endif

intptr_t *frame::initial_deoptimization_info() {
  // used to reset the saved FP
  return fp();
}
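
// real_fp returns the top of this frame, computed from the code blob's frame
// size when it is known; otherwise it falls back to fp(), which is only valid
// when the frame is not a compiled frame.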
intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(!is_compiled_frame(), "unknown compiled frame size");
  return fp();
}