757 JVMWrapper2("JVM_NativePath (%s)", path); |
757 JVMWrapper2("JVM_NativePath (%s)", path); |
758 return os::native_path(path); |
758 return os::native_path(path); |
759 JVM_END |
759 JVM_END |
760 |
760 |
761 |
761 |
|
// java.nio.Bits ///////////////////////////////////////////////////////////////

// Upper bound, in bytes, on the size of any Java object: the header of a
// double array plus max_jint double elements (the largest array the VM can
// represent).  Used below to sanity-check Unsafe/Bits field offsets.
#define MAX_OBJECT_SIZE \
  ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
    + ((julong)max_jint * sizeof(double)) )
|
767 |
|
768 static inline jlong field_offset_to_byte_offset(jlong field_offset) { |
|
769 return field_offset; |
|
770 } |
|
771 |
|
// Debug-only sanity check that 'field_offset' is a plausible byte offset
// into object 'p'.  Compiles to an empty body in product builds
// (when ASSERT is not defined).  A NULL 'p' is accepted without checks:
// in that case the offset is treated elsewhere as a raw native address.
static inline void assert_field_offset_sane(oop p, jlong field_offset) {
#ifdef ASSERT
  jlong byte_offset = field_offset_to_byte_offset(field_offset);

  if (p != NULL) {
    // Offset must be non-negative and no larger than the largest possible
    // Java object (MAX_OBJECT_SIZE, defined above).
    assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
    if (byte_offset == (jint)byte_offset) {
      // When the offset fits in a jint, cross-check raw pointer arithmetic
      // against the oop field-addressing path; the two must agree.
      void* ptr_plus_disp = (address)p + byte_offset;
      assert((void*)p->obj_field_addr<oop>((jint)byte_offset) == ptr_plus_disp,
             "raw [ptr+disp] must be consistent with oop::field_base");
    }
    // Finally, the offset must lie strictly inside this particular object.
    jlong p_size = HeapWordSize * (jlong)(p->size());
    assert(byte_offset < p_size, err_msg("Unsafe access: offset " INT64_FORMAT
                                         " > object's size " INT64_FORMAT,
                                         (int64_t)byte_offset, (int64_t)p_size));
  }
#endif
}
|
790 |
|
791 static inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) { |
|
792 assert_field_offset_sane(p, field_offset); |
|
793 jlong byte_offset = field_offset_to_byte_offset(field_offset); |
|
794 |
|
795 if (sizeof(char*) == sizeof(jint)) { // (this constant folds!) |
|
796 return (address)p + (jint) byte_offset; |
|
797 } else { |
|
798 return (address)p + byte_offset; |
|
799 } |
|
800 } |
|
801 |
|
// This function is a leaf since if the source and destination are both in native memory
// the copy may potentially be very large, and we don't want to disable GC if we can avoid it.
// If either source or destination (or both) are on the heap, the function will enter VM using
// JVM_ENTRY_FROM_LEAF
//
// Semantics: copies 'size' bytes from src to dst while byte-swapping each
// 'elemSize'-sized element (per Copy::conjoint_swap).  For a NULL srcObj/dstObj
// the corresponding offset is an absolute native address; otherwise it is a
// field offset within the resolved heap object.
JVM_LEAF(void, JVM_CopySwapMemory(JNIEnv *env, jobject srcObj, jlong srcOffset,
                                  jobject dstObj, jlong dstOffset, jlong size,
                                  jlong elemSize)) {

  // NOTE(review): size/elemSize are narrowed to size_t; callers presumably
  // validate them beforehand — confirm against the java.nio.Bits caller.
  size_t sz = (size_t)size;
  size_t esz = (size_t)elemSize;

  if (srcObj == NULL && dstObj == NULL) {
    // Both src & dst are in native memory: the offsets ARE the addresses.
    // Stay a leaf (no VM transition) so GC is not blocked during a
    // potentially very large copy.
    address src = (address)srcOffset;
    address dst = (address)dstOffset;

    Copy::conjoint_swap(src, dst, sz, esz);
  } else {
    // At least one of src/dst are on heap, transition to VM to access raw pointers

    JVM_ENTRY_FROM_LEAF(env, void, JVM_CopySwapMemory) {
      // Inside the VM transition it is safe to resolve handles to oops and
      // compute raw interior pointers.
      oop srcp = JNIHandles::resolve(srcObj);
      oop dstp = JNIHandles::resolve(dstObj);

      // A NULL oop here (one side native) makes the helper return the
      // offset as an absolute address.
      address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
      address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);

      Copy::conjoint_swap(src, dst, sz, esz);
    } JVM_END
  }
} JVM_END
|
833 |
|
834 |
762 // Misc. class handling /////////////////////////////////////////////////////////// |
835 // Misc. class handling /////////////////////////////////////////////////////////// |
763 |
836 |
764 |
837 |
765 JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth)) |
838 JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth)) |
766 JVMWrapper("JVM_GetCallerClass"); |
839 JVMWrapper("JVM_GetCallerClass"); |