408 |
408 |
409 inline intptr_t align_size_down(intptr_t size, intptr_t alignment) { |
409 inline intptr_t align_size_down(intptr_t size, intptr_t alignment) { |
410 return align_size_down_(size, alignment); |
410 return align_size_down_(size, alignment); |
411 } |
411 } |
412 |
412 |
|
// True iff size is already a multiple of alignment, i.e. rounding it up
// with align_size_up_ leaves it unchanged.
#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
// Align objects by rounding up their size, in HeapWord units.

#define align_object_size_(size) align_size_up_(size, MinObjAlignment)
416 |
418 |
417 inline intptr_t align_object_size(intptr_t size) { |
419 inline intptr_t align_object_size(intptr_t size) { |
424 |
426 |
425 // Pad out certain offsets to jlong alignment, in HeapWord units. |
427 // Pad out certain offsets to jlong alignment, in HeapWord units. |
426 |
428 |
427 inline intptr_t align_object_offset(intptr_t offset) { |
429 inline intptr_t align_object_offset(intptr_t offset) { |
428 return align_size_up(offset, HeapWordsPerLong); |
430 return align_size_up(offset, HeapWordsPerLong); |
|
431 } |
|
432 |
|
433 inline void* align_pointer_up(const void* addr, size_t size) { |
|
434 return (void*) align_size_up_((uintptr_t)addr, size); |
429 } |
435 } |
430 |
436 |
// Clamp an address to be within a specific page
// 1. If addr is on the page it is returned as is
// 2. If addr is above the page_address the start of the *next* page will be returned
447 |
453 |
448 |
454 |
// The expected size in bytes of a cache line, used to pad data structures.
#define DEFAULT_CACHE_LINE_SIZE 64
451 |
457 |
// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
// expected cache line size (a power of two). The first addend avoids sharing
// when the start address is not a multiple of alignment; the second maintains
// alignment of starting addresses that happen to be a multiple.
#define PADDING_SIZE(type, alignment) \
  ((alignment) + align_size_up_(sizeof(type), alignment))
|
458 |
|
459 // Templates to create a subclass padded to avoid cache line sharing. These are |
|
460 // effective only when applied to derived-most (leaf) classes. |
|
461 |
|
462 // When no args are passed to the base ctor. |
|
463 template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE> |
|
464 class Padded: public T { |
|
465 private: |
|
466 char _pad_buf_[PADDING_SIZE(T, alignment)]; |
|
467 }; |
|
468 |
|
469 // When either 0 or 1 args may be passed to the base ctor. |
|
470 template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE> |
|
471 class Padded01: public T { |
|
472 public: |
|
473 Padded01(): T() { } |
|
474 Padded01(Arg1T arg1): T(arg1) { } |
|
475 private: |
|
476 char _pad_buf_[PADDING_SIZE(T, alignment)]; |
|
477 }; |
|
478 |
458 |
//----------------------------------------------------------------------------------------------------
// Utility macros for compilers
// used to silence compiler warnings