This is the mail archive of the
gcc-patches@gcc.gnu.org
mailing list for the GCC project.
[PATCH] Reduce pool allocator overhead for !CHECKING_P
- From: Richard Biener <rguenther at suse dot de>
- To: gcc-patches at gcc dot gnu dot org
- Date: Tue, 23 Feb 2016 15:28:21 +0100 (CET)
- Subject: [PATCH] Reduce pool allocator overhead for !CHECKING_P
- Authentication-results: sourceware.org; auth=none
The following patch reverts an earlier decision of mine to make the id
member unconditional. On PR26854 we can see
df_chain_block pool df-problems.c:2398 (df_chain_alloc)
152 0: 0.0% 937593072 61860737: 90.1% 24
thus a peak of 900MB used for df chains (16 bytes each but with overhead
24 bytes). With the patch (and release checking) that's down to
df_chain_block pool alloc-pool.h:478 (df_chain_alloc)
146 0: 0.0% 625062048 61860715: 90.1% 16
which is a good savings.
In the future somebody might make that checking code more intelligent
(using some compile-time walking of the allocated blocks and doing some
range checking on the object pointers instead).
Built with and without checking; going to commit if a release checking
build reaches stage3 (I missed no -Werror).
Richard.
2016-02-23 Richard Biener <rguenther@suse.de>
* alloc-pool.h (struct allocation_object): Make id member
conditional on CHECKING_P again.
(get_instance): Adjust.
(base_pool_allocator): Likewise.
Index: gcc/alloc-pool.h
===================================================================
*** gcc/alloc-pool.h (revision 233633)
--- gcc/alloc-pool.h (working copy)
*************** private:
*** 156,163 ****
--- 156,165 ----
struct allocation_object
{
+ #if CHECKING_P
/* The ID of alloc pool which the object was allocated from. */
ALLOC_POOL_ID_TYPE id;
+ #endif
union
{
*************** private:
*** 172,177 ****
--- 174,180 ----
int64_t align_i;
} u;
+ #if CHECKING_P
static inline allocation_object*
get_instance (void *data_ptr)
{
*************** private:
*** 179,184 ****
--- 182,188 ----
- offsetof (allocation_object,
u.data));
}
+ #endif
static inline void*
get_data (void *instance_ptr)
*************** base_pool_allocator <TBlockAllocator>::a
*** 388,394 ****
--- 392,400 ----
header->next = NULL;
/* Mark the element to be free. */
+ #if CHECKING_P
((allocation_object*) block)->id = 0;
+ #endif
VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
m_returned_free_list = header;
m_virgin_free_list += m_elt_size;
*************** base_pool_allocator <TBlockAllocator>::a
*** 403,409 ****
--- 409,417 ----
m_elts_free--;
/* Set the ID for element. */
+ #if CHECKING_P
allocation_object::get_instance (header)->id = m_id;
+ #endif
VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
return (void *)(header);
*************** base_pool_allocator <TBlockAllocator>::r
*** 420,435 ****
{
gcc_assert (m_initialized);
gcc_assert (object
! /* Check if we free more than we allocated, which is Bad (TM). */
! && m_elts_free < m_elts_allocated
! /* Check whether the PTR was allocated from POOL. */
! && m_id == allocation_object::get_instance (object)->id);
memset (object, 0xaf, size);
}
/* Mark the element to be free. */
allocation_object::get_instance (object)->id = 0;
allocation_pool_list *header = new (object) allocation_pool_list;
header->next = m_returned_free_list;
--- 428,447 ----
{
gcc_assert (m_initialized);
gcc_assert (object
! /* Check if we free more than we allocated. */
! && m_elts_free < m_elts_allocated);
! #if CHECKING_P
! /* Check whether the PTR was allocated from POOL. */
! gcc_assert (m_id == allocation_object::get_instance (object)->id);
! #endif
memset (object, 0xaf, size);
}
+ #if CHECKING_P
/* Mark the element to be free. */
allocation_object::get_instance (object)->id = 0;
+ #endif
allocation_pool_list *header = new (object) allocation_pool_list;
header->next = m_returned_free_list;