diff --git a/jerry-core/CMakeLists.txt b/jerry-core/CMakeLists.txt index 52fd9cf1f3..084291e674 100644 --- a/jerry-core/CMakeLists.txt +++ b/jerry-core/CMakeLists.txt @@ -105,16 +105,16 @@ project (JerryCore C ASM) # Include directories set(INCLUDE_CORE ${CMAKE_SOURCE_DIR}/jerry-core - ${CMAKE_SOURCE_DIR}/jerry-core/lit - ${CMAKE_SOURCE_DIR}/jerry-core/rcs - ${CMAKE_SOURCE_DIR}/jerry-core/jmem - ${CMAKE_SOURCE_DIR}/jerry-core/vm - ${CMAKE_SOURCE_DIR}/jerry-core/ecma/builtin-objects ${CMAKE_SOURCE_DIR}/jerry-core/ecma/base + ${CMAKE_SOURCE_DIR}/jerry-core/ecma/builtin-objects ${CMAKE_SOURCE_DIR}/jerry-core/ecma/operations + ${CMAKE_SOURCE_DIR}/jerry-core/jcontext + ${CMAKE_SOURCE_DIR}/jerry-core/jmem + ${CMAKE_SOURCE_DIR}/jerry-core/jrt + ${CMAKE_SOURCE_DIR}/jerry-core/lit ${CMAKE_SOURCE_DIR}/jerry-core/parser/js ${CMAKE_SOURCE_DIR}/jerry-core/parser/regexp - ${CMAKE_SOURCE_DIR}/jerry-core/jrt) + ${CMAKE_SOURCE_DIR}/jerry-core/vm) # Third-party # Valgrind @@ -123,29 +123,29 @@ project (JerryCore C ASM) # Sources # Jerry core file(GLOB SOURCE_CORE_API *.c) - file(GLOB SOURCE_CORE_LIT lit/*.c) - file(GLOB SOURCE_CORE_RCS rcs/*.c) - file(GLOB SOURCE_CORE_MEM jmem/*.c) - file(GLOB SOURCE_CORE_VM vm/*.c) - file(GLOB SOURCE_CORE_ECMA_BUILTINS ecma/builtin-objects/*.c) file(GLOB SOURCE_CORE_ECMA_BASE ecma/base/*.c) + file(GLOB SOURCE_CORE_ECMA_BUILTINS ecma/builtin-objects/*.c) file(GLOB SOURCE_CORE_ECMA_OPERATIONS ecma/operations/*.c) + file(GLOB SOURCE_CORE_JCONTEXT jcontext/*.c) + file(GLOB SOURCE_CORE_JMEM jmem/*.c) + file(GLOB SOURCE_CORE_JRT jrt/*.c) + file(GLOB SOURCE_CORE_LIT lit/*.c) file(GLOB SOURCE_CORE_PARSER_JS parser/js/*.c) file(GLOB SOURCE_CORE_PARSER_REGEXP parser/regexp/*.c) - file(GLOB SOURCE_CORE_JRT jrt/*.c) + file(GLOB SOURCE_CORE_VM vm/*.c) set(SOURCE_CORE_FILES ${SOURCE_CORE_API} - ${SOURCE_CORE_LIT} - ${SOURCE_CORE_RCS} - ${SOURCE_CORE_MEM} - ${SOURCE_CORE_VM} - ${SOURCE_CORE_ECMA_BUILTINS} ${SOURCE_CORE_ECMA_BASE} + ${SOURCE_CORE_ECMA_BUILTINS} 
${SOURCE_CORE_ECMA_OPERATIONS} + ${SOURCE_CORE_JCONTEXT} + ${SOURCE_CORE_JMEM} + ${SOURCE_CORE_JRT} + ${SOURCE_CORE_LIT} ${SOURCE_CORE_PARSER_JS} ${SOURCE_CORE_PARSER_REGEXP} - ${SOURCE_CORE_JRT}) + ${SOURCE_CORE_VM}) # Jerry port if(NOT ("${PORT_DIR}" STREQUAL "")) diff --git a/jerry-core/jcontext/jcontext.c b/jerry-core/jcontext/jcontext.c new file mode 100644 index 0000000000..8186a39427 --- /dev/null +++ b/jerry-core/jcontext/jcontext.c @@ -0,0 +1,48 @@ +/* Copyright 2016 Samsung Electronics Co., Ltd. + * Copyright 2016 University of Szeged. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jcontext.h" + +/** \addtogroup context Jerry context + * @{ + * + * \addtogroup context Context + * @{ + */ + +/** + * Global context. + */ +jerry_context_t jerry_global_context; + +/** + * Jerry global heap section attribute. + */ +#ifndef JERRY_HEAP_SECTION_ATTR +#define JERRY_GLOBAL_HEAP_SECTION +#else /* JERRY_HEAP_SECTION_ATTR */ +#define JERRY_GLOBAL_HEAP_SECTION __attribute__ ((section (JERRY_HEAP_SECTION_ATTR))) +#endif /* !JERRY_HEAP_SECTION_ATTR */ + +/** + * Global heap. + */ +jmem_heap_t jerry_global_heap __attribute__ ((aligned (JMEM_ALIGNMENT))) JERRY_GLOBAL_HEAP_SECTION; + +/** + * @} + * @} + */ diff --git a/jerry-core/jcontext/jcontext.h b/jerry-core/jcontext/jcontext.h new file mode 100644 index 0000000000..4a918a7eb7 --- /dev/null +++ b/jerry-core/jcontext/jcontext.h @@ -0,0 +1,111 @@ +/* Copyright 2016 Samsung Electronics Co., Ltd. 
+ * Copyright 2016 University of Szeged. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Memory context for JerryScript + */ +#ifndef JCONTEXT_H +#define JCONTEXT_H + +#include "jrt.h" +#include "jmem-allocator.h" +#include "jmem-config.h" + +/** \addtogroup context Jerry context + * @{ + * + * \addtogroup context Context + * @{ + */ + +/** + * Calculate heap area size, leaving space for a pointer to the free list + */ +#define JMEM_HEAP_AREA_SIZE (JMEM_HEAP_SIZE - JMEM_ALIGNMENT) + +/** + * Heap structure + * + * Memory blocks returned by the allocator must not start from the + * beginning of the heap area because offset 0 is reserved for + * JMEM_CP_NULL. This special constant is used in several places, + * e.g. it marks the end of the property chain list, so it cannot + * be eliminated from the project. Although the allocator cannot + * use the first 8 bytes of the heap, nothing prevents to use it + * for other purposes. Currently the free region start is stored + * there. + */ +typedef struct +{ + jmem_heap_free_t first; /**< first node in free region list */ + uint8_t area[JMEM_HEAP_AREA_SIZE]; /**< heap area */ +} jmem_heap_t; + +/** + * JerryScript context + * + * The purpose of this header is storing + * all global variables for Jerry + */ +typedef struct +{ + /** + * Memory manager part. 
+ */ + size_t jmem_heap_allocated_size; /**< size of allocated regions */ + size_t jmem_heap_limit; /**< current limit of heap usage, that is upon being reached, + * causes call of "try give memory back" callbacks */ + jmem_heap_free_t *jmem_heap_list_skip_p; /**< This is used to speed up deallocation. */ + jmem_pools_chunk_t *jmem_free_chunk_p; /**< list of free pool chunks */ + jmem_free_unused_memory_callback_t jmem_free_unused_memory_callback; /**< Callback for freeing up memory. */ + +#ifdef JMEM_STATS + jmem_heap_stats_t jmem_heap_stats; /**< heap's memory usage statistics */ + jmem_pools_stats_t jmem_pools_stats; /**< pools' memory usage statistics */ +#endif /* JMEM_STATS */ + +#ifdef JERRY_VALGRIND_FREYA + bool valgrind_freya_mempool_request; /**< Tells whether a pool manager + * allocator request is in progress */ +#endif /* JERRY_VALGRIND_FREYA */ +} jerry_context_t; + +/** + * Jerry global context. + */ +extern jerry_context_t jerry_global_context; + +/** + * Jerry global heap. + */ +extern jmem_heap_t jerry_global_heap; + +/** + * Provides a reference to a field in the current context. + */ +#define JERRY_CONTEXT(field) (jerry_global_context.field) + +/** + * Provides a reference to the area field of the heap. 
+ */ +#define JERRY_HEAP_CONTEXT(field) (jerry_global_heap.field) + +/** + * @} + * @} + */ + +#endif /* !JCONTEXT_H */ diff --git a/jerry-core/jmem/jmem-allocator.c b/jerry-core/jmem/jmem-allocator.c index de00eab1eb..671671fb59 100644 --- a/jerry-core/jmem/jmem-allocator.c +++ b/jerry-core/jmem/jmem-allocator.c @@ -18,20 +18,15 @@ * Allocator implementation */ -#include "jrt.h" -#include "jrt-libc-includes.h" +#include "jcontext.h" #include "jmem-allocator.h" #include "jmem-heap.h" #include "jmem-poolman.h" +#include "jrt-libc-includes.h" #define JMEM_ALLOCATOR_INTERNAL #include "jmem-allocator-internal.h" -/** - * The 'try to give memory back' callback - */ -static jmem_free_unused_memory_callback_t jmem_free_unused_memory_callback = NULL; - /** * Initialize memory allocators. */ @@ -94,9 +89,9 @@ void jmem_register_free_unused_memory_callback (jmem_free_unused_memory_callback_t callback) /**< callback routine */ { /* Currently only one callback is supported */ - JERRY_ASSERT (jmem_free_unused_memory_callback == NULL); + JERRY_ASSERT (JERRY_CONTEXT (jmem_free_unused_memory_callback) == NULL); - jmem_free_unused_memory_callback = callback; + JERRY_CONTEXT (jmem_free_unused_memory_callback) = callback; } /* jmem_register_free_unused_memory_callback */ /** @@ -106,9 +101,9 @@ void jmem_unregister_free_unused_memory_callback (jmem_free_unused_memory_callback_t callback) /**< callback routine */ { /* Currently only one callback is supported */ - JERRY_ASSERT (jmem_free_unused_memory_callback == callback); + JERRY_ASSERT (JERRY_CONTEXT (jmem_free_unused_memory_callback) == callback); - jmem_free_unused_memory_callback = NULL; + JERRY_CONTEXT (jmem_free_unused_memory_callback) = NULL; } /* jmem_unregister_free_unused_memory_callback */ /** @@ -117,9 +112,9 @@ jmem_unregister_free_unused_memory_callback (jmem_free_unused_memory_callback_t void jmem_run_free_unused_memory_callbacks (jmem_free_unused_memory_severity_t severity) /**< severity of the request */ { - if 
(jmem_free_unused_memory_callback != NULL) + if (JERRY_CONTEXT (jmem_free_unused_memory_callback) != NULL) { - jmem_free_unused_memory_callback (severity); + JERRY_CONTEXT (jmem_free_unused_memory_callback) (severity); } jmem_pools_collect_empty (); diff --git a/jerry-core/jmem/jmem-allocator.h b/jerry-core/jmem/jmem-allocator.h index 0c39deae76..a9fa082b09 100644 --- a/jerry-core/jmem/jmem-allocator.h +++ b/jerry-core/jmem/jmem-allocator.h @@ -69,7 +69,24 @@ typedef enum } jmem_free_unused_memory_severity_t; /** - * A 'try give memory back' callback routine type. + * Free region node + */ +typedef struct +{ + uint32_t next_offset; /**< Offset of next region in list */ + uint32_t size; /**< Size of region */ +} jmem_heap_free_t; + +/** + * Node for free chunk list + */ +typedef struct jmem_pools_chunk +{ + struct jmem_pools_chunk *next_p; /**< pointer to next pool chunk */ +} jmem_pools_chunk_t; + +/** + * A free memory callback routine type. */ typedef void (*jmem_free_unused_memory_callback_t) (jmem_free_unused_memory_severity_t); diff --git a/jerry-core/jmem/jmem-heap.c b/jerry-core/jmem/jmem-heap.c index 32fdaf250b..49d5a9f6a5 100644 --- a/jerry-core/jmem/jmem-heap.c +++ b/jerry-core/jmem/jmem-heap.c @@ -18,12 +18,12 @@ * Heap implementation */ -#include "jrt.h" -#include "jrt-bit-fields.h" -#include "jrt-libc-includes.h" +#include "jcontext.h" #include "jmem-allocator.h" #include "jmem-config.h" #include "jmem-heap.h" +#include "jrt-bit-fields.h" +#include "jrt-libc-includes.h" #define JMEM_ALLOCATOR_INTERNAL #include "jmem-allocator-internal.h" @@ -54,22 +54,17 @@ #ifdef JERRY_VALGRIND_FREYA # include "memcheck.h" -/** - * Tells whether a pool manager allocator request is in progress. - */ -static bool valgrind_freya_mempool_request = false; - /** * Called by pool manager before a heap allocation or free. 
*/ void jmem_heap_valgrind_freya_mempool_request (void) { - valgrind_freya_mempool_request = true; + JERRY_CONTEXT (valgrind_freya_mempool_request) = true; } /* jmem_heap_valgrind_freya_mempool_request */ # define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST \ - bool mempool_request = valgrind_freya_mempool_request; \ - valgrind_freya_mempool_request = false + bool mempool_request = JERRY_CONTEXT (valgrind_freya_mempool_request); \ + JERRY_CONTEXT (valgrind_freya_mempool_request) = false # define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) \ if (!mempool_request) \ @@ -89,26 +84,18 @@ void jmem_heap_valgrind_freya_mempool_request (void) # define VALGRIND_FREYA_FREELIKE_SPACE(p) #endif /* JERRY_VALGRIND_FREYA */ -/* Calculate heap area size, leaving space for a pointer to the free list */ -#define JMEM_HEAP_AREA_SIZE (JMEM_HEAP_SIZE - JMEM_ALIGNMENT) -#define JMEM_HEAP_END_OF_LIST ((jmem_heap_free_t *const) ~((uint32_t) 0x0)) - /** - * Free region node + * End of list marker. */ -typedef struct -{ - uint32_t next_offset; /**< Offset of next region in list */ - uint32_t size; /**< Size of region */ -} jmem_heap_free_t; +#define JMEM_HEAP_END_OF_LIST ((jmem_heap_free_t *const) ~((uint32_t) 0x0)) #if UINTPTR_MAX > UINT32_MAX -#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - (uint8_t *) jmem_heap.area)) -#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) &jmem_heap.area[u]) +#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - JERRY_HEAP_CONTEXT (area))) +#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (JERRY_HEAP_CONTEXT (area) + (u))) #else /* UINTPTR_MAX <= UINT32_MAX */ /* In this case we simply store the pointer, since it fits anyway. 
*/ #define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p)) -#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *)(u)) +#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (u)) #endif /* UINTPTR_MAX > UINT32_MAX */ /** @@ -120,54 +107,13 @@ jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */ return (jmem_heap_free_t *)((uint8_t *) curr_p + curr_p->size); } /* jmem_heap_get_region_end */ -/** - * Heap structure - */ -typedef struct -{ - /** First node in free region list */ - jmem_heap_free_t first; - - /** - * Heap area - */ - uint8_t area[JMEM_HEAP_AREA_SIZE] __attribute__ ((aligned (JMEM_ALIGNMENT))); -} jmem_heap_t; - -/** - * Heap - */ -#ifndef JERRY_HEAP_SECTION_ATTR -jmem_heap_t jmem_heap; -#else /* JERRY_HEAP_SECTION_ATTR */ -jmem_heap_t jmem_heap __attribute__ ((section (JERRY_HEAP_SECTION_ATTR))); -#endif /* !JERRY_HEAP_SECTION_ATTR */ - /** * Check size of heap is corresponding to configuration */ -JERRY_STATIC_ASSERT (sizeof (jmem_heap) <= JMEM_HEAP_SIZE, +JERRY_STATIC_ASSERT (sizeof (jmem_heap_t) <= JMEM_HEAP_SIZE, size_of_mem_heap_must_be_less_than_or_equal_to_MEM_HEAP_SIZE); -/** - * Size of allocated regions - */ -size_t jmem_heap_allocated_size; - -/** - * Current limit of heap usage, that is upon being reached, causes call of "try give memory back" callbacks - */ -size_t jmem_heap_limit; - -/* This is used to speed up deallocation. 
*/ -jmem_heap_free_t *jmem_heap_list_skip_p; - #ifdef JMEM_STATS -/** - * Heap's memory usage statistics - */ -static jmem_heap_stats_t jmem_heap_stats; - static void jmem_heap_stat_init (void); static void jmem_heap_stat_alloc (size_t num); static void jmem_heap_stat_free (size_t num); @@ -199,22 +145,30 @@ static void jmem_heap_stat_free_iter (); void jmem_heap_init (void) { - JERRY_ASSERT ((uintptr_t) jmem_heap.area % JMEM_ALIGNMENT == 0); - JERRY_STATIC_ASSERT ((1u << JMEM_HEAP_OFFSET_LOG) >= JMEM_HEAP_SIZE, two_pow_mem_heap_offset_should_not_be_less_than_mem_heap_size); - jmem_heap_allocated_size = 0; - jmem_heap_limit = CONFIG_MEM_HEAP_DESIRED_LIMIT; - jmem_heap.first.size = 0; - jmem_heap_free_t *const region_p = (jmem_heap_free_t *) jmem_heap.area; - jmem_heap.first.next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p); - region_p->size = sizeof (jmem_heap.area); + JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0); + + JERRY_CONTEXT (jmem_heap_allocated_size) = 0; + JERRY_CONTEXT (jmem_heap_limit) = CONFIG_MEM_HEAP_DESIRED_LIMIT; + JERRY_CONTEXT (jmem_free_unused_memory_callback) = NULL; + + jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area); + + region_p->size = JMEM_HEAP_AREA_SIZE; region_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST); - jmem_heap_list_skip_p = &jmem_heap.first; + JERRY_HEAP_CONTEXT (first).size = 0; + JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p); + + JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first); + +#ifdef JERRY_VALGRIND_FREYA + JERRY_CONTEXT (valgrind_freya_mempool_request) = false; +#endif /* JERRY_VALGRIND_FREYA */ - VALGRIND_NOACCESS_SPACE (jmem_heap.area, JMEM_HEAP_AREA_SIZE); + VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_AREA_SIZE); JMEM_HEAP_STAT_INIT (); } /* jmem_heap_init */ @@ -224,8 +178,8 @@ jmem_heap_init (void) */ void jmem_heap_finalize (void) { - JERRY_ASSERT 
(jmem_heap_allocated_size == 0); - VALGRIND_NOACCESS_SPACE (&jmem_heap, sizeof (jmem_heap)); + JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0); + VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_t)); } /* jmem_heap_finalize */ /** @@ -244,48 +198,51 @@ void *jmem_heap_alloc_block_internal (const size_t size) const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT; jmem_heap_free_t *data_space_p = NULL; - VALGRIND_DEFINED_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t)); + VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t)); // Fast path for 8 byte chunks, first region is guaranteed to be sufficient if (required_size == JMEM_ALIGNMENT - && likely (jmem_heap.first.next_offset != JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST))) + && likely (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST))) { - data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset); + data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset); JERRY_ASSERT (jmem_is_heap_pointer (data_space_p)); VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t)); - jmem_heap_allocated_size += JMEM_ALIGNMENT; + JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT; JMEM_HEAP_STAT_ALLOC_ITER (); if (data_space_p->size == JMEM_ALIGNMENT) { - jmem_heap.first.next_offset = data_space_p->next_offset; + JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset; } else { JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT); - jmem_heap_free_t *const remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset) + 1; + + jmem_heap_free_t *remaining_p; + remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1; VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t)); remaining_p->size = data_space_p->size - JMEM_ALIGNMENT; 
remaining_p->next_offset = data_space_p->next_offset; VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t)); - jmem_heap.first.next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p); + JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p); } VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t)); - if (unlikely (data_space_p == jmem_heap_list_skip_p)) + if (unlikely (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p))) { - jmem_heap_list_skip_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset); + JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset); } } // Slow path for larger regions else { - jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset); - jmem_heap_free_t *prev_p = &jmem_heap.first; + jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset); + jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first); + while (current_p != JMEM_HEAP_END_OF_LIST) { JERRY_ASSERT (jmem_is_heap_pointer (current_p)); @@ -300,7 +257,7 @@ void *jmem_heap_alloc_block_internal (const size_t size) { // Region is sufficiently big, store address data_space_p = current_p; - jmem_heap_allocated_size += required_size; + JERRY_CONTEXT (jmem_heap_allocated_size) += required_size; // Region was larger than necessary if (current_p->size > required_size) @@ -328,7 +285,7 @@ void *jmem_heap_alloc_block_internal (const size_t size) VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t)); } - jmem_heap_list_skip_p = prev_p; + JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p; // Found enough space break; @@ -341,12 +298,12 @@ void *jmem_heap_alloc_block_internal (const size_t size) } } - while (jmem_heap_allocated_size >= jmem_heap_limit) + while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit)) { - jmem_heap_limit += CONFIG_MEM_HEAP_DESIRED_LIMIT; + 
JERRY_CONTEXT (jmem_heap_limit) += CONFIG_MEM_HEAP_DESIRED_LIMIT; } - VALGRIND_NOACCESS_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t)); + VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t)); if (unlikely (!data_space_p)) { @@ -382,7 +339,7 @@ jmem_heap_alloc_block (const size_t size) jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH); #endif /* JMEM_GC_BEFORE_EACH_ALLOC */ - if (jmem_heap_allocated_size + size >= jmem_heap_limit) + if (JERRY_CONTEXT (jmem_heap_allocated_size) + size >= JERRY_CONTEXT (jmem_heap_limit)) { jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_LOW); } @@ -447,7 +404,7 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the /* checking that ptr points to the heap */ JERRY_ASSERT (jmem_is_heap_pointer (ptr)); JERRY_ASSERT (size > 0); - JERRY_ASSERT (jmem_heap_limit >= jmem_heap_allocated_size); + JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size)); VALGRIND_FREYA_FREELIKE_SPACE (ptr); VALGRIND_NOACCESS_SPACE (ptr, size); @@ -457,16 +414,16 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the jmem_heap_free_t *prev_p; jmem_heap_free_t *next_p; - VALGRIND_DEFINED_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t)); + VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t)); - if (block_p > jmem_heap_list_skip_p) + if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p)) { - prev_p = jmem_heap_list_skip_p; + prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p); JMEM_HEAP_STAT_SKIP (); } else { - prev_p = &jmem_heap.first; + prev_p = &JERRY_HEAP_CONTEXT (first); JMEM_HEAP_STAT_NONSKIP (); } @@ -513,9 +470,9 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the // Update next if (jmem_heap_get_region_end (block_p) == next_p) { - if (unlikely (next_p == jmem_heap_list_skip_p)) + if (unlikely (next_p == JERRY_CONTEXT 
(jmem_heap_list_skip_p))) { - jmem_heap_list_skip_p = block_p; + JERRY_CONTEXT (jmem_heap_list_skip_p) = block_p; } // Can be merged @@ -528,22 +485,22 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p); } - jmem_heap_list_skip_p = prev_p; + JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p; VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t)); VALGRIND_NOACCESS_SPACE (block_p, size); VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t)); - JERRY_ASSERT (jmem_heap_allocated_size > 0); - jmem_heap_allocated_size -= aligned_size; + JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0); + JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size; - while (jmem_heap_allocated_size + CONFIG_MEM_HEAP_DESIRED_LIMIT <= jmem_heap_limit) + while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_MEM_HEAP_DESIRED_LIMIT <= JERRY_CONTEXT (jmem_heap_limit)) { - jmem_heap_limit -= CONFIG_MEM_HEAP_DESIRED_LIMIT; + JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_MEM_HEAP_DESIRED_LIMIT; } - VALGRIND_NOACCESS_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t)); - JERRY_ASSERT (jmem_heap_limit >= jmem_heap_allocated_size); + VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t)); + JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size)); JMEM_HEAP_STAT_FREE (size); } /* jmem_heap_free_block */ @@ -570,7 +527,7 @@ jmem_heap_compress_pointer (const void *pointer_p) /**< pointer to compress */ JERRY_ASSERT (jmem_is_heap_pointer (pointer_p)); uintptr_t int_ptr = (uintptr_t) pointer_p; - const uintptr_t heap_start = (uintptr_t) &jmem_heap; + const uintptr_t heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first); JERRY_ASSERT (int_ptr % JMEM_ALIGNMENT == 0); @@ -595,7 +552,7 @@ jmem_heap_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to deco JERRY_ASSERT (compressed_pointer != JMEM_CP_NULL); uintptr_t int_ptr = 
compressed_pointer; - const uintptr_t heap_start = (uintptr_t) &jmem_heap; + const uintptr_t heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first); int_ptr <<= JMEM_ALIGNMENT_LOG; int_ptr += heap_start; @@ -617,8 +574,8 @@ jmem_heap_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to deco bool jmem_is_heap_pointer (const void *pointer) /**< pointer */ { - return ((uint8_t *) pointer >= jmem_heap.area - && (uint8_t *) pointer <= ((uint8_t *) jmem_heap.area + JMEM_HEAP_AREA_SIZE)); + return ((uint8_t *) pointer >= JERRY_HEAP_CONTEXT (area) + && (uint8_t *) pointer <= (JERRY_HEAP_CONTEXT (area) + JMEM_HEAP_AREA_SIZE)); } /* jmem_is_heap_pointer */ #endif /* !JERRY_NDEBUG */ @@ -631,7 +588,7 @@ jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats { JERRY_ASSERT (out_heap_stats_p != NULL); - *out_heap_stats_p = jmem_heap_stats; + *out_heap_stats_p = JERRY_CONTEXT (jmem_heap_stats); } /* jmem_heap_get_stats */ /** @@ -640,8 +597,8 @@ jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats void jmem_heap_stats_reset_peak (void) { - jmem_heap_stats.peak_allocated_bytes = jmem_heap_stats.allocated_bytes; - jmem_heap_stats.peak_waste_bytes = jmem_heap_stats.waste_bytes; + JERRY_CONTEXT (jmem_heap_stats).peak_allocated_bytes = JERRY_CONTEXT (jmem_heap_stats).allocated_bytes; + JERRY_CONTEXT (jmem_heap_stats).peak_waste_bytes = JERRY_CONTEXT (jmem_heap_stats).waste_bytes; } /* jmem_heap_stats_reset_peak */ /** @@ -650,6 +607,8 @@ jmem_heap_stats_reset_peak (void) void jmem_heap_stats_print (void) { + jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats); + printf ("Heap stats:\n" " Heap size = %zu bytes\n" " Allocated = %zu bytes\n" @@ -660,17 +619,17 @@ jmem_heap_stats_print (void) " Average alloc iteration = %zu.%04zu\n" " Average free iteration = %zu.%04zu\n" "\n", - jmem_heap_stats.size, - jmem_heap_stats.allocated_bytes, - jmem_heap_stats.waste_bytes, - jmem_heap_stats.peak_allocated_bytes, - 
jmem_heap_stats.peak_waste_bytes, - jmem_heap_stats.skip_count / jmem_heap_stats.nonskip_count, - jmem_heap_stats.skip_count % jmem_heap_stats.nonskip_count * 10000 / jmem_heap_stats.nonskip_count, - jmem_heap_stats.alloc_iter_count / jmem_heap_stats.alloc_count, - jmem_heap_stats.alloc_iter_count % jmem_heap_stats.alloc_count * 10000 / jmem_heap_stats.alloc_count, - jmem_heap_stats.free_iter_count / jmem_heap_stats.free_count, - jmem_heap_stats.free_iter_count % jmem_heap_stats.free_count * 10000 / jmem_heap_stats.free_count); + heap_stats->size, + heap_stats->allocated_bytes, + heap_stats->waste_bytes, + heap_stats->peak_allocated_bytes, + heap_stats->peak_waste_bytes, + heap_stats->skip_count / heap_stats->nonskip_count, + heap_stats->skip_count % heap_stats->nonskip_count * 10000 / heap_stats->nonskip_count, + heap_stats->alloc_iter_count / heap_stats->alloc_count, + heap_stats->alloc_iter_count % heap_stats->alloc_count * 10000 / heap_stats->alloc_count, + heap_stats->free_iter_count / heap_stats->free_count, + heap_stats->free_iter_count % heap_stats->free_count * 10000 / heap_stats->free_count); } /* jmem_heap_stats_print */ /** @@ -679,9 +638,9 @@ jmem_heap_stats_print (void) static void jmem_heap_stat_init () { - memset (&jmem_heap_stats, 0, sizeof (jmem_heap_stats)); + memset (&JERRY_CONTEXT (jmem_heap_stats), 0, sizeof (jmem_heap_stats_t)); - jmem_heap_stats.size = JMEM_HEAP_AREA_SIZE; + JERRY_CONTEXT (jmem_heap_stats).size = JMEM_HEAP_AREA_SIZE; } /* jmem_heap_stat_init */ /** @@ -693,27 +652,28 @@ jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */ const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT; const size_t waste_bytes = aligned_size - size; - jmem_heap_stats.allocated_bytes += aligned_size; - jmem_heap_stats.waste_bytes += waste_bytes; - jmem_heap_stats.alloc_count++; + jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats); + heap_stats->allocated_bytes += aligned_size; + 
heap_stats->waste_bytes += waste_bytes; + heap_stats->alloc_count++; - if (jmem_heap_stats.allocated_bytes > jmem_heap_stats.peak_allocated_bytes) + if (heap_stats->allocated_bytes > heap_stats->peak_allocated_bytes) { - jmem_heap_stats.peak_allocated_bytes = jmem_heap_stats.allocated_bytes; + heap_stats->peak_allocated_bytes = heap_stats->allocated_bytes; } - if (jmem_heap_stats.allocated_bytes > jmem_heap_stats.global_peak_allocated_bytes) + if (heap_stats->allocated_bytes > heap_stats->global_peak_allocated_bytes) { - jmem_heap_stats.global_peak_allocated_bytes = jmem_heap_stats.allocated_bytes; + heap_stats->global_peak_allocated_bytes = heap_stats->allocated_bytes; } - if (jmem_heap_stats.waste_bytes > jmem_heap_stats.peak_waste_bytes) + if (heap_stats->waste_bytes > heap_stats->peak_waste_bytes) { - jmem_heap_stats.peak_waste_bytes = jmem_heap_stats.waste_bytes; + heap_stats->peak_waste_bytes = heap_stats->waste_bytes; } - if (jmem_heap_stats.waste_bytes > jmem_heap_stats.global_peak_waste_bytes) + if (heap_stats->waste_bytes > heap_stats->global_peak_waste_bytes) { - jmem_heap_stats.global_peak_waste_bytes = jmem_heap_stats.waste_bytes; + heap_stats->global_peak_waste_bytes = heap_stats->waste_bytes; } } /* jmem_heap_stat_alloc */ @@ -726,9 +686,11 @@ jmem_heap_stat_free (size_t size) /**< Size of freed block */ const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT; const size_t waste_bytes = aligned_size - size; - jmem_heap_stats.free_count++; - jmem_heap_stats.allocated_bytes -= aligned_size; - jmem_heap_stats.waste_bytes -= waste_bytes; + jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats); + + heap_stats->free_count++; + heap_stats->allocated_bytes -= aligned_size; + heap_stats->waste_bytes -= waste_bytes; } /* jmem_heap_stat_free */ /** @@ -737,7 +699,7 @@ jmem_heap_stat_free (size_t size) /**< Size of freed block */ static void jmem_heap_stat_skip () { - jmem_heap_stats.skip_count++; + JERRY_CONTEXT 
(jmem_heap_stats).skip_count++; } /* jmem_heap_stat_skip */ /** @@ -746,7 +708,7 @@ jmem_heap_stat_skip () static void jmem_heap_stat_nonskip () { - jmem_heap_stats.nonskip_count++; + JERRY_CONTEXT (jmem_heap_stats).nonskip_count++; } /* jmem_heap_stat_nonskip */ /** @@ -755,7 +717,7 @@ jmem_heap_stat_nonskip () static void jmem_heap_stat_alloc_iter () { - jmem_heap_stats.alloc_iter_count++; + JERRY_CONTEXT (jmem_heap_stats).alloc_iter_count++; } /* jmem_heap_stat_alloc_iter */ /** @@ -764,7 +726,7 @@ jmem_heap_stat_alloc_iter () static void jmem_heap_stat_free_iter () { - jmem_heap_stats.free_iter_count++; + JERRY_CONTEXT (jmem_heap_stats).free_iter_count++; } /* jmem_heap_stat_free_iter */ #endif /* JMEM_STATS */ diff --git a/jerry-core/jmem/jmem-poolman.c b/jerry-core/jmem/jmem-poolman.c index d892369709..4bae454d7c 100644 --- a/jerry-core/jmem/jmem-poolman.c +++ b/jerry-core/jmem/jmem-poolman.c @@ -18,11 +18,11 @@ * Memory pool manager implementation */ -#include "jrt.h" -#include "jrt-libc-includes.h" +#include "jcontext.h" #include "jmem-allocator.h" #include "jmem-heap.h" #include "jmem-poolman.h" +#include "jrt-libc-includes.h" #define JMEM_ALLOCATOR_INTERNAL #include "jmem-allocator-internal.h" @@ -34,26 +34,8 @@ * @{ */ -/** - * Node for free chunk list - */ -typedef struct jmem_pools_chunk -{ - struct jmem_pools_chunk *next_p; /**< pointer to next pool chunk */ -} jmem_pools_chunk_t; - -/** - * List of free pool chunks - */ -jmem_pools_chunk_t *jmem_free_chunk_p; - #ifdef JMEM_STATS -/** - * Pools' memory usage statistics - */ -jmem_pools_stats_t jmem_pools_stats; - static void jmem_pools_stat_init (void); static void jmem_pools_stat_free_pool (void); static void jmem_pools_stat_new_alloc (void); @@ -107,7 +89,7 @@ jmem_pools_init (void) JERRY_STATIC_ASSERT (sizeof (jmem_pools_chunk_t) <= JMEM_POOL_CHUNK_SIZE, size_of_mem_pools_chunk_t_must_be_less_than_or_equal_to_MEM_POOL_CHUNK_SIZE); - jmem_free_chunk_p = NULL; + JERRY_CONTEXT (jmem_free_chunk_p) = 
NULL; JMEM_POOLS_STAT_INIT (); } /* jmem_pools_init */ @@ -120,7 +102,7 @@ jmem_pools_finalize (void) { jmem_pools_collect_empty (); - JERRY_ASSERT (jmem_free_chunk_p == NULL); + JERRY_ASSERT (JERRY_CONTEXT (jmem_free_chunk_p) == NULL); } /* jmem_pools_finalize */ /** @@ -136,15 +118,15 @@ jmem_pools_alloc (void) jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH); #endif /* JMEM_GC_BEFORE_EACH_ALLOC */ - if (jmem_free_chunk_p != NULL) + if (JERRY_CONTEXT (jmem_free_chunk_p) != NULL) { - const jmem_pools_chunk_t *const chunk_p = jmem_free_chunk_p; + const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_chunk_p); JMEM_POOLS_STAT_REUSE (); VALGRIND_DEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE); - jmem_free_chunk_p = chunk_p->next_p; + JERRY_CONTEXT (jmem_free_chunk_p) = chunk_p->next_p; VALGRIND_UNDEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE); @@ -167,8 +149,8 @@ jmem_pools_free (void *chunk_p) /**< pointer to the chunk */ VALGRIND_DEFINED_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE); - chunk_to_free_p->next_p = jmem_free_chunk_p; - jmem_free_chunk_p = chunk_to_free_p; + chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_chunk_p); + JERRY_CONTEXT (jmem_free_chunk_p) = chunk_to_free_p; VALGRIND_NOACCESS_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE); @@ -181,15 +163,15 @@ jmem_pools_free (void *chunk_p) /**< pointer to the chunk */ void jmem_pools_collect_empty () { - while (jmem_free_chunk_p) + while (JERRY_CONTEXT (jmem_free_chunk_p)) { - VALGRIND_DEFINED_SPACE (jmem_free_chunk_p, sizeof (jmem_pools_chunk_t)); - jmem_pools_chunk_t *const next_p = jmem_free_chunk_p->next_p; - VALGRIND_NOACCESS_SPACE (jmem_free_chunk_p, sizeof (jmem_pools_chunk_t)); + VALGRIND_DEFINED_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t)); + jmem_pools_chunk_t *const next_p = JERRY_CONTEXT (jmem_free_chunk_p)->next_p; + VALGRIND_NOACCESS_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t)); - jmem_heap_free_block 
(jmem_free_chunk_p, JMEM_POOL_CHUNK_SIZE); + jmem_heap_free_block (JERRY_CONTEXT (jmem_free_chunk_p), JMEM_POOL_CHUNK_SIZE); JMEM_POOLS_STAT_DEALLOC (); - jmem_free_chunk_p = next_p; + JERRY_CONTEXT (jmem_free_chunk_p) = next_p; } } /* jmem_pools_collect_empty */ @@ -202,7 +184,7 @@ jmem_pools_get_stats (jmem_pools_stats_t *out_pools_stats_p) /**< [out] pools' s { JERRY_ASSERT (out_pools_stats_p != NULL); - *out_pools_stats_p = jmem_pools_stats; + *out_pools_stats_p = JERRY_CONTEXT (jmem_pools_stats); } /* jmem_pools_get_stats */ /** @@ -211,7 +193,7 @@ jmem_pools_get_stats (jmem_pools_stats_t *out_pools_stats_p) /**< [out] pools' s void jmem_pools_stats_reset_peak (void) { - jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count; + JERRY_CONTEXT (jmem_pools_stats).peak_pools_count = JERRY_CONTEXT (jmem_pools_stats.pools_count); } /* jmem_pools_stats_reset_peak */ /** @@ -220,6 +202,8 @@ jmem_pools_stats_reset_peak (void) void jmem_pools_stats_print (void) { + jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats); + printf ("Pools stats:\n" " Chunk size: %zu\n" " Pool chunks: %zu\n" @@ -227,11 +211,11 @@ jmem_pools_stats_print (void) " Free chunks: %zu\n" " Pool reuse ratio: %zu.%04zu\n", JMEM_POOL_CHUNK_SIZE, - jmem_pools_stats.pools_count, - jmem_pools_stats.peak_pools_count, - jmem_pools_stats.free_chunks, - jmem_pools_stats.reused_count / jmem_pools_stats.new_alloc_count, - jmem_pools_stats.reused_count % jmem_pools_stats.new_alloc_count * 10000 / jmem_pools_stats.new_alloc_count); + pools_stats->pools_count, + pools_stats->peak_pools_count, + pools_stats->free_chunks, + pools_stats->reused_count / pools_stats->new_alloc_count, + pools_stats->reused_count % pools_stats->new_alloc_count * 10000 / pools_stats->new_alloc_count); } /* jmem_pools_stats_print */ /** @@ -240,7 +224,7 @@ jmem_pools_stats_print (void) static void jmem_pools_stat_init (void) { - memset (&jmem_pools_stats, 0, sizeof (jmem_pools_stats)); + memset (&JERRY_CONTEXT 
(jmem_pools_stats), 0, sizeof (jmem_pools_stats_t)); } /* jmem_pools_stat_init */ /** @@ -249,16 +233,18 @@ jmem_pools_stat_init (void) static void jmem_pools_stat_new_alloc (void) { - jmem_pools_stats.pools_count++; - jmem_pools_stats.new_alloc_count++; + jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats); + + pools_stats->pools_count++; + pools_stats->new_alloc_count++; - if (jmem_pools_stats.pools_count > jmem_pools_stats.peak_pools_count) + if (pools_stats->pools_count > pools_stats->peak_pools_count) { - jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count; + pools_stats->peak_pools_count = pools_stats->pools_count; } - if (jmem_pools_stats.pools_count > jmem_pools_stats.global_peak_pools_count) + if (pools_stats->pools_count > pools_stats->global_peak_pools_count) { - jmem_pools_stats.global_peak_pools_count = jmem_pools_stats.pools_count; + pools_stats->global_peak_pools_count = pools_stats->pools_count; } } /* jmem_pools_stat_new_alloc */ @@ -269,17 +255,19 @@ jmem_pools_stat_new_alloc (void) static void jmem_pools_stat_reuse (void) { - jmem_pools_stats.pools_count++; - jmem_pools_stats.free_chunks--; - jmem_pools_stats.reused_count++; + jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats); - if (jmem_pools_stats.pools_count > jmem_pools_stats.peak_pools_count) + pools_stats->pools_count++; + pools_stats->free_chunks--; + pools_stats->reused_count++; + + if (pools_stats->pools_count > pools_stats->peak_pools_count) { - jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count; + pools_stats->peak_pools_count = pools_stats->pools_count; } - if (jmem_pools_stats.pools_count > jmem_pools_stats.global_peak_pools_count) + if (pools_stats->pools_count > pools_stats->global_peak_pools_count) { - jmem_pools_stats.global_peak_pools_count = jmem_pools_stats.pools_count; + pools_stats->global_peak_pools_count = pools_stats->pools_count; } } /* jmem_pools_stat_reuse */ @@ -290,10 +278,12 @@ jmem_pools_stat_reuse (void) 
static void jmem_pools_stat_free_pool (void) { - JERRY_ASSERT (jmem_pools_stats.pools_count > 0); + jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats); + + JERRY_ASSERT (pools_stats->pools_count > 0); - jmem_pools_stats.pools_count--; - jmem_pools_stats.free_chunks++; + pools_stats->pools_count--; + pools_stats->free_chunks++; } /* jmem_pools_stat_free_pool */ /** @@ -302,7 +292,7 @@ jmem_pools_stat_free_pool (void) static void jmem_pools_stat_dealloc (void) { - jmem_pools_stats.free_chunks--; + JERRY_CONTEXT (jmem_pools_stats).free_chunks--; } /* jmem_pools_stat_dealloc */ #endif /* JMEM_STATS */