// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <bits/move.h>
#if __cplusplus >= 201103L
#include <type_traits>
#endif

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  typedef void (*__destroy_handler)(void*);

  /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;
    typedef std::size_t size_t;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t            _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw call to
      // new will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t            _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t            _M_min_bin;

      // In order to avoid fragmentation and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t            _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t            _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t            _M_freelist_headroom;

      // When set to true, forces all allocations to use new().
      bool              _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
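
    // Illustrative sketch (editor's addition, not part of the original
    // header): the defaults above can be overridden by constructing a
    // _Tune and handing it to __mt_alloc<>::_M_set_options() before the
    // pool is initialized. The values below are arbitrary but respect the
    // NB constraints documented above:
    //
    //   #include <ext/mt_allocator.h>
    //
    //   void
    //   tune_example()
    //   {
    //     typedef __gnu_cxx::__pool_base::_Tune tune_type;
    //     // align, max bytes, min bin, chunk size, max threads,
    //     // freelist headroom (%), force new
    //     tune_type __t(16, 256, 16, 4096, 20, 10, false);
    //     __gnu_cxx::__mt_alloc<int> __a;
    //     __a._M_set_options(__t);  // ignored once the pool is initialized
    //   }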

    struct _Block_address
    {
      void*             _M_initial;
      _Block_address*   _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(0), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(0), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune               _M_options;

    _Binmap_type*       _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool                _M_init;
  };


  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*          _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record**         _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*         _M_address;
      };

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(0), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*              _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      void
      _M_initialize();
    };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record*         _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                  _M_id;
      };

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record*          _M_next;

        // The thread id of the thread which has requested this block.
        size_t                  _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record**         _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*         _M_address;

        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // - Note that the second part of the allocated _M_used "array"
        //   actually hosts (atomic) counters of reclaimed blocks: in
        //   _M_reserve_block and in _M_reclaim_block those numbers are
        //   subtracted from the first ones to obtain the actual size
        //   of the "working set" of the given thread.
        // - Memory to these "arrays" is allocated in _S_initialize()
        //   for _S_max_threads + global pool 0.
        size_t*                 _M_free;
        size_t*                 _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*      _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*) throw ();

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
        _M_thread_freelist(0)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*              _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      _Thread_record*           _M_thread_freelist;
      void*                     _M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif
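
  // Illustrative arithmetic (editor's addition, not in the original
  // source), based on the _M_freelist_headroom comment above: with the
  // default headroom of 10, a thread that currently has 100 blocks of a
  // given bin size in use may keep roughly 100 * 10 / 100 = 10 spare
  // blocks on its per-thread freelist; records beyond that headroom are
  // moved back to the global pool (thread id 0) on deallocation.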

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread>          pool_type;

      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif

  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };
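
  // Usage sketch (editor's addition, not part of the original header):
  // __common_pool_policy is the default policy, so the two typedefs
  // below name the same allocator type when thread support is enabled.
  //
  //   typedef __gnu_cxx::__mt_alloc<int> default_alloc;
  //   typedef __gnu_cxx::__mt_alloc<int,
  //             __gnu_cxx::__common_pool_policy<__gnu_cxx::__pool, true> >
  //           shared_pool_alloc;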


  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp                       value_type;
      typedef _PoolTp<_Thread>          pool_type;

      static pool_type&
      _S_get_pool()
      {
        using std::size_t;
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                                   ? __alignof__(_Tp) : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                             sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif

  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
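
  // Usage sketch (editor's addition, not part of the original header):
  // selecting the per-type policy gives each value_type its own pool,
  // tuned as in __per_type_pool::_S_get_pool above, instead of the
  // shared pool used by the default policy, e.g.:
  //
  //   typedef __gnu_cxx::__mt_alloc<int,
  //             __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool,
  //                                               true> >
  //           per_type_alloc;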


  /// Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef std::size_t               size_type;
      typedef std::ptrdiff_t            difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_type(-1) / sizeof(_Tp); }

#if __cplusplus >= 201103L
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
        { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a @a global one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the @a global list).
   *  @ingroup allocators
   *
   *  Further details:
   *  https://gcc.gnu.org/onlinedocs/libstdc++/manual/mt_allocator.html
   */
  template<typename _Tp,
           typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef std::size_t                       size_type;
      typedef std::ptrdiff_t                    difference_type;
      typedef _Tp*                              pointer;
      typedef const _Tp*                        const_pointer;
      typedef _Tp&                              reference;
      typedef const _Tp&                        const_reference;
      typedef _Tp                               value_type;
      typedef _Poolp                            __policy_type;
      typedef typename _Poolp::pool_type        __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };

      __mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __mt_alloc(const __mt_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      _GLIBCXX_NODISCARD pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };

  template<typename _Tp, typename _Poolp>
    _GLIBCXX_NODISCARD typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__n > this->max_size())
        std::__throw_bad_alloc();

#if __cpp_aligned_new
      // Types with extended alignment are handled by operator new/delete.
      if (alignof(_Tp) > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
        {
          std::align_val_t __al = std::align_val_t(alignof(_Tp));
          return static_cast<_Tp*>(::operator new(__n * sizeof(_Tp), __al));
        }
#endif

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_type __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_type __which = __pool._M_get_binmap(__bytes);
      const size_type __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          // Already reserved.
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
        }
      else
        {
          // Null, reserve.
          __c = __pool._M_reserve_block(__bytes, __thread_id);
        }
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
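
  // Worked example (editor's addition, not in the original source): with
  // the default tuning, __mt_alloc<int>().allocate(3) requests 12 bytes.
  // 12 <= _M_max_bytes (128), so the binmap rounds the request up to the
  // 16-byte bin; the block is taken from the calling thread's freelist if
  // one is available, otherwise _M_reserve_block() carves new blocks out
  // of a freshly allocated chunk.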

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
        {
#if __cpp_aligned_new
          // Types with extended alignment are handled by operator new/delete.
          if (alignof(_Tp) > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
            {
              ::operator delete(__p, std::align_val_t(alignof(_Tp)));
              return;
            }
#endif

          // Requests larger than _M_max_bytes are handled by
          // operators new/delete directly.
          __pool_type& __pool = __policy_type::_S_get_pool();
          const size_type __bytes = __n * sizeof(_Tp);
          if (__pool._M_check_threshold(__bytes))
            ::operator delete(__p);
          else
            __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
        }
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

#if __cpp_impl_three_way_comparison < 201907L
  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
#endif
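
  // Usage sketch (editor's addition, not part of the original header):
  // like any standard allocator, __mt_alloc is normally supplied as a
  // container's allocator template argument, e.g.:
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
  //     v.push_back(42);   // memory comes from the shared __pool
  //   }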

#undef __thread_default

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif