acid-drop - Hacking the planet from a LilyGo T-Deck using custom firmware
git clone git://git.acid.vegas/acid-drop.git
lv_tlsf.c (37687B)
#include "../lv_conf_internal.h"
#if LV_MEM_CUSTOM == 0

#include <limits.h>
#include "lv_tlsf.h"
#include "lv_mem.h"
#include "lv_log.h"
#include "lv_assert.h"

#undef printf
#define printf LV_LOG_ERROR

#define TLSF_MAX_POOL_SIZE LV_MEM_SIZE

#if !defined(_DEBUG)
#define _DEBUG 0
#endif

#if defined(__cplusplus)
#define tlsf_decl inline
#else
#define tlsf_decl static
#endif

/*
** Architecture-specific bit manipulation routines.
**
** TLSF achieves O(1) cost for malloc and free operations by limiting
** the search for a free block to a free list of guaranteed size
** adequate to fulfill the request, combined with efficient free list
** queries using bitmasks and architecture-specific bit-manipulation
** routines.
**
** Most modern processors provide instructions to count leading zeroes
** in a word, find the lowest and highest set bit, etc. These
** specific implementations will be used when available, falling back
** to a reasonably efficient generic implementation.
**
** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
** ffs/fls return 1-32 by default, returning 0 for error.
*/

/*
** Detect whether or not we are building for a 32- or 64-bit (LP/LLP)
** architecture. There is no reliable portable method at compile-time.
*/
#if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) \
    || defined (_WIN64) || defined (__LP64__) || defined (__LLP64__)
#define TLSF_64BIT
#endif

/*
** Returns one plus the index of the most significant 1-bit of n,
** or if n is zero, returns zero.
*/
#ifdef TLSF_64BIT
#define TLSF_FLS(n) ((n) & 0xffffffff00000000ull ? 32 + TLSF_FLS32((size_t)(n) >> 32) : TLSF_FLS32(n))
#else
#define TLSF_FLS(n) TLSF_FLS32(n)
#endif

#define TLSF_FLS32(n) ((n) & 0xffff0000 ? 16 + TLSF_FLS16((n) >> 16) : TLSF_FLS16(n))
#define TLSF_FLS16(n) ((n) & 0xff00 ? 8 + TLSF_FLS8 ((n) >> 8) : TLSF_FLS8 (n))
#define TLSF_FLS8(n) ((n) & 0xf0 ? 4 + TLSF_FLS4 ((n) >> 4) : TLSF_FLS4 (n))
#define TLSF_FLS4(n) ((n) & 0xc ? 2 + TLSF_FLS2 ((n) >> 2) : TLSF_FLS2 (n))
#define TLSF_FLS2(n) ((n) & 0x2 ? 1 + TLSF_FLS1 ((n) >> 1) : TLSF_FLS1 (n))
#define TLSF_FLS1(n) ((n) & 0x1 ? 1 : 0)

/*
** Returns the rounded-up value of log2(n).
** Note: it is used at compile time.
*/
#define TLSF_LOG2_CEIL(n) ((n) & (n - 1) ? TLSF_FLS(n) : TLSF_FLS(n) - 1)

/*
** gcc 3.4 and above have builtin support, specialized for architecture.
** Some compilers masquerade as gcc; patchlevel test filters them out.
*/
#if defined (__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
    && defined (__GNUC_PATCHLEVEL__)

#if defined (__SNC__)
/* SNC for Playstation 3. */

tlsf_decl int tlsf_ffs(unsigned int word)
{
    const unsigned int reverse = word & (~word + 1);
    const int bit = 32 - __builtin_clz(reverse);
    return bit - 1;
}

#else

tlsf_decl int tlsf_ffs(unsigned int word)
{
    return __builtin_ffs(word) - 1;
}

#endif

tlsf_decl int tlsf_fls(unsigned int word)
{
    const int bit = word ? 32 - __builtin_clz(word) : 0;
    return bit - 1;
}

#elif defined (_MSC_VER) && (_MSC_VER >= 1400) && (defined (_M_IX86) || defined (_M_X64))
/* Microsoft Visual C++ support on x86/X64 architectures. */

#include <intrin.h>

#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)

tlsf_decl int tlsf_fls(unsigned int word)
{
    unsigned long index;
    return _BitScanReverse(&index, word) ? index : -1;
}

tlsf_decl int tlsf_ffs(unsigned int word)
{
    unsigned long index;
    return _BitScanForward(&index, word) ? index : -1;
}

#elif defined (_MSC_VER) && defined (_M_PPC)
/* Microsoft Visual C++ support on PowerPC architectures. */

#include <ppcintrinsics.h>

tlsf_decl int tlsf_fls(unsigned int word)
{
    const int bit = 32 - _CountLeadingZeros(word);
    return bit - 1;
}

tlsf_decl int tlsf_ffs(unsigned int word)
{
    const unsigned int reverse = word & (~word + 1);
    const int bit = 32 - _CountLeadingZeros(reverse);
    return bit - 1;
}

#elif defined (__ARMCC_VERSION)
/* RealView Compilation Tools for ARM */

tlsf_decl int tlsf_ffs(unsigned int word)
{
    const unsigned int reverse = word & (~word + 1);
    const int bit = 32 - __clz(reverse);
    return bit - 1;
}

tlsf_decl int tlsf_fls(unsigned int word)
{
    const int bit = word ? 32 - __clz(word) : 0;
    return bit - 1;
}

#elif defined (__ghs__)
/* Green Hills support for PowerPC */

#include <ppc_ghs.h>

tlsf_decl int tlsf_ffs(unsigned int word)
{
    const unsigned int reverse = word & (~word + 1);
    const int bit = 32 - __CLZ32(reverse);
    return bit - 1;
}

tlsf_decl int tlsf_fls(unsigned int word)
{
    const int bit = word ? 32 - __CLZ32(word) : 0;
    return bit - 1;
}

#else
/* Fall back to generic implementation. */

/* Implement ffs in terms of fls. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
    const unsigned int reverse = word & (~word + 1);
    return TLSF_FLS32(reverse) - 1;
}

tlsf_decl int tlsf_fls(unsigned int word)
{
    return TLSF_FLS32(word) - 1;
}

#endif

/* Possibly 64-bit version of tlsf_fls. */
#if defined (TLSF_64BIT)
tlsf_decl int tlsf_fls_sizet(size_t size)
{
    int high = (int)(size >> 32);
    int bits = 0;
    if(high) {
        bits = 32 + tlsf_fls(high);
    }
    else {
        bits = tlsf_fls((int)size & 0xffffffff);
    }
    return bits;
}
#else
#define tlsf_fls_sizet tlsf_fls
#endif

#undef tlsf_decl
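/*
** Illustrative sanity check (not part of the original sources, never
** compiled): the 0-based ffs/fls convention used above, with the same
** values exercised by test_ffs_fls() further down in this file.
*/
#if 0
static void tlsf_ffs_fls_example(void)
{
    /* Lowest set bit of 0x80008000 is bit 15; highest is bit 31. */
    tlsf_assert(tlsf_ffs(0x80008000) == 15);
    tlsf_assert(tlsf_fls(0x80008000) == 31);
    /* Both return -1 for zero, signalling "no bit set". */
    tlsf_assert(tlsf_ffs(0) == -1 && tlsf_fls(0) == -1);
}
#endif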
/*
** Constants.
*/

/* Public constants: may be modified. */
enum tlsf_public {
    /* log2 of number of linear subdivisions of block sizes. Larger
    ** values require more memory in the control structure. Values of
    ** 4 or 5 are typical.
    */
    SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private {
#if defined (TLSF_64BIT)
    /* All allocation sizes and addresses are aligned to 8 bytes. */
    ALIGN_SIZE_LOG2 = 3,
#else
    /* All allocation sizes and addresses are aligned to 4 bytes. */
    ALIGN_SIZE_LOG2 = 2,
#endif
    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

    /*
    ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
    ** However, because we linearly subdivide the second-level lists, and
    ** our minimum size granularity is 4 bytes, it doesn't make sense to
    ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
    ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
    ** trying to split size ranges into more slots than we have available.
    ** Instead, we calculate the minimum threshold size, and place all
    ** blocks below that size into the 0th first-level list.
    */

#if defined (TLSF_MAX_POOL_SIZE)
    FL_INDEX_MAX = TLSF_LOG2_CEIL(TLSF_MAX_POOL_SIZE),
#elif defined (TLSF_64BIT)
    /*
    ** TODO: We can increase this to support larger sizes, at the expense
    ** of more overhead in the TLSF structure.
    */
    FL_INDEX_MAX = 32,
#else
    FL_INDEX_MAX = 30,
#endif
    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
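/*
** Worked example (illustrative, not from the original sources): on a
** 32-bit build with SL_INDEX_COUNT_LOG2 = 5 and ALIGN_SIZE_LOG2 = 2:
**   SL_INDEX_COUNT   = 32
**   FL_INDEX_SHIFT   = 7
**   SMALL_BLOCK_SIZE = 128 bytes
** so every request below 128 bytes lands in first-level list 0, and each
** first-level range [2^n, 2^(n+1)) is split into 32 linear second-level
** slots.
*/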
/*
** Cast and min/max macros.
*/

#define tlsf_cast(t, exp) ((t) (exp))
#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))

/*
** Set assert macro, if it has not been provided by the user.
*/
#define tlsf_assert LV_ASSERT

#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif

/*
** Static assertion mechanism.
*/

#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
#define tlsf_static_assert(exp) \
    typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]

/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);

/*
** Data structures and associated constants.
*/

/*
** Block header structure.
**
** There are several implementation subtleties involved:
** - The prev_phys_block field is only valid if the previous block is free.
** - The prev_phys_block field is actually stored at the end of the
**   previous block. It appears at the beginning of this structure only to
**   simplify the implementation.
** - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct block_header_t {
    /* Points to the previous physical block. */
    struct block_header_t * prev_phys_block;

    /* The size of this block, excluding the block header. */
    size_t size;

    /* Next and previous free blocks. */
    struct block_header_t * next_free;
    struct block_header_t * prev_free;
} block_header_t;

/*
** Since block sizes are always at least a multiple of 4, the two least
** significant bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
static const size_t block_header_free_bit = 1 << 0;
static const size_t block_header_prev_free_bit = 1 << 1;

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
    offsetof(block_header_t, size) + sizeof(size_t);

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
static const size_t block_size_min =
    sizeof(block_header_t) - sizeof(block_header_t *);
static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;
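/*
** Illustrative layout note (not from the original sources): for a used
** block on a 32-bit build, memory looks like
**
**   [ prev_phys_block ][ size|flags ][ user data ... ]
**                       ^-- header overhead (sizeof(size_t) = 4 bytes)
**
** where the prev_phys_block slot physically overlaps the tail of the
** previous block, so a used block costs only block_header_overhead.
** Packing the two status flags into the low bits of `size` is safe
** because real block sizes are always multiples of ALIGN_SIZE (>= 4).
*/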
/* The TLSF control structure. */
typedef struct control_t {
    /* Empty lists point at this block to indicate they are free. */
    block_header_t block_null;

    /* Bitmaps for free lists. */
    unsigned int fl_bitmap;
    unsigned int sl_bitmap[FL_INDEX_COUNT];

    /* Head of free lists. */
    block_header_t * blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} control_t;

/* A type used for casting when doing pointer arithmetic. */
typedef ptrdiff_t tlsfptr_t;

/*
** block_header_t member functions.
*/

static size_t block_size(const block_header_t * block)
{
    return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

static void block_set_size(block_header_t * block, size_t size)
{
    const size_t oldsize = block->size;
    block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

static int block_is_last(const block_header_t * block)
{
    return block_size(block) == 0;
}

static int block_is_free(const block_header_t * block)
{
    return tlsf_cast(int, block->size & block_header_free_bit);
}

static void block_set_free(block_header_t * block)
{
    block->size |= block_header_free_bit;
}

static void block_set_used(block_header_t * block)
{
    block->size &= ~block_header_free_bit;
}

static int block_is_prev_free(const block_header_t * block)
{
    return tlsf_cast(int, block->size & block_header_prev_free_bit);
}

static void block_set_prev_free(block_header_t * block)
{
    block->size |= block_header_prev_free_bit;
}

static void block_set_prev_used(block_header_t * block)
{
    block->size &= ~block_header_prev_free_bit;
}

static block_header_t * block_from_ptr(const void * ptr)
{
    return tlsf_cast(block_header_t *,
                     tlsf_cast(unsigned char *, ptr) - block_start_offset);
}

static void * block_to_ptr(const block_header_t * block)
{
    return tlsf_cast(void *,
                     tlsf_cast(unsigned char *, block) + block_start_offset);
}

/* Return location of next block after block of given size. */
static block_header_t * offset_to_block(const void * ptr, size_t size)
{
    return tlsf_cast(block_header_t *, tlsf_cast(tlsfptr_t, ptr) + size);
}

/* Return location of previous block. */
static block_header_t * block_prev(const block_header_t * block)
{
    tlsf_assert(block_is_prev_free(block) && "previous block must be free");
    return block->prev_phys_block;
}

/* Return location of next existing block. */
static block_header_t * block_next(const block_header_t * block)
{
    block_header_t * next = offset_to_block(block_to_ptr(block),
                                            block_size(block) - block_header_overhead);
    tlsf_assert(!block_is_last(block));
    return next;
}

/* Link a new block with its physical neighbor, return the neighbor. */
static block_header_t * block_link_next(block_header_t * block)
{
    block_header_t * next = block_next(block);
    next->prev_phys_block = block;
    return next;
}

static void block_mark_as_free(block_header_t * block)
{
    /* Link the block to the next block, first. */
    block_header_t * next = block_link_next(block);
    block_set_prev_free(next);
    block_set_free(block);
}

static void block_mark_as_used(block_header_t * block)
{
    block_header_t * next = block_next(block);
    block_set_prev_used(next);
    block_set_used(block);
}

static size_t align_up(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return (x + (align - 1)) & ~(align - 1);
}

static size_t align_down(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return x - (x & (align - 1));
}

static void * align_ptr(const void * ptr, size_t align)
{
    const tlsfptr_t aligned =
        (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return tlsf_cast(void *, aligned);
}
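/*
** Worked example (illustrative, not from the original sources): with
** align = 8,
**   align_up(13, 8)   == 16   (round up to the next multiple)
**   align_down(13, 8) == 8    (strip the low bits)
** Both rely on `align` being a power of two, which the asserts enforce.
*/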
/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
static size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
    if(size) {
        const size_t aligned = align_up(size, align);

        /* aligned size must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
        if(aligned < block_size_max) {
            adjust = tlsf_max(aligned, block_size_min);
        }
    }
    return adjust;
}

/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static void mapping_insert(size_t size, int * fli, int * sli)
{
    int fl, sl;
    if(size < SMALL_BLOCK_SIZE) {
        /* Store small blocks in first list. */
        fl = 0;
        sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
    }
    else {
        fl = tlsf_fls_sizet(size);
        sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
        fl -= (FL_INDEX_SHIFT - 1);
    }
    *fli = fl;
    *sli = sl;
}

/* This version rounds up to the next block size (for allocations) */
static void mapping_search(size_t size, int * fli, int * sli)
{
    if(size >= SMALL_BLOCK_SIZE) {
        const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
        size += round;
    }
    mapping_insert(size, fli, sli);
}
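/*
** Worked example (illustrative; assumes a 32-bit build where
** SMALL_BLOCK_SIZE = 128, SL_INDEX_COUNT = 32, FL_INDEX_SHIFT = 7):
** mapping_insert(460) computes
**   fls(460) = 8                        -> 460 lies in [256, 512)
**   sl = (460 >> 3) ^ 32 = 57 ^ 32 = 25
**   fl = 8 - (FL_INDEX_SHIFT - 1) = 2
** i.e. the bucket covering [256 + 25*8, 256 + 26*8) = [456, 464).
** mapping_search first adds round = 7, so an allocation request maps to
** a bucket whose every block is guaranteed to be large enough.
*/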
static block_header_t * search_suitable_block(control_t * control, int * fli, int * sli)
{
    int fl = *fli;
    int sl = *sli;

    /*
    ** First, search for a block in the list associated with the given
    ** fl/sl index.
    */
    unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
    if(!sl_map) {
        /* No block exists. Search in the next largest first-level list. */
        const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
        if(!fl_map) {
            /* No free blocks available, memory has been exhausted. */
            return 0;
        }

        fl = tlsf_ffs(fl_map);
        *fli = fl;
        sl_map = control->sl_bitmap[fl];
    }
    tlsf_assert(sl_map && "internal error - second level bitmap is null");
    sl = tlsf_ffs(sl_map);
    *sli = sl;

    /* Return the first block in the free list. */
    return control->blocks[fl][sl];
}

/* Remove a free block from the free list. */
static void remove_free_block(control_t * control, block_header_t * block, int fl, int sl)
{
    block_header_t * prev = block->prev_free;
    block_header_t * next = block->next_free;
    tlsf_assert(prev && "prev_free field can not be null");
    tlsf_assert(next && "next_free field can not be null");
    next->prev_free = prev;
    prev->next_free = next;

    /* If this block is the head of the free list, set new head. */
    if(control->blocks[fl][sl] == block) {
        control->blocks[fl][sl] = next;

        /* If the new head is null, clear the bitmap. */
        if(next == &control->block_null) {
            control->sl_bitmap[fl] &= ~(1U << sl);

            /* If the second bitmap is now empty, clear the fl bitmap. */
            if(!control->sl_bitmap[fl]) {
                control->fl_bitmap &= ~(1U << fl);
            }
        }
    }
}

/* Insert a free block into the free block list. */
static void insert_free_block(control_t * control, block_header_t * block, int fl, int sl)
{
    block_header_t * current = control->blocks[fl][sl];
    tlsf_assert(current && "free list cannot have a null entry");
    tlsf_assert(block && "cannot insert a null entry into the free list");
    block->next_free = current;
    block->prev_free = &control->block_null;
    current->prev_free = block;

    tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
                && "block not aligned properly");
    /*
    ** Insert the new block at the head of the list, and mark the first-
    ** and second-level bitmaps appropriately.
    */
    control->blocks[fl][sl] = block;
    control->fl_bitmap |= (1U << fl);
    control->sl_bitmap[fl] |= (1U << sl);
}

/* Remove a given block from the free list. */
static void block_remove(control_t * control, block_header_t * block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
}

/* Insert a given block into the free list. */
static void block_insert(control_t * control, block_header_t * block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    insert_free_block(control, block, fl, sl);
}
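/*
** Illustrative note (not from the original sources): the two-level bitmap
** is what makes the search above O(1). For example, if fl = 2 and sl = 25
** but sl_bitmap[2] has no bit set at position >= 25, masking fl_bitmap
** with ~0U << 3 and taking tlsf_ffs() of the result jumps straight to the
** next first-level class that has any free block at all, without walking
** a single list node.
*/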
static int block_can_split(block_header_t * block, size_t size)
{
    return block_size(block) >= sizeof(block_header_t) + size;
}

/* Split a block into two, the second of which is free. */
static block_header_t * block_split(block_header_t * block, size_t size)
{
    /* Calculate the amount of space left in the remaining block. */
    block_header_t * remaining =
        offset_to_block(block_to_ptr(block), size - block_header_overhead);

    const size_t remain_size = block_size(block) - (size + block_header_overhead);

    tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
                && "remaining block not aligned properly");

    tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
    block_set_size(remaining, remain_size);
    tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

    block_set_size(block, size);
    block_mark_as_free(remaining);

    return remaining;
}

/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t * block_absorb(block_header_t * prev, block_header_t * block)
{
    tlsf_assert(!block_is_last(prev) && "previous block can't be last");
    /* Note: Leaves flags untouched. */
    prev->size += block_size(block) + block_header_overhead;
    block_link_next(prev);
    return prev;
}

/* Merge a just-freed block with an adjacent previous free block. */
static block_header_t * block_merge_prev(control_t * control, block_header_t * block)
{
    if(block_is_prev_free(block)) {
        block_header_t * prev = block_prev(block);
        tlsf_assert(prev && "prev physical block can't be null");
        tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
        block_remove(control, prev);
        block = block_absorb(prev, block);
    }

    return block;
}

/* Merge a just-freed block with an adjacent free block. */
static block_header_t * block_merge_next(control_t * control, block_header_t * block)
{
    block_header_t * next = block_next(block);
    tlsf_assert(next && "next physical block can't be null");

    if(block_is_free(next)) {
        tlsf_assert(!block_is_last(block) && "block can't be last");
        block_remove(control, next);
        block = block_absorb(block, next);
    }

    return block;
}

/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(control_t * control, block_header_t * block, size_t size)
{
    tlsf_assert(block_is_free(block) && "block must be free");
    if(block_can_split(block, size)) {
        block_header_t * remaining_block = block_split(block, size);
        block_link_next(block);
        block_set_prev_free(remaining_block);
        block_insert(control, remaining_block);
    }
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(control_t * control, block_header_t * block, size_t size)
{
    tlsf_assert(!block_is_free(block) && "block must be used");
    if(block_can_split(block, size)) {
        /* If the next block is free, we must coalesce. */
        block_header_t * remaining_block = block_split(block, size);
        block_set_prev_used(remaining_block);

        remaining_block = block_merge_next(control, remaining_block);
        block_insert(control, remaining_block);
    }
}

static block_header_t * block_trim_free_leading(control_t * control, block_header_t * block, size_t size)
{
    block_header_t * remaining_block = block;
    if(block_can_split(block, size)) {
        /* We want the 2nd block. */
        remaining_block = block_split(block, size - block_header_overhead);
        block_set_prev_free(remaining_block);

        block_link_next(block);
        block_insert(control, block);
    }

    return remaining_block;
}
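/*
** Worked example (illustrative; assumes a 32-bit build where
** block_header_overhead = 4): splitting a 256-byte free block to satisfy
** a 64-byte request leaves
**   remain_size = 256 - (64 + 4) = 188 bytes
** for the second block, whose header begins right after the first
** block's 64 user bytes, with its prev_phys_block field overlapping the
** first block's tail as described above.
*/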
static block_header_t * block_locate_free(control_t * control, size_t size)
{
    int fl = 0, sl = 0;
    block_header_t * block = 0;

    if(size) {
        mapping_search(size, &fl, &sl);

        /*
        ** mapping_search can futz with the size, so for excessively large
        ** sizes it can sometimes wind up with indices that are off the end
        ** of the block array. So, we protect against that here, since this
        ** is the only callsite of mapping_search. Note that we don't need
        ** to check sl, since it comes from a modulo operation that
        ** guarantees it's always in range.
        */
        if(fl < FL_INDEX_COUNT) {
            block = search_suitable_block(control, &fl, &sl);
        }
    }

    if(block) {
        tlsf_assert(block_size(block) >= size);
        remove_free_block(control, block, fl, sl);
    }

    return block;
}

static void * block_prepare_used(control_t * control, block_header_t * block, size_t size)
{
    void * p = 0;
    if(block) {
        tlsf_assert(size && "size must be non-zero");
        block_trim_free(control, block, size);
        block_mark_as_used(block);
        p = block_to_ptr(block);
    }
    return p;
}

/* Clear structure and point all empty lists at the null block. */
static void control_constructor(control_t * control)
{
    int i, j;

    control->block_null.next_free = &control->block_null;
    control->block_null.prev_free = &control->block_null;

    control->fl_bitmap = 0;
    for(i = 0; i < FL_INDEX_COUNT; ++i) {
        control->sl_bitmap[i] = 0;
        for(j = 0; j < SL_INDEX_COUNT; ++j) {
            control->blocks[i][j] = &control->block_null;
        }
    }
}
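/*
** Illustrative summary (not from the original sources): a malloc request
** flows through the helpers above as
**   adjust_request_size -> block_locate_free -> block_prepare_used
** i.e. round the size up, pull the first block from a suitable free list,
** trim any excess back into the pool, and mark the result used.
*/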
/*
** Debugging utilities.
*/

typedef struct integrity_t {
    int prev_status;
    int status;
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }

static void integrity_walker(void * ptr, size_t size, int used, void * user)
{
    block_header_t * block = block_from_ptr(ptr);
    integrity_t * integ = tlsf_cast(integrity_t *, user);
    const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
    const int this_status = block_is_free(block) ? 1 : 0;
    const size_t this_block_size = block_size(block);

    int status = 0;
    LV_UNUSED(used);
    tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
    tlsf_insist(size == this_block_size && "block size incorrect");

    integ->prev_status = this_status;
    integ->status += status;
}

int lv_tlsf_check(lv_tlsf_t tlsf)
{
    int i, j;

    control_t * control = tlsf_cast(control_t *, tlsf);
    int status = 0;

    /* Check that the free lists and bitmaps are accurate. */
    for(i = 0; i < FL_INDEX_COUNT; ++i) {
        for(j = 0; j < SL_INDEX_COUNT; ++j) {
            const int fl_map = control->fl_bitmap & (1U << i);
            const int sl_list = control->sl_bitmap[i];
            const int sl_map = sl_list & (1U << j);
            const block_header_t * block = control->blocks[i][j];

            /* Check that first- and second-level lists agree. */
            if(!fl_map) {
                tlsf_insist(!sl_map && "second-level map must be null");
            }

            if(!sl_map) {
                tlsf_insist(block == &control->block_null && "block list must be null");
                continue;
            }

            /* Check that there is at least one free block. */
            tlsf_insist(sl_list && "no free blocks in second-level map");
            tlsf_insist(block != &control->block_null && "block should not be null");

            while(block != &control->block_null) {
                int fli, sli;
                tlsf_insist(block_is_free(block) && "block should be free");
                tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
                tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
                tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
                tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

                mapping_insert(block_size(block), &fli, &sli);
                tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
                block = block->next_free;
            }
        }
    }

    return status;
}

#undef tlsf_insist

static void default_walker(void * ptr, size_t size, int used, void * user)
{
    LV_UNUSED(user);
    printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size,
           (void *)block_from_ptr(ptr));
}

void lv_tlsf_walk_pool(lv_pool_t pool, lv_tlsf_walker walker, void * user)
{
    lv_tlsf_walker pool_walker = walker ? walker : default_walker;
    block_header_t * block =
        offset_to_block(pool, -(int)block_header_overhead);

    while(block && !block_is_last(block)) {
        pool_walker(
            block_to_ptr(block),
            block_size(block),
            !block_is_free(block),
            user);
        block = block_next(block);
    }
}

size_t lv_tlsf_block_size(void * ptr)
{
    size_t size = 0;
    if(ptr) {
        const block_header_t * block = block_from_ptr(ptr);
        size = block_size(block);
    }
    return size;
}

int lv_tlsf_check_pool(lv_pool_t pool)
{
    /* Check that the blocks are physically correct. */
    integrity_t integ = { 0, 0 };
    lv_tlsf_walk_pool(pool, integrity_walker, &integ);

    return integ.status;
}

/*
** Size of the TLSF structures in a given memory block passed to
** lv_tlsf_create, equal to the size of a control_t.
*/
size_t lv_tlsf_size(void)
{
    return sizeof(control_t);
}

size_t lv_tlsf_align_size(void)
{
    return ALIGN_SIZE;
}

size_t lv_tlsf_block_size_min(void)
{
    return block_size_min;
}

size_t lv_tlsf_block_size_max(void)
{
    return block_size_max;
}
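/*
** Usage sketch (illustrative, never compiled): a custom walker that
** tallies free bytes in a pool. The callback signature matches the
** walkers above; lv_tlsf_walk_pool reports used == 0 for free blocks.
*/
#if 0
static void free_bytes_walker(void * ptr, size_t size, int used, void * user)
{
    size_t * total = (size_t *)user;
    LV_UNUSED(ptr);
    if(!used) *total += size;    /* accumulate only free blocks */
}

/* size_t free_bytes = 0;
   lv_tlsf_walk_pool(pool, free_bytes_walker, &free_bytes); */
#endif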
/*
** Overhead of the TLSF structures in a given memory block passed to
** lv_tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
size_t lv_tlsf_pool_overhead(void)
{
    return 2 * block_header_overhead;
}

size_t lv_tlsf_alloc_overhead(void)
{
    return block_header_overhead;
}

lv_pool_t lv_tlsf_add_pool(lv_tlsf_t tlsf, void * mem, size_t bytes)
{
    block_header_t * block;
    block_header_t * next;

    const size_t pool_overhead = lv_tlsf_pool_overhead();
    const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);

    if(((ptrdiff_t)mem % ALIGN_SIZE) != 0) {
        printf("lv_tlsf_add_pool: Memory must be aligned by %u bytes.\n",
               (unsigned int)ALIGN_SIZE);
        return 0;
    }

    if(pool_bytes < block_size_min || pool_bytes > block_size_max) {
#if defined (TLSF_64BIT)
        printf("lv_tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
               (unsigned int)(pool_overhead + block_size_min),
               (unsigned int)((pool_overhead + block_size_max) / 256));
#else
        printf("lv_tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
               (unsigned int)(pool_overhead + block_size_min),
               (unsigned int)(pool_overhead + block_size_max));
#endif
        return 0;
    }

    /*
    ** Create the main free block. Offset the start of the block slightly
    ** so that the prev_phys_block field falls outside of the pool -
    ** it will never be used.
    */
    block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
    block_set_size(block, pool_bytes);
    block_set_free(block);
    block_set_prev_used(block);
    block_insert(tlsf_cast(control_t *, tlsf), block);

    /* Split the block to create a zero-size sentinel block. */
    next = block_link_next(block);
    block_set_size(next, 0);
    block_set_used(next);
    block_set_prev_free(next);

    return mem;
}
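/*
** Usage sketch (illustrative, never compiled; the static buffer and its
** size are assumptions for the example): growing an allocator by adding
** a second pool. The buffer must itself be ALIGN_SIZE aligned.
*/
#if 0
static char extra_pool[16 * 1024];

static void add_more_memory(lv_tlsf_t tlsf)
{
    /* On success lv_tlsf_add_pool returns the pool handle, else 0. */
    lv_pool_t pool = lv_tlsf_add_pool(tlsf, extra_pool, sizeof(extra_pool));
    if(!pool) printf("failed to add pool\n");
}
#endif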
void lv_tlsf_remove_pool(lv_tlsf_t tlsf, lv_pool_t pool)
{
    control_t * control = tlsf_cast(control_t *, tlsf);
    block_header_t * block = offset_to_block(pool, -(int)block_header_overhead);

    int fl = 0, sl = 0;

    tlsf_assert(block_is_free(block) && "block should be free");
    tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
    tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");

    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
}

/*
** TLSF main interface.
*/

#if _DEBUG
int test_ffs_fls()
{
    /* Verify ffs/fls work properly. */
    int rv = 0;
    rv += (tlsf_ffs(0) == -1) ? 0 : 0x1;
    rv += (tlsf_fls(0) == -1) ? 0 : 0x2;
    rv += (tlsf_ffs(1) == 0) ? 0 : 0x4;
    rv += (tlsf_fls(1) == 0) ? 0 : 0x8;
    rv += (tlsf_ffs(0x80000000) == 31) ? 0 : 0x10;
    rv += (tlsf_ffs(0x80008000) == 15) ? 0 : 0x20;
    rv += (tlsf_fls(0x80000008) == 31) ? 0 : 0x40;
    rv += (tlsf_fls(0x7FFFFFFF) == 30) ? 0 : 0x80;

#if defined (TLSF_64BIT)
    rv += (tlsf_fls_sizet(0x80000000) == 31) ? 0 : 0x100;
    rv += (tlsf_fls_sizet(0x100000000) == 32) ? 0 : 0x200;
    rv += (tlsf_fls_sizet(0xffffffffffffffff) == 63) ? 0 : 0x400;
#endif

    if(rv) {
        printf("test_ffs_fls: %x ffs/fls tests failed.\n", rv);
    }
    return rv;
}
#endif

lv_tlsf_t lv_tlsf_create(void * mem)
{
#if _DEBUG
    if(test_ffs_fls()) {
        return 0;
    }
#endif

    if(((tlsfptr_t)mem % ALIGN_SIZE) != 0) {
        printf("lv_tlsf_create: Memory must be aligned to %u bytes.\n",
               (unsigned int)ALIGN_SIZE);
        return 0;
    }

    control_constructor(tlsf_cast(control_t *, mem));

    return tlsf_cast(lv_tlsf_t, mem);
}

lv_tlsf_t lv_tlsf_create_with_pool(void * mem, size_t bytes)
{
    lv_tlsf_t tlsf = lv_tlsf_create(mem);
    lv_tlsf_add_pool(tlsf, (char *)mem + lv_tlsf_size(), bytes - lv_tlsf_size());
    return tlsf;
}

void lv_tlsf_destroy(lv_tlsf_t tlsf)
{
    /* Nothing to do. */
    LV_UNUSED(tlsf);
}

lv_pool_t lv_tlsf_get_pool(lv_tlsf_t tlsf)
{
    return tlsf_cast(lv_pool_t, (char *)tlsf + lv_tlsf_size());
}

void * lv_tlsf_malloc(lv_tlsf_t tlsf, size_t size)
{
    control_t * control = tlsf_cast(control_t *, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
    block_header_t * block = block_locate_free(control, adjust);
    return block_prepare_used(control, block, adjust);
}
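/*
** Usage sketch (illustrative, never compiled; the arena size is an
** assumption for the example): creating an allocator over a static
** arena and allocating from it. The arena must be ALIGN_SIZE aligned
** and large enough to hold the control structure plus the pool.
*/
#if 0
static char arena[32 * 1024];

static void tlsf_example(void)
{
    lv_tlsf_t tlsf = lv_tlsf_create_with_pool(arena, sizeof(arena));
    void * p = lv_tlsf_malloc(tlsf, 100);   /* size is rounded up to ALIGN_SIZE */
    if(p) lv_tlsf_free(tlsf, p);
    lv_tlsf_destroy(tlsf);
}
#endif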
void * lv_tlsf_memalign(lv_tlsf_t tlsf, size_t align, size_t size)
{
    control_t * control = tlsf_cast(control_t *, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

    /*
    ** We must allocate an additional minimum block size bytes so that if
    ** our free block will leave an alignment gap which is smaller, we can
    ** trim a leading free block and release it back to the pool. We must
    ** do this because the previous physical block is in use, therefore
    ** the prev_phys_block field is not valid, and we can't simply adjust
    ** the size of that block.
    */
    const size_t gap_minimum = sizeof(block_header_t);
    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

    /*
    ** If alignment is less than or equal to base alignment, we're done.
    ** If we requested 0 bytes, return null, as lv_tlsf_malloc(0) does.
    */
    const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;

    block_header_t * block = block_locate_free(control, aligned_size);

    /* This can't be a static assert. */
    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

    if(block) {
        void * ptr = block_to_ptr(block);
        void * aligned = align_ptr(ptr, align);
        size_t gap = tlsf_cast(size_t,
                               tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

        /* If gap size is too small, offset to next aligned boundary. */
        if(gap && gap < gap_minimum) {
            const size_t gap_remain = gap_minimum - gap;
            const size_t offset = tlsf_max(gap_remain, align);
            const void * next_aligned = tlsf_cast(void *,
                                                  tlsf_cast(tlsfptr_t, aligned) + offset);

            aligned = align_ptr(next_aligned, align);
            gap = tlsf_cast(size_t,
                            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
        }

        if(gap) {
            tlsf_assert(gap >= gap_minimum && "gap size too small");
            block = block_trim_free_leading(control, block, gap);
        }
    }

    return block_prepare_used(control, block, adjust);
}

void lv_tlsf_free(lv_tlsf_t tlsf, const void * ptr)
{
    /* Don't attempt to free a NULL pointer. */
    if(ptr) {
        control_t * control = tlsf_cast(control_t *, tlsf);
        block_header_t * block = block_from_ptr(ptr);
        tlsf_assert(!block_is_free(block) && "block already marked as free");
        block_mark_as_free(block);
        block = block_merge_prev(control, block);
        block = block_merge_next(control, block);
        block_insert(control, block);
    }
}

/*
** The TLSF block information provides us with enough information to
** provide a reasonably intelligent implementation of realloc, growing or
** shrinking the currently allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void * lv_tlsf_realloc(lv_tlsf_t tlsf, void * ptr, size_t size)
{
    control_t * control = tlsf_cast(control_t *, tlsf);
    void * p = 0;

    /* Zero-size requests are treated as free. */
    if(ptr && size == 0) {
        lv_tlsf_free(tlsf, ptr);
    }
    /* Requests with NULL pointers are treated as malloc. */
    else if(!ptr) {
        p = lv_tlsf_malloc(tlsf, size);
    }
    else {
        block_header_t * block = block_from_ptr(ptr);
        block_header_t * next = block_next(block);

        const size_t cursize = block_size(block);
        const size_t combined = cursize + block_size(next) + block_header_overhead;
        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

        tlsf_assert(!block_is_free(block) && "block already marked as free");

        /*
        ** If the next block is used, or when combined with the current
        ** block, does not offer enough space, we must reallocate and copy.
        */
        if(adjust > cursize && (!block_is_free(next) || adjust > combined)) {
            p = lv_tlsf_malloc(tlsf, size);
            if(p) {
                const size_t minsize = tlsf_min(cursize, size);
                lv_memcpy(p, ptr, minsize);
                lv_tlsf_free(tlsf, ptr);
            }
        }
        else {
            /* Do we need to expand to the next block? */
            if(adjust > cursize) {
                block_merge_next(control, block);
                block_mark_as_used(block);
            }

            /* Trim the resulting block and return the original pointer. */
            block_trim_used(control, block, adjust);
            p = ptr;
        }
    }

    return p;
}

#endif /* LV_MEM_CUSTOM == 0 */
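/*
** Usage sketch (illustrative, not part of the original file): the realloc
** edge cases documented above, in code form.
**
**   void * p = lv_tlsf_realloc(tlsf, NULL, 64);   behaves like malloc
**   p = lv_tlsf_realloc(tlsf, p, 128);            grows in place when the
**                                                 next physical block is free
**   lv_tlsf_realloc(tlsf, p, 0);                  behaves like free
*/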