ReactOS 0.4.15-dev-8135-g1bc6c90
mem.h File Reference
#include "lwip/opt.h"
Include dependency graph for mem.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define MEM_SIZE_F   U16_F
 
#define LWIP_MEM_ALIGN_SIZE(size)   (((size) + MEM_ALIGNMENT - 1) & ~(MEM_ALIGNMENT-1))
 
#define LWIP_MEM_ALIGN_BUFFER(size)   (((size) + MEM_ALIGNMENT - 1))
 
#define LWIP_MEM_ALIGN(addr)   ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1)))
 

Typedefs

typedef u16_t mem_size_t
 

Functions

void mem_init (void)
 
void * mem_trim (void *mem, mem_size_t size)
 
void * mem_malloc (mem_size_t size)
 
void * mem_calloc (mem_size_t count, mem_size_t size)
 
void mem_free (void *mem)
 

Macro Definition Documentation

◆ LWIP_MEM_ALIGN

#define LWIP_MEM_ALIGN (   addr)    ((void *)(((mem_ptr_t)(addr) + MEM_ALIGNMENT - 1) & ~(mem_ptr_t)(MEM_ALIGNMENT-1)))

Align a memory pointer to the alignment defined by MEM_ALIGNMENT so that ADDR % MEM_ALIGNMENT == 0

Definition at line 116 of file mem.h.

◆ LWIP_MEM_ALIGN_BUFFER

#define LWIP_MEM_ALIGN_BUFFER (   size)    (((size) + MEM_ALIGNMENT - 1))

Calculate safe memory size for an aligned buffer when using an unaligned type as storage. This includes a safety-margin on (MEM_ALIGNMENT - 1) at the start (e.g. if buffer is u8_t[] and actual data will be u32_t*)

Definition at line 109 of file mem.h.

◆ LWIP_MEM_ALIGN_SIZE

#define LWIP_MEM_ALIGN_SIZE (   size)    (((size) + MEM_ALIGNMENT - 1) & ~(MEM_ALIGNMENT-1))

Calculate memory size for an aligned buffer - returns the next highest multiple of MEM_ALIGNMENT (e.g. LWIP_MEM_ALIGN_SIZE(3) and LWIP_MEM_ALIGN_SIZE(4) will both yield 4 for MEM_ALIGNMENT == 4).

Definition at line 101 of file mem.h.

◆ MEM_SIZE_F

#define MEM_SIZE_F   U16_F

Definition at line 77 of file mem.h.

Typedef Documentation

◆ mem_size_t

typedef u16_t mem_size_t

Definition at line 76 of file mem.h.

Function Documentation

◆ mem_calloc()

void * mem_calloc ( mem_size_t  count,
mem_size_t  size 
)

Contiguously allocates enough space for count objects that are size bytes of memory each and returns a pointer to the allocated memory.

The allocated memory is filled with bytes of value zero.

Parameters
count — number of objects to allocate
size — size of the objects to allocate
Returns
pointer to allocated memory / NULL pointer if there is an error

Definition at line 646 of file mem.c.

647{
648 void *p;
649
650 /* allocate 'count' objects of size 'size' */
651 p = mem_malloc(count * size);
652 if (p) {
653 /* zero the memory */
654 memset(p, 0, count * size);
655 }
656 return p;
657}
void * mem_malloc(mem_size_t size)
Definition: mem.c:494
GLuint GLuint GLsizei count
Definition: gl.h:1545
GLsizeiptr size
Definition: glext.h:5919
GLfloat GLfloat p
Definition: glext.h:8902
#define memset(x, y, z)
Definition: compat.h:39

◆ mem_free()

void mem_free ( void * rmem )

Put a struct mem back on the heap

Parameters
rmem — the data portion of a struct mem as returned by a previous call to mem_malloc()

Definition at line 311 of file mem.c.

312{
313 struct mem *mem;
315
316 if (rmem == NULL) {
317 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
318 return;
319 }
320 LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
321
322 LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
323 (u8_t *)rmem < (u8_t *)ram_end);
324
325 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
327 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
328 /* protect mem stats from concurrent access */
329 SYS_ARCH_PROTECT(lev);
330 MEM_STATS_INC(illegal);
332 return;
333 }
334 /* protect the heap from concurrent access */
336 /* Get the corresponding struct mem ... */
337 mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
338 /* ... which has to be in a used state ... */
339 LWIP_ASSERT("mem_free: mem->used", mem->used);
340 /* ... and is now unused. */
341 mem->used = 0;
342
343 if (mem < lfree) {
344 /* the newly freed struct is now the lowest */
345 lfree = mem;
346 }
347
349
350 /* finally, see if prev or next are free also */
352#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
353 mem_free_count = 1;
354#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
356}
static int used
Definition: adh-main.c:39
#define MEM_ALIGNMENT
Definition: d3d9_helpers.c:15
#define NULL
Definition: types.h:112
#define SYS_ARCH_UNPROTECT(lev)
Definition: cc.h:56
#define SYS_ARCH_PROTECT(lev)
Definition: cc.h:55
#define SYS_ARCH_DECL_PROTECT(lev)
Definition: cc.h:54
ULONG_PTR mem_ptr_t
Definition: cc.h:33
unsigned char u8_t
Definition: cc.h:23
#define LWIP_MEM_FREE_DECL_PROTECT()
Definition: mem.c:213
static void plug_holes(struct mem *mem)
Definition: mem.c:236
static struct mem * lfree
Definition: mem.c:191
#define LWIP_MEM_FREE_UNPROTECT()
Definition: mem.c:215
static u8_t * ram
Definition: mem.c:187
#define SIZEOF_STRUCT_MEM
Definition: mem.c:173
static struct mem * ram_end
Definition: mem.c:189
#define LWIP_MEM_FREE_PROTECT()
Definition: mem.c:214
#define LWIP_DBG_LEVEL_SERIOUS
Definition: debug.h:47
#define LWIP_DEBUGF(debug, message)
Definition: debug.h:95
#define LWIP_ASSERT(message, assertion)
Definition: debug.h:66
#define LWIP_DBG_TRACE
Definition: debug.h:57
#define LWIP_DBG_LEVEL_SEVERE
Definition: debug.h:48
u16_t mem_size_t
Definition: mem.h:76
#define MEM_DEBUG
Definition: lwipopts.h:145
#define MEM_STATS_DEC_USED(x, y)
Definition: stats.h:241
#define MEM_STATS_INC(x)
Definition: stats.h:239
Definition: mem.c:156
mem_size_t next
Definition: mem.c:158
u8_t used
Definition: mem.c:162

◆ mem_init()

void mem_init ( void  )

Zero the heap and initialize start, end and lowest-free

Definition at line 274 of file mem.c.

275{
276 struct mem *mem;
277
278 LWIP_ASSERT("Sanity check alignment",
279 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
280
281 /* align the heap */
283 /* initialize the start of the heap */
284 mem = (struct mem *)(void *)ram;
286 mem->prev = 0;
287 mem->used = 0;
288 /* initialize the end of the heap */
289 ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
290 ram_end->used = 1;
293
294 /* initialize the lowest-free pointer to the start of the heap */
295 lfree = (struct mem *)(void *)ram;
296
298
300 LWIP_ASSERT("failed to create mem_mutex", 0);
301 }
302}
static int avail
Definition: adh-main.c:39
static sys_mutex_t mem_mutex
Definition: mem.c:195
#define LWIP_RAM_HEAP_POINTER
Definition: mem.c:183
#define MEM_SIZE_ALIGNED
Definition: mem.c:174
#define ERR_OK
Definition: err.h:52
#define LWIP_MEM_ALIGN(addr)
Definition: mem.h:116
#define MEM_STATS_AVAIL(x, y)
Definition: stats.h:238
mem_size_t prev
Definition: mem.c:160
err_t sys_mutex_new(sys_mutex_t *mutex)

Referenced by lwip_init().

◆ mem_malloc()

void * mem_malloc ( mem_size_t  size)

Adam's mem_malloc() plus solution for bug #17922 Allocate a block of memory with a minimum of 'size' bytes.

Parameters
size — the minimum size of the requested block in bytes.
Returns
pointer to allocated memory or NULL if no free memory was found.

Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).

Definition at line 494 of file mem.c.

495{
496 mem_size_t ptr, ptr2;
497 struct mem *mem, *mem2;
498#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
499 u8_t local_mem_free_count = 0;
500#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
502
503 if (size == 0) {
504 return NULL;
505 }
506
507 /* Expand the size of the allocated memory region so that we can
508 adjust for alignment. */
510
511 if(size < MIN_SIZE_ALIGNED) {
512 /* every data block must be at least MIN_SIZE_ALIGNED long */
514 }
515
516 if (size > MEM_SIZE_ALIGNED) {
517 return NULL;
518 }
519
520 /* protect the heap from concurrent access */
523#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
524 /* run as long as a mem_free disturbed mem_malloc or mem_trim */
525 do {
526 local_mem_free_count = 0;
527#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
528
529 /* Scan through the heap searching for a free block that is big enough,
530 * beginning with the lowest free block.
531 */
532 for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
533 ptr = ((struct mem *)(void *)&ram[ptr])->next) {
534 mem = (struct mem *)(void *)&ram[ptr];
535#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
536 mem_free_count = 0;
538 /* allow mem_free or mem_trim to run */
540 if (mem_free_count != 0) {
541 /* If mem_free or mem_trim have run, we have to restart since they
542 could have altered our current struct mem. */
543 local_mem_free_count = 1;
544 break;
545 }
546#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
547
548 if ((!mem->used) &&
549 (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
550 /* mem is not used and at least perfect fit is possible:
551 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
552
554 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
555 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
556 * -> split large block, create empty remainder,
557 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
558 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
559 * struct mem would fit in but no data between mem2 and mem2->next
560 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
561 * region that couldn't hold data, but when mem->next gets freed,
562 * the 2 regions would be combined, resulting in more free memory
563 */
564 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
565 /* create mem2 struct */
566 mem2 = (struct mem *)(void *)&ram[ptr2];
567 mem2->used = 0;
568 mem2->next = mem->next;
569 mem2->prev = ptr;
570 /* and insert it between mem and mem->next */
571 mem->next = ptr2;
572 mem->used = 1;
573
574 if (mem2->next != MEM_SIZE_ALIGNED) {
575 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
576 }
578 } else {
579 /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
580 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
581 * take care of this).
582 * -> near fit or excact fit: do not split, no mem2 creation
583 * also can't move mem->next directly behind mem, since mem->next
584 * will always be used at this point!
585 */
586 mem->used = 1;
588 }
589#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
590mem_malloc_adjust_lfree:
591#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
592 if (mem == lfree) {
593 struct mem *cur = lfree;
594 /* Find next free block after mem and update lowest free pointer */
595 while (cur->used && cur != ram_end) {
596#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
597 mem_free_count = 0;
599 /* prevent high interrupt latency... */
601 if (mem_free_count != 0) {
602 /* If mem_free or mem_trim have run, we have to restart since they
603 could have altered our current struct mem or lfree. */
604 goto mem_malloc_adjust_lfree;
605 }
606#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
607 cur = (struct mem *)(void *)&ram[cur->next];
608 }
609 lfree = cur;
610 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
611 }
614 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
616 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
618 LWIP_ASSERT("mem_malloc: sanity check alignment",
619 (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
620
621 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
622 }
623 }
624#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
625 /* if we got interrupted by a mem_free, try again */
626 } while(local_mem_free_count != 0);
627#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
628 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
632 return NULL;
633}
signed short s16_t
Definition: cc.h:29
#define S16_F
Definition: cc.h:37
#define LWIP_MEM_ALLOC_DECL_PROTECT()
Definition: mem.c:217
#define LWIP_MEM_ALLOC_PROTECT()
Definition: mem.c:218
#define MIN_SIZE_ALIGNED
Definition: mem.c:172
#define LWIP_MEM_ALLOC_UNPROTECT()
Definition: mem.c:219
#define LWIP_MEM_ALIGN_SIZE(size)
Definition: mem.h:101
FxCollectionEntry * cur
static PVOID ptr
Definition: dispmode.c:27
static unsigned __int64 next
Definition: rand_nt.c:6
#define err(...)
#define MEM_STATS_INC_USED(x, y)
Definition: stats.h:240
void sys_mutex_lock(sys_mutex_t *mutex)
void sys_mutex_unlock(sys_mutex_t *mutex)

Referenced by mem_calloc(), pbuf_alloc(), and START_TEST().

◆ mem_trim()

void * mem_trim ( void * rmem,
mem_size_t  newsize 
)

Shrink memory returned by mem_malloc().

Parameters
rmem — pointer to memory allocated by mem_malloc() that is to be shrunk
newsize — required size after shrinking (needs to be smaller than or equal to the previous size)
Returns
for compatibility reasons: is always == rmem, at the moment or NULL if newsize is > old size, in which case rmem is NOT touched or freed!

Definition at line 369 of file mem.c.

370{
372 mem_size_t ptr, ptr2;
373 struct mem *mem, *mem2;
374 /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
376
377 /* Expand the size of the allocated memory region so that we can
378 adjust for alignment. */
379 newsize = LWIP_MEM_ALIGN_SIZE(newsize);
380
381 if(newsize < MIN_SIZE_ALIGNED) {
382 /* every data block must be at least MIN_SIZE_ALIGNED long */
383 newsize = MIN_SIZE_ALIGNED;
384 }
385
386 if (newsize > MEM_SIZE_ALIGNED) {
387 return NULL;
388 }
389
390 LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
391 (u8_t *)rmem < (u8_t *)ram_end);
392
393 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
395 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
396 /* protect mem stats from concurrent access */
397 SYS_ARCH_PROTECT(lev);
398 MEM_STATS_INC(illegal);
400 return rmem;
401 }
402 /* Get the corresponding struct mem ... */
403 mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
404 /* ... and its offset pointer */
405 ptr = (mem_size_t)((u8_t *)mem - ram);
406
408 LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
409 if (newsize > size) {
410 /* not supported */
411 return NULL;
412 }
413 if (newsize == size) {
414 /* No change in size, simply return */
415 return rmem;
416 }
417
418 /* protect the heap from concurrent access */
420
421 mem2 = (struct mem *)(void *)&ram[mem->next];
422 if(mem2->used == 0) {
423 /* The next struct is unused, we can simply move it at little */
425 /* remember the old next pointer */
426 next = mem2->next;
427 /* create new struct mem which is moved directly after the shrinked mem */
428 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
429 if (lfree == mem2) {
430 lfree = (struct mem *)(void *)&ram[ptr2];
431 }
432 mem2 = (struct mem *)(void *)&ram[ptr2];
433 mem2->used = 0;
434 /* restore the next pointer */
435 mem2->next = next;
436 /* link it back to mem */
437 mem2->prev = ptr;
438 /* link mem to it */
439 mem->next = ptr2;
440 /* last thing to restore linked list: as we have moved mem2,
441 * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
442 * the end of the heap */
443 if (mem2->next != MEM_SIZE_ALIGNED) {
444 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
445 }
446 MEM_STATS_DEC_USED(used, (size - newsize));
447 /* no need to plug holes, we've already done that */
448 } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
449 /* Next struct is used but there's room for another struct mem with
450 * at least MIN_SIZE_ALIGNED of data.
451 * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
452 * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
453 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
454 * region that couldn't hold data, but when mem->next gets freed,
455 * the 2 regions would be combined, resulting in more free memory */
456 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
457 mem2 = (struct mem *)(void *)&ram[ptr2];
458 if (mem2 < lfree) {
459 lfree = mem2;
460 }
461 mem2->used = 0;
462 mem2->next = mem->next;
463 mem2->prev = ptr;
464 mem->next = ptr2;
465 if (mem2->next != MEM_SIZE_ALIGNED) {
466 ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
467 }
468 MEM_STATS_DEC_USED(used, (size - newsize));
469 /* the original mem->next is used, so no need to plug holes! */
470 }
471 /* else {
472 next struct mem is used but size between mem and mem2 is not big enough
473 to create another struct mem
474 -> don't do anyhting.
475 -> the remaining space stays unused since it is too small
476 } */
477#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
478 mem_free_count = 1;
479#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
481 return rmem;
482}