mirror of
https://github.com/Zenithsiz/ftmemsim-valgrind.git
synced 2026-02-17 08:04:36 +00:00
Lots of the details changed. Made the following generalisations:

- Recast everything to be entirely in terms of bytes, instead of a mixture of (32-bit) words and bytes. This is a bit easier to understand, and made the following generalisations possible...

- Almost 64-bit clean; no longer assuming 32-bit words/pointers. Only (I think) non-64-bit-clean part is that VG_(malloc)() et al take an Int as the size arg, and size_t is 64 bits on 64-bit machines.

- Made the alignment of blocks returned by malloc() et al completely controlled by a single value, VG_MIN_MALLOC_SZB. (Previously there were various magic numbers and assumptions about block alignment scattered throughout.) I tested this; all the regression tests pass with VG_MIN_MALLOC_SZB of 4, 8, 16, 32, 64. One thing required for this was to make redzones elastic; the asked-for redzone size is now the minimum size; it will use bigger ones if necessary to get the required alignment.

Some other specific changes:

- Made use of types a bit more; i.e. actually using the type 'Block', rather than just having everything as arrays of words, so that should be a bit safer.

- Removed the a->rz_check field, which was redundant w.r.t. a->clientmem.

- Fixed up the decision about which list to use so the 4 lists which weren't ever being used now are -- the problem was that this hadn't been properly updated when alignment changed from 4 to 8 bytes.

- Added a regression test for memalign() and posix_memalign(). memalign() was aborting if passed a bad alignment argument.

- Added some high-level comments in various places, explaining how the damn thing works.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@2579
56 lines
2.2 KiB
C
56 lines
2.2 KiB
C
#include <stdlib.h>
|
|
#include <stdio.h>
|
|
#include <assert.h>
|
|
#include <malloc.h>
|
|
#include <errno.h>
|
|
|
|
// Regression test for memalign() and posix_memalign().
//
// Checks the alignment of the blocks returned for a range of requested
// alignments, and that posix_memalign() rejects invalid alignments with
// EINVAL instead of aborting.  The expected results encode the behaviour
// of Valgrind's replacement allocator (run this under Valgrind, not the
// host libc).
int main ( void )
{
   // Nb: assuming VG_MIN_MALLOC_SZB is 8!
   // Should work with both 32-bit and 64-bit pointers, though.

   int*  p;
   void* vp;   // posix_memalign() requires a genuine void** out-parameter;
               // passing (int**)&p is a constraint violation.
   int   res;

   // The "(long)p % N" alignment checks below rely on long being
   // pointer-sized.
   assert(sizeof(long int) == sizeof(void*));

   // memalign(): any alignment up to 8 yields blocks aligned to at least
   // 8 bytes; larger requests are rounded up to the next power of two.
   // A NULL return would trivially satisfy "% N", so assert non-NULL too.
   p = memalign(0, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(1, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(2, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(3, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(4, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(5, 100);     assert(NULL != p && 0 == (long)p % 8);

   p = memalign(7, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(8, 100);     assert(NULL != p && 0 == (long)p % 8);
   p = memalign(9, 100);     assert(NULL != p && 0 == (long)p % 16);

   p = memalign(31, 100);    assert(NULL != p && 0 == (long)p % 32);
   p = memalign(32, 100);    assert(NULL != p && 0 == (long)p % 32);
   p = memalign(33, 100);    assert(NULL != p && 0 == (long)p % 64);

   p = memalign(4095, 100);  assert(NULL != p && 0 == (long)p % 4096);
   p = memalign(4096, 100);  assert(NULL != p && 0 == (long)p % 4096);
   p = memalign(4097, 100);  assert(NULL != p && 0 == (long)p % 8192);

   // posix_memalign(): alignments that are not a power-of-two multiple of
   // sizeof(void*) must fail with EINVAL rather than aborting.  (This
   // implementation additionally accepts alignment 0, treating it as the
   // minimum alignment.)
   res = posix_memalign(&vp, -1,  100);  assert(EINVAL == res);
   res = posix_memalign(&vp, 0,   100);  assert(0 == res && 0 == (long)vp % 8);
   res = posix_memalign(&vp, 1,   100);  assert(EINVAL == res);
   res = posix_memalign(&vp, 2,   100);  assert(EINVAL == res);
   res = posix_memalign(&vp, 3,   100);  assert(EINVAL == res);
   res = posix_memalign(&vp, sizeof(void*), 100);
                                         assert(0 == res &&
                                                0 == (long)vp % sizeof(void*));

   res = posix_memalign(&vp, 31,  100);  assert(EINVAL == res);
   res = posix_memalign(&vp, 32,  100);  assert(0 == res &&
                                                0 == (long)vp % 32);
   res = posix_memalign(&vp, 33,  100);  assert(EINVAL == res);

   res = posix_memalign(&vp, 4095, 100); assert(EINVAL == res);
   res = posix_memalign(&vp, 4096, 100); assert(0 == res &&
                                                0 == (long)vp % 4096);
   res = posix_memalign(&vp, 4097, 100); assert(EINVAL == res);

   return 0;
}
|