Merge Helgrind from branches/YARD into the trunk. Also includes some
minor changes to make stack unwinding on amd64-linux approximately
twice as fast as it was before.



git-svn-id: svn://svn.valgrind.org/valgrind/trunk@8707
Julian Seward
2008-10-25 16:22:41 +00:00
parent 4d822b7bd3
commit 35c28b721f
26 changed files with 7361 additions and 5706 deletions


@@ -98,6 +98,13 @@
*/
/*------------------------------------------------------------*/
/*--- fwdses ---*/
/*------------------------------------------------------------*/
static void cfsi_cache__invalidate ( void );
/*------------------------------------------------------------*/
/*--- Root structure ---*/
/*------------------------------------------------------------*/
@@ -320,10 +327,11 @@ static void discard_DebugInfo ( DebugInfo* di )
/* Repeatedly scan debugInfo_list, looking for DebugInfos with text
AVMAs intersecting [start,start+length), and call discard_DebugInfo
to get rid of them. This modifies the list, hence the multiple
iterations.
iterations. Returns True iff any such DebugInfos were found.
*/
static void discard_syms_in_range ( Addr start, SizeT length )
static Bool discard_syms_in_range ( Addr start, SizeT length )
{
Bool anyFound = False;
Bool found;
DebugInfo* curr;
@@ -347,8 +355,11 @@ static void discard_syms_in_range ( Addr start, SizeT length )
}
if (!found) break;
anyFound = True;
discard_DebugInfo( curr );
}
return anyFound;
}
@@ -478,6 +489,84 @@ DebugInfo* find_or_create_DebugInfo_for ( UChar* filename, UChar* memname )
}
/* Debuginfo reading for 'di' has just been successfully completed.
Check that the invariants stated in
"Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS" in
priv_storage.h are observed. */
static void check_CFSI_related_invariants ( DebugInfo* di )
{
DebugInfo* di2 = NULL;
vg_assert(di);
/* This fn isn't called until after debuginfo for this object has
been successfully read. And that shouldn't happen until we have
both a r-x and rw- mapping for the object. Hence: */
vg_assert(di->have_rx_map);
vg_assert(di->have_rw_map);
/* degenerate case: r-x section is empty */
if (di->rx_map_size == 0) {
vg_assert(di->cfsi == NULL);
return;
}
/* normal case: r-x section is nonempty */
/* invariant (0) */
vg_assert(di->rx_map_size > 0);
/* invariant (1) */
for (di2 = debugInfo_list; di2; di2 = di2->next) {
if (di2 == di)
continue;
if (di2->rx_map_size == 0)
continue;
vg_assert(di->rx_map_avma + di->rx_map_size <= di2->rx_map_avma
|| di2->rx_map_avma + di2->rx_map_size <= di->rx_map_avma);
}
di2 = NULL;
/* invariant (2) */
if (di->cfsi) {
vg_assert(di->cfsi_minavma <= di->cfsi_maxavma); /* duh! */
vg_assert(di->cfsi_minavma >= di->rx_map_avma);
vg_assert(di->cfsi_maxavma < di->rx_map_avma + di->rx_map_size);
}
/* invariants (3) and (4) */
if (di->cfsi) {
Word i;
vg_assert(di->cfsi_used > 0);
vg_assert(di->cfsi_size > 0);
for (i = 0; i < di->cfsi_used; i++) {
DiCfSI* cfsi = &di->cfsi[i];
vg_assert(cfsi->len > 0);
vg_assert(cfsi->base >= di->cfsi_minavma);
vg_assert(cfsi->base + cfsi->len - 1 <= di->cfsi_maxavma);
if (i > 0) {
DiCfSI* cfsip = &di->cfsi[i-1];
vg_assert(cfsip->base + cfsip->len <= cfsi->base);
}
}
} else {
vg_assert(di->cfsi_used == 0);
vg_assert(di->cfsi_size == 0);
}
}
/*--------------------------------------------------------------*/
/*--- ---*/
/*--- TOP LEVEL: INITIALISE THE DEBUGINFO SYSTEM ---*/
/*--- ---*/
/*--------------------------------------------------------------*/
void VG_(di_initialise) ( void )
{
/* There's actually very little to do here, since everything
centers around the DebugInfos in debugInfo_list, they are
created and destroyed on demand, and each one is treated more or
less independently. */
vg_assert(debugInfo_list == NULL);
/* flush the CFI fast query cache. */
cfsi_cache__invalidate();
}
/*--------------------------------------------------------------*/
/*--- ---*/
/*--- TOP LEVEL: NOTIFICATION (ACQUIRE/DISCARD INFO) (LINUX) ---*/
@@ -718,6 +807,8 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV )
TRACE_SYMTAB("\n------ Canonicalising the "
"acquired info ------\n");
/* invalidate the CFI unwind cache. */
cfsi_cache__invalidate();
/* prepare read data for use */
ML_(canonicaliseTables)( di );
/* notify m_redir about it */
@@ -727,6 +818,10 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV )
di->have_dinfo = True;
tl_assert(di->handle > 0);
di_handle = di->handle;
/* Check invariants listed in
Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS in
priv_storage.h. */
check_CFSI_related_invariants(di);
} else {
TRACE_SYMTAB("\n------ ELF reading failed ------\n");
@@ -734,6 +829,7 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV )
this DebugInfo? No - it contains info on the rw/rx
mappings, at least. */
di_handle = 0;
vg_assert(di->have_dinfo == False);
}
TRACE_SYMTAB("\n");
@@ -750,8 +846,11 @@ ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV )
[a, a+len). */
void VG_(di_notify_munmap)( Addr a, SizeT len )
{
Bool anyFound;
if (0) VG_(printf)("DISCARD %#lx %#lx\n", a, a+len);
discard_syms_in_range(a, len);
anyFound = discard_syms_in_range(a, len);
if (anyFound)
cfsi_cache__invalidate();
}
@@ -765,8 +864,11 @@ void VG_(di_notify_mprotect)( Addr a, SizeT len, UInt prot )
# if defined(VGP_x86_linux)
exe_ok = exe_ok || toBool(prot & VKI_PROT_READ);
# endif
if (0 && !exe_ok)
discard_syms_in_range(a, len);
if (0 && !exe_ok) {
Bool anyFound = discard_syms_in_range(a, len);
if (anyFound)
cfsi_cache__invalidate();
}
}
#endif /* defined(VGO_linux) */
@@ -797,6 +899,10 @@ ULong VG_(di_aix5_notify_segchange)(
{
ULong hdl = 0;
/* play safe; always invalidate the CFI cache. Not
that it should be used on AIX, but still .. */
cfsi_cache__invalidate();
if (acquire) {
Bool ok;
@@ -840,6 +946,10 @@ ULong VG_(di_aix5_notify_segchange)(
di->have_dinfo = True;
hdl = di->handle;
vg_assert(hdl > 0);
/* Check invariants listed in
Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS in
priv_storage.h. */
check_CFSI_related_invariants(di);
} else {
/* Something went wrong (eg. bad XCOFF file). */
discard_DebugInfo( di );
@@ -850,8 +960,11 @@ ULong VG_(di_aix5_notify_segchange)(
/* Dump all the debugInfos whose text segments intersect
code_start/code_len. */
/* CFI cache is always invalidated at start of this routine.
Hence it's safe to ignore the return value of
discard_syms_in_range. */
if (code_len > 0)
discard_syms_in_range( code_start, code_len );
(void)discard_syms_in_range( code_start, code_len );
}
@@ -893,11 +1006,11 @@ void VG_(di_discard_ALL_debuginfo)( void )
If findText==False, only data symbols are searched for.
*/
static void search_all_symtabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
/*OUT*/Int* symno,
/*OUT*/Word* symno,
Bool match_anywhere_in_sym,
Bool findText )
{
Int sno;
Word sno;
DebugInfo* di;
Bool inRange;
@@ -944,9 +1057,9 @@ static void search_all_symtabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
*pdi to the relevant DebugInfo, and *locno to the loctab entry
*number within that. If not found, *pdi is set to NULL. */
static void search_all_loctabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
/*OUT*/Int* locno )
/*OUT*/Word* locno )
{
Int lno;
Word lno;
DebugInfo* di;
for (di = debugInfo_list; di != NULL; di = di->next) {
if (di->text_present
@@ -977,7 +1090,7 @@ Bool get_sym_name ( Bool demangle, Addr a, Char* buf, Int nbuf,
Bool findText, /*OUT*/OffT* offsetP )
{
DebugInfo* di;
Int sno;
Word sno;
Int offset;
search_all_symtabs ( a, &di, &sno, match_anywhere_in_sym, findText );
@@ -1019,7 +1132,7 @@ Bool get_sym_name ( Bool demangle, Addr a, Char* buf, Int nbuf,
Addr VG_(get_tocptr) ( Addr guest_code_addr )
{
DebugInfo* si;
Int sno;
Word sno;
search_all_symtabs ( guest_code_addr,
&si, &sno,
True/*match_anywhere_in_fun*/,
@@ -1186,7 +1299,7 @@ DebugInfo* VG_(find_seginfo) ( Addr a )
Bool VG_(get_filename)( Addr a, Char* filename, Int n_filename )
{
DebugInfo* si;
Int locno;
Word locno;
search_all_loctabs ( a, &si, &locno );
if (si == NULL)
return False;
@@ -1198,7 +1311,7 @@ Bool VG_(get_filename)( Addr a, Char* filename, Int n_filename )
Bool VG_(get_linenum)( Addr a, UInt* lineno )
{
DebugInfo* si;
Int locno;
Word locno;
search_all_loctabs ( a, &si, &locno );
if (si == NULL)
return False;
@@ -1217,7 +1330,7 @@ Bool VG_(get_filename_linenum) ( Addr a,
/*OUT*/UInt* lineno )
{
DebugInfo* si;
Int locno;
Word locno;
vg_assert( (dirname == NULL && dirname_available == NULL)
||
@@ -1541,6 +1654,122 @@ UWord evalCfiExpr ( XArray* exprs, Int ix,
}
/* Search all the DebugInfos in the entire system, to find the DiCfSI
that pertains to 'ip'.
If found, set *diP to the DebugInfo in which it resides, and
*ixP to the index in that DebugInfo's cfsi array.
If not found, set *diP to (DebugInfo*)1 and *ixP to zero.
*/
__attribute__((noinline))
static void find_DiCfSI ( /*OUT*/DebugInfo** diP,
/*OUT*/Word* ixP,
Addr ip )
{
DebugInfo* di;
Word i = -1;
static UWord n_search = 0;
static UWord n_steps = 0;
n_search++;
if (0) VG_(printf)("search for %#lx\n", ip);
for (di = debugInfo_list; di != NULL; di = di->next) {
Word j;
n_steps++;
/* Use the per-DebugInfo summary address ranges to skip
inapplicable DebugInfos quickly. */
if (di->cfsi_used == 0)
continue;
if (ip < di->cfsi_minavma || ip > di->cfsi_maxavma)
continue;
/* It might be in this DebugInfo. Search it. */
j = ML_(search_one_cfitab)( di, ip );
vg_assert(j >= -1 && j < (Word)di->cfsi_used);
if (j != -1) {
i = j;
break; /* found it */
}
}
if (i == -1) {
/* we didn't find it. */
*diP = (DebugInfo*)1;
*ixP = 0;
} else {
/* found it. */
/* ensure that di is 4-aligned (at least), so it can't possibly
be equal to (DebugInfo*)1. */
vg_assert(di && VG_IS_4_ALIGNED(di));
vg_assert(i >= 0 && i < di->cfsi_used);
*diP = di;
*ixP = i;
/* Start of performance-enhancing hack: once every 16 (chosen
hackily after profiling) successful searches, move the found
DebugInfo one step closer to the start of the list. This
makes future searches cheaper. For starting konqueror on
amd64, this in fact reduces the total amount of searching
done by the above find-the-right-DebugInfo loop by more than
a factor of 20. */
if ((n_search & 0xF) == 0) {
/* Move di one step closer to the start of the list. */
move_DebugInfo_one_step_forward( di );
}
/* End of performance-enhancing hack. */
if (0 && ((n_search & 0x7FFFF) == 0))
VG_(printf)("find_DiCfSI: %lu searches, "
"%lu DebugInfos looked at\n",
n_search, n_steps);
}
}
/* Now follows a mechanism for caching queries to find_DiCfSI, since
they are extremely frequent on amd64-linux, during stack unwinding.
Each cache entry binds an ip value to a (di, ix) pair. Possible
values:
di is non-null, ix >= 0 ==> cache slot in use, "di->cfsi[ix]"
di is (DebugInfo*)1 ==> cache slot in use, no associated di
di is NULL ==> cache slot not in use
Hence simply zeroing out the entire cache invalidates all
entries.
Why not map ip values directly to DiCfSI*'s? Because this would
cause problems if/when the cfsi array is moved due to resizing.
Instead we cache .cfsi array index value, which should be invariant
across resizing. (That said, I don't think the current
implementation will resize whilst queries are in progress, since
the DiCfSI records are added all at once, when the debuginfo for an
object is read, and are never changed thereafter.) */
#define N_CFSI_CACHE 511
typedef
struct { Addr ip; DebugInfo* di; Word ix; }
CFSICacheEnt;
static CFSICacheEnt cfsi_cache[N_CFSI_CACHE];
static void cfsi_cache__invalidate ( void ) {
VG_(memset)(&cfsi_cache, 0, sizeof(cfsi_cache));
}
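For illustration, the probe-and-update logic that the next hunk adds to VG_(use_CF_info) amounts to a direct-mapped cache lookup; packaged as a helper it might look like this (a sketch only; cfsi_cache__find is a hypothetical name, not part of this commit):

   static CFSICacheEnt* cfsi_cache__find ( Addr ip )
   {
      UWord hash = ip % N_CFSI_CACHE;
      CFSICacheEnt* ce = &cfsi_cache[hash];
      if (UNLIKELY(ce->ip != ip || ce->di == NULL)) {
         /* Miss: do the slow search and overwrite this slot. After
            this, ce->di is either a real DebugInfo* or (DebugInfo*)1,
            so the slot counts as in-use for subsequent probes. */
         ce->ip = ip;
         find_DiCfSI( &ce->di, &ce->ix, ip );
      }
      return ce;
   }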
/* The main function for DWARF2/3 CFI-based stack unwinding.
Given an IP/SP/FP triple, produce the IP/SP/FP values for the
previous frame, if possible. */
@@ -1553,61 +1782,47 @@ Bool VG_(use_CF_info) ( /*MOD*/Addr* ipP,
Addr min_accessible,
Addr max_accessible )
{
Bool ok;
Int i;
DebugInfo* si;
DiCfSI* cfsi = NULL;
Addr cfa, ipHere, spHere, fpHere, ipPrev, spPrev, fpPrev;
Bool ok;
DebugInfo* di;
DiCfSI* cfsi = NULL;
Addr cfa, ipHere, spHere, fpHere, ipPrev, spPrev, fpPrev;
CfiExprEvalContext eec;
static UInt n_search = 0;
static UInt n_steps = 0;
n_search++;
static UWord n_q = 0, n_m = 0;
n_q++;
if (0 && 0 == (n_q & 0x1FFFFF))
VG_(printf)("QQQ %lu %lu\n", n_q, n_m);
if (0) VG_(printf)("search for %#lx\n", *ipP);
{ UWord hash = (*ipP) % N_CFSI_CACHE;
CFSICacheEnt* ce = &cfsi_cache[hash];
for (si = debugInfo_list; si != NULL; si = si->next) {
n_steps++;
if (LIKELY(ce->ip == *ipP) && LIKELY(ce->di != NULL)) {
/* found an entry in the cache .. */
} else {
/* not found in cache. Search and update. */
n_m++;
ce->ip = *ipP;
find_DiCfSI( &ce->di, &ce->ix, *ipP );
}
/* Use the per-DebugInfo summary address ranges to skip
inapplicable DebugInfos quickly. */
if (si->cfsi_used == 0)
continue;
if (*ipP < si->cfsi_minavma || *ipP > si->cfsi_maxavma)
continue;
i = ML_(search_one_cfitab)( si, *ipP );
if (i != -1) {
vg_assert(i >= 0 && i < si->cfsi_used);
cfsi = &si->cfsi[i];
break;
}
if (UNLIKELY(ce->di == (DebugInfo*)1)) {
/* no DiCfSI for this address */
cfsi = NULL;
di = NULL;
} else {
/* found a DiCfSI for this address */
di = ce->di;
cfsi = &di->cfsi[ ce->ix ];
}
}
if (cfsi == NULL)
return False;
if (0 && ((n_search & 0x7FFFF) == 0))
VG_(printf)("VG_(use_CF_info): %u searches, "
"%u DebugInfos looked at\n",
n_search, n_steps);
/* Start of performance-enhancing hack: once every 64 (chosen
hackily after profiling) successful searches, move the found
DebugInfo one step closer to the start of the list. This makes
future searches cheaper. For starting konqueror on amd64, this
in fact reduces the total amount of searching done by the above
find-the-right-DebugInfo loop by more than a factor of 20. */
if ((n_search & 0x3F) == 0) {
/* Move si one step closer to the start of the list. */
move_DebugInfo_one_step_forward( si );
}
/* End of performance-enhancing hack. */
if (UNLIKELY(cfsi == NULL))
return False; /* no info. Nothing we can do. */
if (0) {
VG_(printf)("found cfisi: ");
ML_(ppDiCfSI)(si->cfsi_exprs, cfsi);
ML_(ppDiCfSI)(di->cfsi_exprs, cfsi);
}
ipPrev = spPrev = fpPrev = 0;
@@ -1628,7 +1843,7 @@ Bool VG_(use_CF_info) ( /*MOD*/Addr* ipP,
case CFIC_EXPR:
if (0) {
VG_(printf)("CFIC_EXPR: ");
ML_(ppCfiExpr)(si->cfsi_exprs, cfsi->cfa_off);
ML_(ppCfiExpr)(di->cfsi_exprs, cfsi->cfa_off);
VG_(printf)("\n");
}
eec.ipHere = ipHere;
@@ -1637,7 +1852,7 @@ Bool VG_(use_CF_info) ( /*MOD*/Addr* ipP,
eec.min_accessible = min_accessible;
eec.max_accessible = max_accessible;
ok = True;
cfa = evalCfiExpr(si->cfsi_exprs, cfsi->cfa_off, &eec, &ok );
cfa = evalCfiExpr(di->cfsi_exprs, cfsi->cfa_off, &eec, &ok );
if (!ok) return False;
break;
default:
@@ -1667,14 +1882,14 @@ Bool VG_(use_CF_info) ( /*MOD*/Addr* ipP,
break; \
case CFIR_EXPR: \
if (0) \
ML_(ppCfiExpr)(si->cfsi_exprs,_off); \
ML_(ppCfiExpr)(di->cfsi_exprs,_off); \
eec.ipHere = ipHere; \
eec.spHere = spHere; \
eec.fpHere = fpHere; \
eec.min_accessible = min_accessible; \
eec.max_accessible = max_accessible; \
ok = True; \
_prev = evalCfiExpr(si->cfsi_exprs, _off, &eec, &ok ); \
_prev = evalCfiExpr(di->cfsi_exprs, _off, &eec, &ok ); \
if (!ok) return False; \
break; \
default: \


@@ -308,7 +308,46 @@ struct _DebugInfo {
in some obscure circumstances (to do with data/sdata/bss) it is
possible for the mapping to be present but have zero size.
Certainly text_ is mandatory on all platforms; not sure about
the rest though. */
the rest though.
Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS: we require that
either (rx_map_size == 0 && cfsi == NULL) (the degenerate case)
or the normal case, which is the AND of the following:
(0) rx_map_size > 0
(1) no two DebugInfos with rx_map_size > 0
have overlapping [rx_map_avma,+rx_map_size)
(2) [cfsi_minavma,cfsi_maxavma] does not extend
beyond [rx_map_avma,+rx_map_size); that is, the former is a
subrange of, or equal to, the latter.
(3) all DiCfSI in the cfsi array have ranges that fall within
[rx_map_avma,+rx_map_size).
(4) all DiCfSI in the cfsi array are non-overlapping
The cumulative effect of these restrictions is to ensure that
all the DiCfSI records in the entire system are non-overlapping.
Hence any address falls into either exactly one DiCfSI record,
or none. Hence it is safe to cache the results of searches for
DiCfSI records. This is the whole point of these restrictions.
The caching of DiCfSI searches is done in VG_(use_CF_info). The
cache is flushed after any change to debugInfo_list. DiCfSI
searches are cached because they are central to stack unwinding
on amd64-linux.
Where are these invariants imposed and checked?
They are checked after a successful read of debuginfo into
a DebugInfo*, in check_CFSI_related_invariants.
(1) is not really imposed anywhere. We simply assume that the
kernel will not map the text segments from two different objects
into the same space. Sounds reasonable.
(2) follows from (4) and (3). It is ensured by canonicaliseCFI.
(3) is ensured by ML_(addDiCfSI).
(4) is ensured by canonicaliseCFI.
*/
/* .text */
Bool text_present;
Addr text_avma;
@@ -372,8 +411,8 @@ struct _DebugInfo {
records require any expression nodes, they are stored in
cfsi_exprs. */
DiCfSI* cfsi;
UInt cfsi_used;
UInt cfsi_size;
UWord cfsi_used;
UWord cfsi_size;
Addr cfsi_minavma;
Addr cfsi_maxavma;
XArray* cfsi_exprs; /* XArray of CfiExpr */
@@ -464,17 +503,17 @@ extern void ML_(canonicaliseTables) ( struct _DebugInfo* di );
/* Find a symbol-table index containing the specified pointer, or -1
if not found. Binary search. */
extern Int ML_(search_one_symtab) ( struct _DebugInfo* di, Addr ptr,
Bool match_anywhere_in_sym,
Bool findText );
extern Word ML_(search_one_symtab) ( struct _DebugInfo* di, Addr ptr,
Bool match_anywhere_in_sym,
Bool findText );
/* Find a location-table index containing the specified pointer, or -1
if not found. Binary search. */
extern Int ML_(search_one_loctab) ( struct _DebugInfo* di, Addr ptr );
extern Word ML_(search_one_loctab) ( struct _DebugInfo* di, Addr ptr );
/* Find a CFI-table index containing the specified pointer, or -1 if
not found. Binary search. */
extern Int ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr );
extern Word ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr );
/* ------ Misc ------ */


@@ -978,9 +978,9 @@ static Int compare_DiSym ( void* va, void* vb )
*/
static DiSym* prefersym ( struct _DebugInfo* di, DiSym* a, DiSym* b )
{
Int cmp;
Int lena, lenb; /* full length */
Int vlena, vlenb; /* length without version */
Word cmp;
Word lena, lenb; /* full length */
Word vlena, vlenb; /* length without version */
const UChar *vpa, *vpb;
Bool preferA = False;
@@ -1062,7 +1062,7 @@ static DiSym* prefersym ( struct _DebugInfo* di, DiSym* a, DiSym* b )
static void canonicaliseSymtab ( struct _DebugInfo* di )
{
Int i, j, n_merged, n_truncated;
Word i, j, n_merged, n_truncated;
Addr s1, s2, e1, e2;
# define SWAP(ty,aa,bb) \
@@ -1095,14 +1095,14 @@ static void canonicaliseSymtab ( struct _DebugInfo* di )
di->symtab[di->symtab_used++] = di->symtab[i];
}
}
TRACE_SYMTAB( "canonicaliseSymtab: %d symbols merged\n", n_merged);
TRACE_SYMTAB( "canonicaliseSymtab: %ld symbols merged\n", n_merged);
}
while (n_merged > 0);
/* Detect and "fix" overlapping address ranges. */
n_truncated = 0;
for (i = 0; i < ((Int)di->symtab_used) -1; i++) {
for (i = 0; i < ((Word)di->symtab_used) -1; i++) {
vg_assert(di->symtab[i].addr <= di->symtab[i+1].addr);
@@ -1149,7 +1149,7 @@ static void canonicaliseSymtab ( struct _DebugInfo* di )
/* It may be that the i+1 entry now needs to be moved further
along to maintain the address order requirement. */
j = i+1;
while (j < ((Int)di->symtab_used)-1
while (j < ((Word)di->symtab_used)-1
&& di->symtab[j].addr > di->symtab[j+1].addr) {
SWAP(DiSym,di->symtab[j],di->symtab[j+1]);
j++;
@@ -1160,7 +1160,7 @@ static void canonicaliseSymtab ( struct _DebugInfo* di )
if (n_truncated > 0) goto cleanup_more;
/* Ensure relevant postconditions hold. */
for (i = 0; i < ((Int)di->symtab_used)-1; i++) {
for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
/* No zero-sized symbols. */
vg_assert(di->symtab[i].size > 0);
/* In order. */
@@ -1189,7 +1189,7 @@ static Int compare_DiLoc ( void* va, void* vb )
static void canonicaliseLoctab ( struct _DebugInfo* di )
{
Int i, j;
Word i, j;
# define SWAP(ty,aa,bb) \
do { ty tt = (aa); (aa) = (bb); (bb) = tt; } while (0);
@@ -1202,7 +1202,7 @@ static void canonicaliseLoctab ( struct _DebugInfo* di )
sizeof(*di->loctab), compare_DiLoc);
/* If two adjacent entries overlap, truncate the first. */
for (i = 0; i < ((Int)di->loctab_used)-1; i++) {
for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
vg_assert(di->loctab[i].size < 10000);
if (di->loctab[i].addr + di->loctab[i].size > di->loctab[i+1].addr) {
/* Do this in signed int32 because the actual .size fields
@@ -1222,7 +1222,7 @@ static void canonicaliseLoctab ( struct _DebugInfo* di )
/* Zap any zero-sized entries resulting from the truncation
process. */
j = 0;
for (i = 0; i < (Int)di->loctab_used; i++) {
for (i = 0; i < (Word)di->loctab_used; i++) {
if (di->loctab[i].size > 0) {
if (j != i)
di->loctab[j] = di->loctab[i];
@@ -1232,7 +1232,7 @@ static void canonicaliseLoctab ( struct _DebugInfo* di )
di->loctab_used = j;
/* Ensure relevant postconditions hold. */
for (i = 0; i < ((Int)di->loctab_used)-1; i++) {
for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
/*
VG_(printf)("%d (%d) %d 0x%x\n",
i, di->loctab[i+1].confident,
@@ -1272,7 +1272,7 @@ static Int compare_DiCfSI ( void* va, void* vb )
static void canonicaliseCFI ( struct _DebugInfo* di )
{
Int i, j;
Word i, j;
const Addr minAvma = 0;
const Addr maxAvma = ~minAvma;
@@ -1287,7 +1287,7 @@ static void canonicaliseCFI ( struct _DebugInfo* di )
address range contained in cfsi[0 .. cfsi_used-1]. */
di->cfsi_minavma = maxAvma;
di->cfsi_maxavma = minAvma;
for (i = 0; i < (Int)di->cfsi_used; i++) {
for (i = 0; i < (Word)di->cfsi_used; i++) {
Addr here_min = di->cfsi[i].base;
Addr here_max = di->cfsi[i].base + di->cfsi[i].len - 1;
if (here_min < di->cfsi_minavma)
@@ -1297,7 +1297,7 @@ static void canonicaliseCFI ( struct _DebugInfo* di )
}
if (di->trace_cfi)
VG_(printf)("canonicaliseCfiSI: %d entries, %#lx .. %#lx\n",
VG_(printf)("canonicaliseCfiSI: %ld entries, %#lx .. %#lx\n",
di->cfsi_used,
di->cfsi_minavma, di->cfsi_maxavma);
@@ -1305,9 +1305,9 @@ static void canonicaliseCFI ( struct _DebugInfo* di )
VG_(ssort)(di->cfsi, di->cfsi_used, sizeof(*di->cfsi), compare_DiCfSI);
/* If two adjacent entries overlap, truncate the first. */
for (i = 0; i < (Int)di->cfsi_used-1; i++) {
for (i = 0; i < (Word)di->cfsi_used-1; i++) {
if (di->cfsi[i].base + di->cfsi[i].len > di->cfsi[i+1].base) {
Int new_len = di->cfsi[i+1].base - di->cfsi[i].base;
Word new_len = di->cfsi[i+1].base - di->cfsi[i].base;
/* how could it be otherwise? The entries are sorted by the
.base field. */
vg_assert(new_len >= 0);
@@ -1319,7 +1319,7 @@ static void canonicaliseCFI ( struct _DebugInfo* di )
/* Zap any zero-sized entries resulting from the truncation
process. */
j = 0;
for (i = 0; i < (Int)di->cfsi_used; i++) {
for (i = 0; i < (Word)di->cfsi_used; i++) {
if (di->cfsi[i].len > 0) {
if (j != i)
di->cfsi[j] = di->cfsi[i];
@@ -1330,7 +1330,7 @@ static void canonicaliseCFI ( struct _DebugInfo* di )
di->cfsi_used = j;
/* Ensure relevant postconditions hold. */
for (i = 0; i < (Int)di->cfsi_used; i++) {
for (i = 0; i < (Word)di->cfsi_used; i++) {
/* No zero-length ranges. */
vg_assert(di->cfsi[i].len > 0);
/* Makes sense w.r.t. summary address range */
@@ -1375,9 +1375,9 @@ void ML_(canonicaliseTables) ( struct _DebugInfo* di )
/* Find a symbol-table index containing the specified pointer, or -1
if not found. Binary search. */
Int ML_(search_one_symtab) ( struct _DebugInfo* di, Addr ptr,
Bool match_anywhere_in_sym,
Bool findText )
Word ML_(search_one_symtab) ( struct _DebugInfo* di, Addr ptr,
Bool match_anywhere_in_sym,
Bool findText )
{
Addr a_mid_lo, a_mid_hi;
Word mid, size,
@@ -1408,7 +1408,7 @@ Int ML_(search_one_symtab) ( struct _DebugInfo* di, Addr ptr,
/* Find a location-table index containing the specified pointer, or -1
if not found. Binary search. */
Int ML_(search_one_loctab) ( struct _DebugInfo* di, Addr ptr )
Word ML_(search_one_loctab) ( struct _DebugInfo* di, Addr ptr )
{
Addr a_mid_lo, a_mid_hi;
Word mid,
@@ -1432,10 +1432,10 @@ Int ML_(search_one_loctab) ( struct _DebugInfo* di, Addr ptr )
/* Find a CFI-table index containing the specified pointer, or -1
if not found. Binary search. */
Int ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr )
Word ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr )
{
Addr a_mid_lo, a_mid_hi;
Int mid, size,
Word mid, size,
lo = 0,
hi = di->cfsi_used-1;
while (True) {
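The hunk ends just as the search loop begins. For reference, the address-range binary search that the three ML_(search_one_*) functions perform has roughly this shape (a simplified sketch over a hypothetical Range table, written with Valgrind's Word/Addr types; not the exact code):

   typedef struct { Addr base; SizeT len; } Range;

   static Word search_ranges ( const Range* tab, Word used, Addr ptr )
   {
      Addr a_mid_lo, a_mid_hi;
      Word mid, lo = 0, hi = used-1;
      while (True) {
         /* current unsearched space is tab[lo .. hi], inclusive. */
         if (lo > hi) return -1; /* not found */
         mid      = (lo + hi) / 2;
         a_mid_lo = tab[mid].base;
         a_mid_hi = tab[mid].base + tab[mid].len - 1;
         if (ptr < a_mid_lo) { hi = mid-1; continue; }
         if (ptr > a_mid_hi) { lo = mid+1; continue; }
         return mid; /* ptr lies within tab[mid]'s address range */
      }
   }

Returning Word rather than Int is the point of these hunks: on 64-bit targets the index is then a full machine word, presumably avoiding Int/Word truncation in callers.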


@@ -469,6 +469,11 @@ ExeContext* VG_(get_ExeContext_from_ECU)( UInt ecu )
return NULL;
}
ExeContext* VG_(make_ExeContext_from_StackTrace)( Addr* ips, UInt n_ips )
{
return record_ExeContext_wrk2(ips, n_ips);
}
/*--------------------------------------------------------------------*/
/*--- end m_execontext.c ---*/
/*--------------------------------------------------------------------*/


@@ -1359,6 +1359,12 @@ Int valgrind_main ( Int argc, HChar **argv, HChar **envp )
//
//============================================================
//--------------------------------------------------------------
// Initialise m_debuginfo
// p: dynamic memory allocation
VG_(debugLog)(1, "main", "Initialise m_debuginfo\n");
VG_(di_initialise)();
//--------------------------------------------------------------
// Look for alternative libdir
{ HChar *cp = VG_(getenv)(VALGRIND_LIB);
@@ -1729,6 +1735,7 @@ Int valgrind_main ( Int argc, HChar **argv, HChar **envp )
// p: setup_code_redirect_table [so that redirs can be recorded]
// p: mallocfree
// p: probably: setup fds and process CLOs, so that logging works
// p: initialise m_debuginfo
//
// While doing this, make a note of the debuginfo-handles that
// come back from VG_(di_notify_mmap)/VG_(di_aix5_notify_segchange).


@@ -161,7 +161,8 @@ UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
fails, and is expensive. */
/* Deal with frames resulting from functions which begin "pushl%
ebp ; movl %esp, %ebp" which is the ABI-mandated preamble. */
if (fp_min <= fp && fp <= fp_max) {
if (fp_min <= fp && fp <= fp_max
- 1 * sizeof(UWord)/*see comment below*/) {
/* fp looks sane, so use it. */
ip = (((UWord*)fp)[1]);
sp = fp + sizeof(Addr) /*saved %ebp*/
@@ -251,7 +252,11 @@ UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
the start of the fn, like GDB does, there's no reliable way
to tell. Hence the hack of first trying out CFI, and if that
fails, then use this as a fallback. */
if (fp_min <= fp && fp <= fp_max) {
/* Note: re "- 1 * sizeof(UWord)", need to take account of the
fact that we are prodding at & ((UWord*)fp)[1] and so need to
adjust the limit check accordingly. Omitting this has been
observed to cause segfaults on rare occasions. */
if (fp_min <= fp && fp <= fp_max - 1 * sizeof(UWord)) {
/* fp looks sane, so use it. */
ip = (((UWord*)fp)[1]);
sp = fp + sizeof(Addr) /*saved %rbp*/
@@ -371,7 +376,7 @@ UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
/* Try to derive a new (ip,fp) pair from the current set. */
if (fp_min <= fp && fp <= fp_max) {
if (fp_min <= fp && fp <= fp_max - lr_offset * sizeof(UWord)) {
/* fp looks sane, so use it. */
if (i == 1 && lr_is_first_RA)


@@ -162,7 +162,7 @@ static inline void ensureSpaceXA ( struct _XArray* xa )
else if (xa->elemSzB == 2) newsz = 4;
else newsz = 2;
} else {
newsz = 1 + (3 * xa->totsizeE) / 2; /* 2 * xa->totsizeE; */
newsz = 2 + (3 * xa->totsizeE) / 2; /* 2 * xa->totsizeE; */
}
if (0 && xa->totsizeE >= 10000)
VG_(printf)("addToXA: increasing from %ld to %ld\n",


@@ -39,6 +39,9 @@
#include "pub_tool_debuginfo.h"
/* Initialise the entire module. Must be called first of all. */
extern void VG_(di_initialise) ( void );
/* LINUX: Notify the debuginfo system about a new mapping, or the
disappearance of such, or a permissions change on an existing
mapping. This is the way new debug information gets loaded. If


@@ -210,6 +210,11 @@
Helgrind:Race
fun:__lll_*lock_*
}
{
helgrind-glibc28-112
Helgrind:Race
fun:pthread_create@*
}
######------------ glibc-2.7 specific ---------######
#


@@ -70,7 +70,9 @@ vgpreload_helgrind_ppc64_aix5_so_LDFLAGS = \
$(PRELOAD_LDFLAGS_PPC64_AIX5) \
$(LIBREPLACEMALLOC_LDFLAGS_PPC64_AIX5)
HELGRIND_SOURCES_COMMON = hg_wordset.c hg_main.c
HELGRIND_SOURCES_COMMON = \
hg_basics.c hg_lock_n_thread.c hg_wordset.c libhb_core.c \
hg_errors.c hg_main.c
helgrind_x86_linux_SOURCES = $(HELGRIND_SOURCES_COMMON)
helgrind_x86_linux_CPPFLAGS = $(AM_CPPFLAGS_X86_LINUX)
@@ -118,4 +120,7 @@ hgincludedir = $(includedir)/valgrind
hginclude_HEADERS = helgrind.h
noinst_HEADERS = hg_wordset.h
noinst_HEADERS = \
hg_basics.h hg_lock_n_thread.h hg_errors.h hg_wordset.h
EXTRA_DIST = README_MSMProp2.txt README_YARD.txt


@@ -0,0 +1,156 @@
MSMProp2, a simplified but functionally equivalent version of MSMProp1
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Julian Seward, OpenWorks Ltd, 19 August 2008
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Note that this file does NOT describe the state machine used in the
svn://svn.valgrind.org/branches/YARD version of Helgrind. That state
machine is different again from any previously described machine.
See the file README_YARD.txt for more details on YARD.
----------------------
In early 2008 Konstantin Serebryany proposed "MSMProp1", a memory
state machine for data race detection. It is described at
http://code.google.com/p/data-race-test/wiki/MSMProp1
Implementation experiences show MSMProp1 is useful, but difficult to
implement efficiently. In particular, keeping the memory usage under
control is complex and difficult.
This note points out a key simplification of MSMProp1, which makes it
easier to implement without changing the functionality.
The idea
~~~~~~~~
The core of the idea pertains to the "Condition" entry for MSMProp1
state machine rules E5 and E6(r). These are, respectively:
HB(SS, currS) and its negation
! HB(SS, currS).
Here, SS is a set of segments, and currS is a single segment. Each
segment contains a vector timestamp. The expression "HB(SS, currS)"
is intended to denote
for each segment S in SS . happens_before(S,currS)
where happens_before(S,T) means that S's vector timestamp is ordered
before-or-equal to T's vector timestamp.
In words, the expression
for each segment S in SS . happens_before(S,currS)
is equivalent to saying that currS has a timestamp which is
greater-than-equal to the timestamps of all the segments in SS.
The key observation is that this is equivalent to
happens_before( JOIN(SS), currS )
where JOIN is the lattice-theoretic "max" or "least upper bound"
operation on vector clocks. Given the definition of HB,
happens_before and (binary) JOIN, this is easy to prove.
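To make the equivalence concrete, here is a minimal standalone C sketch (hypothetical code, not part of Helgrind) using vector timestamps over a fixed thread count, with JOIN as the pointwise maximum:

   #include <assert.h>

   #define N_THR 4

   typedef struct { unsigned tim[N_THR]; } VTS;

   /* happens_before(a,b): a's clock is pointwise <= b's clock */
   static int happens_before ( const VTS* a, const VTS* b ) {
      int i;
      for (i = 0; i < N_THR; i++)
         if (a->tim[i] > b->tim[i]) return 0;
      return 1;
   }

   /* join2: pointwise max; the least upper bound of two VTSs */
   static VTS join2 ( const VTS* a, const VTS* b ) {
      VTS r; int i;
      for (i = 0; i < N_THR; i++)
         r.tim[i] = a->tim[i] > b->tim[i] ? a->tim[i] : b->tim[i];
      return r;
   }

   int main ( void ) {
      VTS s1 = {{1,0,2,0}}, s2 = {{0,3,1,0}}, currS = {{2,3,2,1}};
      VTS j;
      int all;
      /* "for each S in SS . happens_before(S,currS)" ... */
      all = happens_before(&s1,&currS) && happens_before(&s2,&currS);
      /* ... equals "happens_before( JOIN(SS), currS )" */
      j = join2(&s1,&s2);
      assert(all == happens_before(&j,&currS));
      return 0;
   }

The assert checks the equivalence on one example only; the general statement (a <= c and b <= c iff max(a,b) <= c, pointwise) is the lattice property the text appeals to.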
The consequences
~~~~~~~~~~~~~~~~
With that observation in place, it is a short step to observe that
storing segment sets in MSMProp1 is unnecessary. Instead of
storing a segment set in each shadow value, just store and
update a single vector timestamp. The following two equivalences
hold:
   MSMProp1                MSMProp2

   adding a segment S      join-ing S's vector timestamp
   to the segment-set      to the current vector timestamp

   HB(SS,currS)            happens_before( currS's timestamp,
                                           current vector timestamp )
Once it is no longer necessary to represent segment sets, it then
also becomes unnecessary to represent segments. This constitutes
a significant simplification to the implementation.
The resulting state machine, MSMProp2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MSMProp2 is isomorphic to MSMProp1, with the following changes:
States are New, Read(VTS,LS), Write(VTS,LS)
where LS is a lockset (as before) and VTS is a vector timestamp.
For a thread T with current lockset 'currLS' and current VTS 'currVTS'
making a memory access, the new rules are
   Name  Old-State            Op  Guard        New-State              Race-If

   E1    New                  rd  True         Read(currVTS,currLS)   False
   E2    New                  wr  True         Write(currVTS,currLS)  False
   E3    Read(oldVTS,oldLS)   rd  True         Read(newVTS,newLS)     False
   E4    Read(oldVTS,oldLS)   wr  True         Write(newVTS,newLS)    #newLS == 0
                                                                      && !hb(oldVTS,currVTS)
   E5    Write(oldVTS,oldLS)  rd  hb(oldVTS,   Read(currVTS,currLS)   False
                                  currVTS)
   E6r   Write(oldVTS,oldLS)  rd  !hb(oldVTS,  Write(newVTS,newLS)    #newLS == 0
                                  currVTS)                            && !hb(oldVTS,currVTS)
   E6w   Write(oldVTS,oldLS)  wr  True         Write(newVTS,newLS)    #newLS == 0
                                                                      && !hb(oldVTS,currVTS)

   where newVTS = join2(oldVTS,currVTS)

         newLS  = if   hb(oldVTS,currVTS)
                  then currLS
                  else intersect(oldLS,currLS)

         hb(vts1, vts2) = vts1 happens before or is equal to vts2
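Reusing the hypothetical VTS, happens_before and join2 helpers from the sketch above, the whole rule table collapses into one transition function (again a sketch, not the Helgrind implementation; locksets are modelled as bitmasks):

   typedef unsigned LockSet;                 /* bit i set == lock i held */
   typedef enum { St_New, St_Read, St_Write } StTag;
   typedef struct { StTag tag; VTS vts; LockSet ls; } Shadow;

   /* Apply one access by a thread whose current lockset is currLS and
      current timestamp is currVTS.  Returns nonzero iff a race is
      reported.  Covers rules E1..E6w. */
   static int msm_access ( Shadow* sv, int isWrite,
                           const VTS* currVTS, LockSet currLS )
   {
      int hb, race;
      LockSet newLS;
      if (sv->tag == St_New) {                       /* E1, E2 */
         sv->tag = isWrite ? St_Write : St_Read;
         sv->vts = *currVTS;  sv->ls = currLS;
         return 0;
      }
      hb = happens_before(&sv->vts, currVTS);
      if (sv->tag == St_Write && !isWrite && hb) {   /* E5 */
         sv->tag = St_Read;
         sv->vts = *currVTS;  sv->ls = currLS;
         return 0;
      }
      /* E3, E4, E6r, E6w: join the timestamps, narrow the lockset */
      newLS = hb ? currLS : (sv->ls & currLS);
      race  = newLS == 0 && !hb
              && !(sv->tag == St_Read && !isWrite);  /* E3 never races */
      sv->vts = join2(&sv->vts, currVTS);
      sv->ls  = newLS;
      if (isWrite) sv->tag = St_Write;               /* E4; E6* stay Write */
      return race;
   }

Note that the shadow value is exactly what the table describes: a tag, a single VTS and a single lockset, with no segment sets anywhere.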
Interpretation of the states
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
I always found the state names in MSMProp1 confusing. Both MSMProp1
and MSMProp2 are easier to understand if the states Read and Write are
renamed, like this:
old name new name
Read WriteConstraint
Write AllConstraint
The effect of a state Read(VTS,LS) is to constrain all later-observed
writes so that either (1) the writing thread holds at least one lock
in common with LS, or (2) those writes must happen-after VTS. If
neither of those two conditions hold, a race is reported.
Hence a Read state places a constraint on writes.
The effect of a state Write(VTS,LS) is similar, but it applies to all
later-observed accesses: either (1) the accessing thread holds at
least one lock in common with LS, or (2) those accesses must
happen-after VTS. If neither of those two conditions hold, a race is
reported.
Hence a Write state places a constraint on all accesses.
If we ignore the LS component of these states, the intuitive
interpretation of the VTS component is that it states the earliest
vector-time that the next write / access may safely happen.

helgrind/README_YARD.txt (new file)

@@ -0,0 +1,34 @@
YARD, Yet Another Race Detector, built on the Helgrind framework
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Julian Seward, OpenWorks Ltd, 19 August 2008
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The YARD race detector lives in svn://svn.valgrind.org/branches/YARD.
It uses a new and relatively simple race detection engine, based on
the idea of shadowing each memory location with two vector timestamps,
indicating respectively the "earliest safe read point" and "earliest
safe write point". As far as I know this is a novel approach. Some
features of the implementation:
* Modularity. The entire race detection engine is placed in a
standalone library (libhb_core.c) with a simple interface (libhb.h).
This makes it easier to debug and verify the engine; indeed it can
be built as a standalone executable with test harness using "make -f
Makefile_sa".
* Simplified and scalable storage management, so that large programs,
with many synchronisation events, can be handled.
* Ability to report both call stacks involved in a race, without
excessive time or space overhead.
* Pure happens before operation, so as not to give any false
positives.
To use, build as usual and run as "--tool=helgrind".
You can disable lock order checking with --track-lockorders=no, as it
sometimes produces an annoying amount of output.


@@ -82,6 +82,7 @@ typedef
_VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
_VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE, /* pth_cond_t*, pth_mx_t* */
_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST, /* pth_cond_t*, pth_mx_t* */
_VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE, /* pth_cond_t* */
_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, /* pth_rwlk_t* */
_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE, /* pth_rwlk_t*, long isW */

helgrind/hg_basics.c (new file)

@@ -0,0 +1,86 @@
/*--------------------------------------------------------------------*/
/*--- Basic definitions for all of Helgrind. ---*/
/*--- hg_basics.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_threadstate.h"
#include "hg_basics.h" /* self */
/*----------------------------------------------------------------*/
/*--- Very basic stuff ---*/
/*----------------------------------------------------------------*/
void* HG_(zalloc) ( HChar* cc, SizeT n )
{
void* p;
tl_assert(n > 0);
p = VG_(malloc)( cc, n );
tl_assert(p);
VG_(memset)(p, 0, n);
return p;
}
void HG_(free) ( void* p )
{
tl_assert(p);
VG_(free)(p);
}
Char* HG_(strdup) ( HChar* cc, const Char* s )
{
return VG_(strdup)( cc, s );
}
/*----------------------------------------------------------------*/
/*--- Command line options ---*/
/*----------------------------------------------------------------*/
/* Description of these flags is in hg_basics.h. */
Bool HG_(clo_track_lockorders) = True;
Bool HG_(clo_cmp_race_err_addrs) = False;
Addr HG_(clo_trace_addr) = 0;
Word HG_(clo_trace_level) = 0;
Word HG_(clo_sanity_flags) = 0;
/*--------------------------------------------------------------------*/
/*--- end hg_basics.c ---*/
/*--------------------------------------------------------------------*/

helgrind/hg_basics.h (new file)

@@ -0,0 +1,92 @@
/*--------------------------------------------------------------------*/
/*--- Basic definitions for all of Helgrind. ---*/
/*--- hg_basics.h ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#ifndef __HG_BASICS_H
#define __HG_BASICS_H
/*----------------------------------------------------------------*/
/*--- Very basic stuff ---*/
/*----------------------------------------------------------------*/
#define HG_(str) VGAPPEND(vgHelgrind_,str)
void* HG_(zalloc) ( HChar* cc, SizeT n );
void HG_(free) ( void* p );
Char* HG_(strdup) ( HChar* cc, const Char* s );
static inline Bool HG_(is_sane_ThreadId) ( ThreadId coretid ) {
return coretid >= 0 && coretid < VG_N_THREADS;
}
/*----------------------------------------------------------------*/
/*--- Command line options ---*/
/*----------------------------------------------------------------*/
/* Flags for controlling for which events sanity checking is done */
#define SCE_THREADS (1<<0) // Sanity check at thread create/join
#define SCE_LOCKS (1<<1) // Sanity check at lock events
#define SCE_BIGRANGE (1<<2) // Sanity check at big mem range events
#define SCE_ACCESS (1<<3) // Sanity check at mem accesses
#define SCE_LAOG (1<<4) // Sanity check at significant LAOG events
#define SCE_BIGRANGE_T 256 // big mem range minimum size
/* Enable/disable lock order checking. Sometimes it produces a lot of
errors, possibly genuine, which nevertheless can be very
annoying. */
extern Bool HG_(clo_track_lockorders);
/* When comparing race errors for equality, should the race address be
taken into account? For users, no, but for verification purposes
(regtesting) this is sometimes important. */
extern Bool HG_(clo_cmp_race_err_addrs);
/* Tracing memory accesses, so we can see what's going on.
clo_trace_addr is the address to monitor. clo_trace_level = 0 for
no tracing, 1 for summary, 2 for detailed. */
extern Addr HG_(clo_trace_addr);
extern Word HG_(clo_trace_level);
/* Sanity check level. This is an or-ing of
SCE_{THREADS,LOCKS,BIGRANGE,ACCESS,LAOG}. */
extern Word HG_(clo_sanity_flags);
#endif /* ! __HG_BASICS_H */
/*--------------------------------------------------------------------*/
/*--- end hg_basics.h ---*/
/*--------------------------------------------------------------------*/

helgrind/hg_errors.c (new file)

@@ -0,0 +1,768 @@
/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind. ---*/
/*--- hg_errors.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h" /* self */
/*----------------------------------------------------------------*/
/*--- ---*/
/*----------------------------------------------------------------*/
/* This has to do with printing error messages. See comments on
announce_threadset() and summarise_threadset(). Perhaps it
should be a command line option. */
#define N_THREADS_TO_ANNOUNCE 5
/*----------------------------------------------------------------*/
/*--- Error management ---*/
/*----------------------------------------------------------------*/
/* maps (by value) strings to a copy of them in ARENA_TOOL */
static WordFM* string_table = NULL;
ULong HG_(stats__string_table_queries) = 0;
ULong HG_(stats__string_table_get_map_size) ( void ) {
return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}
static Word string_table_cmp ( UWord s1, UWord s2 ) {
return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}
static HChar* string_table_strdup ( HChar* str ) {
HChar* copy = NULL;
HG_(stats__string_table_queries)++;
if (!str)
str = "(null)";
if (!string_table) {
string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
HG_(free), string_table_cmp );
tl_assert(string_table);
}
if (VG_(lookupFM)( string_table,
NULL, (Word*)&copy, (Word)str )) {
tl_assert(copy);
if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
return copy;
} else {
copy = HG_(strdup)("hg.sts.2", str);
tl_assert(copy);
VG_(addToFM)( string_table, (Word)copy, (Word)copy );
return copy;
}
}
/* maps from Lock .unique fields to LockP*s */
static WordFM* map_LockN_to_P = NULL;
ULong HG_(stats__LockN_to_P_queries) = 0;
ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}
static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
Lock* lk1 = (Lock*)lk1W;
Lock* lk2 = (Lock*)lk2W;
tl_assert( HG_(is_sane_LockNorP)(lk1) );
tl_assert( HG_(is_sane_LockNorP)(lk2) );
if (lk1->unique < lk2->unique) return -1;
if (lk1->unique > lk2->unique) return 1;
return 0;
}
static Lock* mk_LockP_from_LockN ( Lock* lkn )
{
Lock* lkp = NULL;
HG_(stats__LockN_to_P_queries)++;
tl_assert( HG_(is_sane_LockN)(lkn) );
if (!map_LockN_to_P) {
map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
HG_(free), lock_unique_cmp );
tl_assert(map_LockN_to_P);
}
if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
*lkp = *lkn;
lkp->admin = NULL;
lkp->magic = LockP_MAGIC;
/* Forget about the bag of lock holders - don't copy that.
Also, acquired_at should be NULL whenever heldBy is, and vice
versa. Also forget about the associated libhb synch object. */
lkp->heldW = False;
lkp->heldBy = NULL;
lkp->acquired_at = NULL;
lkp->hbso = NULL;
VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
}
tl_assert( HG_(is_sane_LockP)(lkp) );
return lkp;
}
/* Errors:
race: program counter
read or write
data size
previous state
current state
FIXME: how does state printing interact with lockset gc?
Are the locksets in prev/curr state always valid?
Ditto question for the threadsets
ThreadSets - probably are always valid if Threads
are never thrown away.
LockSets - could at least print the lockset elements that
correspond to actual locks at the time of printing. Hmm.
*/
/* Error kinds */
typedef
enum {
XE_Race=1101, // race
XE_FreeMemLock, // freeing memory containing a locked lock
XE_UnlockUnlocked, // unlocking a not-locked lock
XE_UnlockForeign, // unlocking a lock held by some other thread
XE_UnlockBogus, // unlocking an address not known to be a lock
XE_PthAPIerror, // error from the POSIX pthreads API
XE_LockOrder, // lock order error
XE_Misc // misc other error (w/ string to describe it)
}
XErrorTag;
/* Extra contexts for kinds */
typedef
struct {
XErrorTag tag;
union {
struct {
Addr data_addr;
Int szB;
Bool isWrite;
ExeContext* mb_lastlock;
ExeContext* mb_confacc;
Thread* thr;
Thread* mb_confaccthr;
Char descr1[96];
Char descr2[96];
} Race;
struct {
Thread* thr; /* doing the freeing */
Lock* lock; /* lock which is locked */
} FreeMemLock;
struct {
Thread* thr; /* doing the unlocking */
Lock* lock; /* lock (that is already unlocked) */
} UnlockUnlocked;
struct {
Thread* thr; /* doing the unlocking */
Thread* owner; /* thread that actually holds the lock */
Lock* lock; /* lock (that is held by 'owner') */
} UnlockForeign;
struct {
Thread* thr; /* doing the unlocking */
Addr lock_ga; /* purported address of the lock */
} UnlockBogus;
struct {
Thread* thr;
HChar* fnname; /* persistent, in tool-arena */
Word err; /* pth error code */
HChar* errstr; /* persistent, in tool-arena */
} PthAPIerror;
struct {
Thread* thr;
Addr before_ga; /* always locked first in prog. history */
Addr after_ga;
ExeContext* before_ec;
ExeContext* after_ec;
} LockOrder;
struct {
Thread* thr;
HChar* errstr; /* persistent, in tool-arena */
} Misc;
} XE;
}
XError;
static void init_XError ( XError* xe ) {
VG_(memset)(xe, 0, sizeof(*xe) );
xe->tag = XE_Race-1; /* bogus */
}
/* Extensions of suppressions */
typedef
enum {
XS_Race=1201, /* race */
XS_FreeMemLock,
XS_UnlockUnlocked,
XS_UnlockForeign,
XS_UnlockBogus,
XS_PthAPIerror,
XS_LockOrder,
XS_Misc
}
XSuppTag;
/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( Error* err )
{
XError* xe = (XError*)VG_(get_error_extra)(err);
tl_assert(xe);
//if (extra != NULL && Undescribed == extra->addrinfo.akind) {
// describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
//}
if (xe->tag == XE_Race) {
/* See if we can come up with a source level description of the
raced-upon address. This is potentially expensive, which is
why it's only done at the update_extra point, not when the
error is initially created. */
tl_assert(sizeof(xe->XE.Race.descr1) == sizeof(xe->XE.Race.descr2));
if (VG_(get_data_description)(
&xe->XE.Race.descr1[0],
&xe->XE.Race.descr2[0],
sizeof(xe->XE.Race.descr1)-1,
xe->XE.Race.data_addr )) {
tl_assert( xe->XE.Race.descr1
[ sizeof(xe->XE.Race.descr1)-1 ] == 0);
tl_assert( xe->XE.Race.descr2
[ sizeof(xe->XE.Race.descr2)-1 ] == 0);
}
}
return sizeof(XError);
}
void HG_(record_error_Race) ( Thread* thr,
Addr data_addr, Bool isWrite, Int szB,
ExeContext* mb_lastlock,
ExeContext* mb_confacc,
Thread* mb_confaccthr )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
# if defined(VGO_linux)
/* Skip any races on locations apparently in GOTPLT sections. This
is said to be caused by ld.so poking PLT table entries (or
whatever) when it writes the resolved address of a dynamically
linked routine, into the table (or whatever) when it is called
for the first time. */
{
VgSectKind sect = VG_(seginfo_sect_kind)( NULL, 0, data_addr );
if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
data_addr, VG_(pp_SectKind)(sect));
if (sect == Vg_SectGOTPLT) return;
}
# endif
init_XError(&xe);
xe.tag = XE_Race;
xe.XE.Race.data_addr = data_addr;
xe.XE.Race.szB = szB;
xe.XE.Race.isWrite = isWrite;
xe.XE.Race.mb_lastlock = mb_lastlock;
xe.XE.Race.mb_confacc = mb_confacc;
xe.XE.Race.thr = thr;
xe.XE.Race.mb_confaccthr = mb_confaccthr;
tl_assert(isWrite == False || isWrite == True);
// tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
xe.XE.Race.descr1[0] = xe.XE.Race.descr2[0] = 0;
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_Race, data_addr, NULL, &xe );
}
void HG_(record_error_FreeMemLock) ( Thread* thr, Lock* lk )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
tl_assert( HG_(is_sane_LockN)(lk) );
init_XError(&xe);
xe.tag = XE_FreeMemLock;
xe.XE.FreeMemLock.thr = thr;
xe.XE.FreeMemLock.lock = mk_LockP_from_LockN(lk);
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_FreeMemLock, 0, NULL, &xe );
}
void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
tl_assert( HG_(is_sane_LockN)(lk) );
init_XError(&xe);
xe.tag = XE_UnlockUnlocked;
xe.XE.UnlockUnlocked.thr = thr;
xe.XE.UnlockUnlocked.lock = mk_LockP_from_LockN(lk);
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_UnlockUnlocked, 0, NULL, &xe );
}
void HG_(record_error_UnlockForeign) ( Thread* thr,
Thread* owner, Lock* lk )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
tl_assert( HG_(is_sane_Thread)(owner) );
tl_assert( HG_(is_sane_LockN)(lk) );
init_XError(&xe);
xe.tag = XE_UnlockForeign;
xe.XE.UnlockForeign.thr = thr;
xe.XE.UnlockForeign.owner = owner;
xe.XE.UnlockForeign.lock = mk_LockP_from_LockN(lk);
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_UnlockForeign, 0, NULL, &xe );
}
void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
init_XError(&xe);
xe.tag = XE_UnlockBogus;
xe.XE.UnlockBogus.thr = thr;
xe.XE.UnlockBogus.lock_ga = lock_ga;
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_UnlockBogus, 0, NULL, &xe );
}
void HG_(record_error_LockOrder)(
Thread* thr, Addr before_ga, Addr after_ga,
ExeContext* before_ec, ExeContext* after_ec
)
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
if (!HG_(clo_track_lockorders))
return;
init_XError(&xe);
xe.tag = XE_LockOrder;
xe.XE.LockOrder.thr = thr;
xe.XE.LockOrder.before_ga = before_ga;
xe.XE.LockOrder.before_ec = before_ec;
xe.XE.LockOrder.after_ga = after_ga;
xe.XE.LockOrder.after_ec = after_ec;
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_LockOrder, 0, NULL, &xe );
}
void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
Word err, HChar* errstr )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
tl_assert(fnname);
tl_assert(errstr);
init_XError(&xe);
xe.tag = XE_PthAPIerror;
xe.XE.PthAPIerror.thr = thr;
xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
xe.XE.PthAPIerror.err = err;
xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_PthAPIerror, 0, NULL, &xe );
}
void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
{
XError xe;
tl_assert( HG_(is_sane_Thread)(thr) );
tl_assert(errstr);
init_XError(&xe);
xe.tag = XE_Misc;
xe.XE.Misc.thr = thr;
xe.XE.Misc.errstr = string_table_strdup(errstr);
// FIXME: tid vs thr
tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
tl_assert( thr->coretid != VG_INVALID_THREADID );
VG_(maybe_record_error)( thr->coretid,
XE_Misc, 0, NULL, &xe );
}
Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
{
XError *xe1, *xe2;
tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
xe1 = (XError*)VG_(get_error_extra)(e1);
xe2 = (XError*)VG_(get_error_extra)(e2);
tl_assert(xe1);
tl_assert(xe2);
switch (VG_(get_error_kind)(e1)) {
case XE_Race:
return xe1->XE.Race.szB == xe2->XE.Race.szB
&& xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
&& (HG_(clo_cmp_race_err_addrs)
? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
: True);
case XE_FreeMemLock:
return xe1->XE.FreeMemLock.thr == xe2->XE.FreeMemLock.thr
&& xe1->XE.FreeMemLock.lock == xe2->XE.FreeMemLock.lock;
case XE_UnlockUnlocked:
return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
&& xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
case XE_UnlockForeign:
return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
&& xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
&& xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
case XE_UnlockBogus:
return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
&& xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
case XE_PthAPIerror:
return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
&& 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
xe2->XE.PthAPIerror.fnname)
&& xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
case XE_LockOrder:
return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
case XE_Misc:
return xe1->XE.Misc.thr == xe2->XE.Misc.thr
&& 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
default:
tl_assert(0);
}
/*NOTREACHED*/
tl_assert(0);
}
/* Announce (that is, print the point-of-creation) of 'thr'. Only do
this once, as we only want to see these announcements once per
thread. */
static void announce_one_thread ( Thread* thr )
{
tl_assert(HG_(is_sane_Thread)(thr));
tl_assert(thr->errmsg_index >= 1);
if (!thr->announced) {
if (thr->errmsg_index == 1) {
tl_assert(thr->created_at == NULL);
VG_(message)(Vg_UserMsg, "Thread #%d is the program's root thread",
thr->errmsg_index);
} else {
tl_assert(thr->created_at != NULL);
VG_(message)(Vg_UserMsg, "Thread #%d was created",
thr->errmsg_index);
VG_(pp_ExeContext)( thr->created_at );
}
VG_(message)(Vg_UserMsg, "");
thr->announced = True;
}
}
void HG_(pp_Error) ( Error* err )
{
XError *xe = (XError*)VG_(get_error_extra)(err);
switch (VG_(get_error_kind)(err)) {
case XE_Misc: {
tl_assert(xe);
tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );
announce_one_thread( xe->XE.Misc.thr );
VG_(message)(Vg_UserMsg,
"Thread #%d: %s",
(Int)xe->XE.Misc.thr->errmsg_index,
xe->XE.Misc.errstr);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
break;
}
case XE_LockOrder: {
tl_assert(xe);
tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );
announce_one_thread( xe->XE.LockOrder.thr );
VG_(message)(Vg_UserMsg,
"Thread #%d: lock order \"%p before %p\" violated",
(Int)xe->XE.LockOrder.thr->errmsg_index,
(void*)xe->XE.LockOrder.before_ga,
(void*)xe->XE.LockOrder.after_ga);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
VG_(message)(Vg_UserMsg,
" Required order was established by acquisition of lock at %p",
(void*)xe->XE.LockOrder.before_ga);
VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
VG_(message)(Vg_UserMsg,
" followed by a later acquisition of lock at %p",
(void*)xe->XE.LockOrder.after_ga);
VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
}
break;
}
case XE_PthAPIerror: {
tl_assert(xe);
tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );
announce_one_thread( xe->XE.PthAPIerror.thr );
VG_(message)(Vg_UserMsg,
"Thread #%d's call to %s failed",
(Int)xe->XE.PthAPIerror.thr->errmsg_index,
xe->XE.PthAPIerror.fnname);
VG_(message)(Vg_UserMsg,
" with error code %ld (%s)",
xe->XE.PthAPIerror.err,
xe->XE.PthAPIerror.errstr);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
break;
}
case XE_UnlockBogus: {
tl_assert(xe);
tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );
announce_one_thread( xe->XE.UnlockBogus.thr );
VG_(message)(Vg_UserMsg,
"Thread #%d unlocked an invalid lock at %p ",
(Int)xe->XE.UnlockBogus.thr->errmsg_index,
(void*)xe->XE.UnlockBogus.lock_ga);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
break;
}
case XE_UnlockForeign: {
tl_assert(xe);
tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );
announce_one_thread( xe->XE.UnlockForeign.thr );
announce_one_thread( xe->XE.UnlockForeign.owner );
VG_(message)(Vg_UserMsg,
"Thread #%d unlocked lock at %p "
"currently held by thread #%d",
(Int)xe->XE.UnlockForeign.thr->errmsg_index,
(void*)xe->XE.UnlockForeign.lock->guestaddr,
(Int)xe->XE.UnlockForeign.owner->errmsg_index );
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (xe->XE.UnlockForeign.lock->appeared_at) {
VG_(message)(Vg_UserMsg,
" Lock at %p was first observed",
(void*)xe->XE.UnlockForeign.lock->guestaddr);
VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
}
break;
}
case XE_UnlockUnlocked: {
tl_assert(xe);
tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );
announce_one_thread( xe->XE.UnlockUnlocked.thr );
VG_(message)(Vg_UserMsg,
"Thread #%d unlocked a not-locked lock at %p ",
(Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
(void*)xe->XE.UnlockUnlocked.lock->guestaddr);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (xe->XE.UnlockUnlocked.lock->appeared_at) {
VG_(message)(Vg_UserMsg,
" Lock at %p was first observed",
(void*)xe->XE.UnlockUnlocked.lock->guestaddr);
VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
}
break;
}
case XE_FreeMemLock: {
tl_assert(xe);
tl_assert( HG_(is_sane_LockP)( xe->XE.FreeMemLock.lock ) );
tl_assert( HG_(is_sane_Thread)( xe->XE.FreeMemLock.thr ) );
announce_one_thread( xe->XE.FreeMemLock.thr );
VG_(message)(Vg_UserMsg,
"Thread #%d deallocated location %p "
"containing a locked lock",
(Int)xe->XE.FreeMemLock.thr->errmsg_index,
(void*)xe->XE.FreeMemLock.lock->guestaddr);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (xe->XE.FreeMemLock.lock->appeared_at) {
VG_(message)(Vg_UserMsg,
" Lock at %p was first observed",
(void*)xe->XE.FreeMemLock.lock->guestaddr);
VG_(pp_ExeContext)( xe->XE.FreeMemLock.lock->appeared_at );
}
break;
}
case XE_Race: {
Addr err_ga;
HChar* what;
Int szB;
what = xe->XE.Race.isWrite ? "write" : "read";
szB = xe->XE.Race.szB;
err_ga = VG_(get_error_address)(err);
announce_one_thread( xe->XE.Race.thr );
if (xe->XE.Race.mb_confaccthr)
announce_one_thread( xe->XE.Race.mb_confaccthr );
VG_(message)(Vg_UserMsg,
"Possible data race during %s of size %d at %#lx by thread #%d",
what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index
);
VG_(pp_ExeContext)( VG_(get_error_where)(err) );
if (xe->XE.Race.mb_confacc) {
if (xe->XE.Race.mb_confaccthr) {
VG_(message)(Vg_UserMsg,
" This conflicts with a previous access by thread #%d",
xe->XE.Race.mb_confaccthr->errmsg_index
);
} else {
VG_(message)(Vg_UserMsg,
" This conflicts with a previous access"
);
}
VG_(pp_ExeContext)( xe->XE.Race.mb_confacc );
}
/* If we have a better description of the address, show it. */
if (xe->XE.Race.descr1[0] != 0)
VG_(message)(Vg_UserMsg, " %s", &xe->XE.Race.descr1[0]);
if (xe->XE.Race.descr2[0] != 0)
VG_(message)(Vg_UserMsg, " %s", &xe->XE.Race.descr2[0]);
break; /* case XE_Race */
} /* case XE_Race */
default:
tl_assert(0);
} /* switch (VG_(get_error_kind)(err)) */
}
Char* HG_(get_error_name) ( Error* err )
{
switch (VG_(get_error_kind)(err)) {
case XE_Race: return "Race";
case XE_FreeMemLock: return "FreeMemLock";
case XE_UnlockUnlocked: return "UnlockUnlocked";
case XE_UnlockForeign: return "UnlockForeign";
case XE_UnlockBogus: return "UnlockBogus";
case XE_PthAPIerror: return "PthAPIerror";
case XE_LockOrder: return "LockOrder";
case XE_Misc: return "Misc";
default: tl_assert(0); /* fill in missing case */
}
}
Bool HG_(recognised_suppression) ( Char* name, Supp *su )
{
# define TRY(_name,_xskind) \
if (0 == VG_(strcmp)(name, (_name))) { \
VG_(set_supp_kind)(su, (_xskind)); \
return True; \
}
TRY("Race", XS_Race);
TRY("FreeMemLock", XS_FreeMemLock);
TRY("UnlockUnlocked", XS_UnlockUnlocked);
TRY("UnlockForeign", XS_UnlockForeign);
TRY("UnlockBogus", XS_UnlockBogus);
TRY("PthAPIerror", XS_PthAPIerror);
TRY("LockOrder", XS_LockOrder);
TRY("Misc", XS_Misc);
return False;
# undef TRY
}
Bool HG_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
Supp* su )
{
/* do nothing -- no extra suppression info present. Return True to
indicate nothing bad happened. */
return True;
}
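
For reference, the names matched by HG_(recognised_suppression) are the ones written after "Helgrind:" in a suppressions file, and since HG_(read_extra_suppression_info) is a no-op, no extra information line follows the kind line. A minimal entry might look like this (suppression name and frames are hypothetical):

{
   my-known-benign-race
   Helgrind:Race
   fun:some_racy_function
   obj:/path/to/libexample.so
}
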
Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
{
switch (VG_(get_supp_kind)(su)) {
case XS_Race: return VG_(get_error_kind)(err) == XE_Race;
case XS_FreeMemLock: return VG_(get_error_kind)(err) == XE_FreeMemLock;
case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
case XS_UnlockForeign: return VG_(get_error_kind)(err) == XE_UnlockForeign;
case XS_UnlockBogus: return VG_(get_error_kind)(err) == XE_UnlockBogus;
case XS_PthAPIerror: return VG_(get_error_kind)(err) == XE_PthAPIerror;
case XS_LockOrder: return VG_(get_error_kind)(err) == XE_LockOrder;
case XS_Misc: return VG_(get_error_kind)(err) == XE_Misc;
//case XS_: return VG_(get_error_kind)(err) == XE_;
default: tl_assert(0); /* fill in missing cases */
}
}
void HG_(print_extra_suppression_info) ( Error* err )
{
/* Do nothing */
}
/*--------------------------------------------------------------------*/
/*--- end hg_errors.c ---*/
/*--------------------------------------------------------------------*/

helgrind/hg_errors.h (new file)
@@ -0,0 +1,73 @@
/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind. ---*/
/*--- hg_errors.h ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#ifndef __HG_ERRORS_H
#define __HG_ERRORS_H
/* The standard bundle of error management functions that we are
required to present to the core/tool interface at startup. */
Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 );
void HG_(pp_Error) ( Error* err );
UInt HG_(update_extra) ( Error* err );
Bool HG_(recognised_suppression) ( Char* name, Supp *su );
Bool HG_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
Supp* su );
Bool HG_(error_matches_suppression) ( Error* err, Supp* su );
Char* HG_(get_error_name) ( Error* err );
void HG_(print_extra_suppression_info) ( Error* err );
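
For context, the tool hands this bundle to the core at startup; a sketch of the registration call, assuming the argument order of the core/tool interface of this era:

/* Sketch: registering the error-management bundle (argument order
   assumed, not verified against pub_tool_tooliface.h). */
VG_(needs_tool_errors)( HG_(eq_Error),
                        HG_(pp_Error),
                        False, /* show ThreadIDs for errors? */
                        HG_(update_extra),
                        HG_(recognised_suppression),
                        HG_(read_extra_suppression_info),
                        HG_(error_matches_suppression),
                        HG_(get_error_name),
                        HG_(print_extra_suppression_info) );
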
/* Functions for recording various kinds of errors. */
void HG_(record_error_Race) ( Thread* thr,
Addr data_addr, Bool isWrite, Int szB,
ExeContext* mb_lastlock,
ExeContext* mb_confacc,
Thread* mb_confaccthr );
void HG_(record_error_FreeMemLock) ( Thread* thr, Lock* lk );
void HG_(record_error_UnlockUnlocked) ( Thread*, Lock* );
void HG_(record_error_UnlockForeign) ( Thread*, Thread*, Lock* );
void HG_(record_error_UnlockBogus) ( Thread*, Addr );
void HG_(record_error_PthAPIerror) ( Thread*, HChar*, Word, HChar* );
void HG_(record_error_LockOrder) ( Thread*, Addr, Addr,
ExeContext*, ExeContext* );
void HG_(record_error_Misc) ( Thread*, HChar* );
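
These are invoked from the tool's event handlers once a problem has been detected; a hypothetical call site for the Misc variant:

/* Hypothetical: report a generic misuse on behalf of 'thr'. */
static void example_report ( Thread* thr )
{
   HG_(record_error_Misc)( thr, "pthread_mutex_destroy of a locked mutex" );
}
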
/* Statistics pertaining to error management. */
extern ULong HG_(stats__LockN_to_P_queries);
extern ULong HG_(stats__LockN_to_P_get_map_size) ( void );
extern ULong HG_(stats__string_table_queries);
extern ULong HG_(stats__string_table_get_map_size) ( void );
#endif /* ! __HG_ERRORS_H */
/*--------------------------------------------------------------------*/
/*--- end hg_errors.h ---*/
/*--------------------------------------------------------------------*/

helgrind/hg_intercepts.c
@@ -534,9 +534,10 @@ PTH_FUNC(int, pthreadZumutexZuunlock, // pthread_mutex_unlock
/* Handled: pthread_cond_wait pthread_cond_timedwait
pthread_cond_signal pthread_cond_broadcast
pthread_cond_destroy
Unhandled: pthread_cond_init pthread_cond_destroy
-- are these important?
Unhandled: pthread_cond_init
-- is this important?
*/
// pthread_cond_wait
@@ -719,6 +720,73 @@ PTH_FUNC(int, pthreadZucondZubroadcastZAZa, // pthread_cond_broadcast@*
}
// pthread_cond_destroy
PTH_FUNC(int, pthreadZucondZudestroyZAZa, // pthread_cond_destroy@*
pthread_cond_t* cond)
{
int ret;
OrigFn fn;
VALGRIND_GET_ORIG_FN(fn);
if (TRACE_PTH_FNS) {
fprintf(stderr, "<< pthread_cond_destroy %p", cond);
fflush(stderr);
}
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,
pthread_cond_t*,cond);
CALL_FN_W_W(ret, fn, cond);
if (ret != 0) {
DO_PthAPIerror( "pthread_cond_destroy", ret );
}
if (TRACE_PTH_FNS) {
fprintf(stderr, " codestr -> %d >>\n", ret);
}
return ret;
}
/*----------------------------------------------------------------*/
/*--- pthread_barrier_t functions ---*/
/*----------------------------------------------------------------*/
PTH_FUNC(int, pthreadZubarrierZuwait, // pthread_barrier_wait.
pthread_barrier_t* b)
{
int ret;
OrigFn fn;
VALGRIND_GET_ORIG_FN(fn);
if (TRACE_PTH_FNS) {
fprintf(stderr, "<< pthread_barrier_wait %p", b);
fflush(stderr);
}
// We blocked, signal.
DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE,
void*,b);
CALL_FN_W_W(ret, fn, b);
// FIXME: handle ret
// We unblocked, finish wait.
DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,
void *, b, void *, b);
if (TRACE_PTH_FNS) {
fprintf(stderr, " pthread_barrier_wait -> %d >>\n", ret);
}
return ret;
}
/*----------------------------------------------------------------*/
/*--- pthread_rwlock_t functions ---*/
/*----------------------------------------------------------------*/

helgrind/hg_lock_n_thread.c (new file)
@@ -0,0 +1,123 @@
/*--------------------------------------------------------------------*/
/*--- Definitions for Locks and Threads. ---*/
/*--- hg_lock_n_thread.c ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_execontext.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_wordfm.h"
#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h" /* self */
/*----------------------------------------------------------------*/
/*--- Sanity checking ---*/
/*----------------------------------------------------------------*/
inline Bool HG_(is_sane_Thread) ( Thread* thr ) {
return thr != NULL && thr->magic == Thread_MAGIC;
}
static Bool is_sane_Bag_of_Threads ( WordBag* bag )
{
Thread* thr;
Word count;
VG_(initIterBag)( bag );
while (VG_(nextIterBag)( bag, (Word*)&thr, &count )) {
if (count < 1) return False;
if (!HG_(is_sane_Thread)(thr)) return False;
}
VG_(doneIterBag)( bag );
return True;
}
static Bool is_sane_Lock_BASE ( Lock* lock )
{
if (lock == NULL
|| (lock->magic != LockN_MAGIC && lock->magic != LockP_MAGIC))
return False;
switch (lock->kind) {
case LK_mbRec: case LK_nonRec: case LK_rdwr: break;
default: return False;
}
if (lock->heldBy == NULL) {
if (lock->acquired_at != NULL) return False;
/* Unheld. We arbitrarily require heldW to be False. */
return !lock->heldW;
} else {
if (lock->acquired_at == NULL) return False;
}
/* If heldBy is non-NULL, we require it to contain at least one
thread. */
if (VG_(isEmptyBag)(lock->heldBy))
return False;
/* Lock is either r- or w-held. */
if (!is_sane_Bag_of_Threads(lock->heldBy))
return False;
if (lock->heldW) {
/* Held in write-mode */
if ((lock->kind == LK_nonRec || lock->kind == LK_rdwr)
&& !VG_(isSingletonTotalBag)(lock->heldBy))
return False;
} else {
/* Held in read-mode */
if (lock->kind != LK_rdwr) return False;
}
return True;
}
Bool HG_(is_sane_LockP) ( Lock* lock ) {
return lock != NULL
&& lock->magic == LockP_MAGIC
&& lock->hbso == NULL
&& is_sane_Lock_BASE(lock);
}
Bool HG_(is_sane_LockN) ( Lock* lock ) {
return lock != NULL
&& lock->magic == LockN_MAGIC
&& lock->hbso != NULL
&& is_sane_Lock_BASE(lock);
}
Bool HG_(is_sane_LockNorP) ( Lock* lock ) {
return is_sane_Lock_BASE(lock);
}
/*--------------------------------------------------------------------*/
/*--- end hg_lock_n_thread.c ---*/
/*--------------------------------------------------------------------*/

helgrind/hg_lock_n_thread.h (new file)
@@ -0,0 +1,165 @@
/*--------------------------------------------------------------------*/
/*--- Definitions for Locks and Threads. ---*/
/*--- hg_lock_n_thread.h ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of Helgrind, a Valgrind tool for detecting errors
in threaded programs.
Copyright (C) 2007-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#ifndef __HG_LOCK_N_THREAD_H
#define __HG_LOCK_N_THREAD_H
/*----------------------------------------------------------------*/
/*--- Primary data definitions ---*/
/*----------------------------------------------------------------*/
/* Magic numbers, for doing assertions that structures really are of
the right type. Useful as some of the code can get a bit
complex. */
#define Thread_MAGIC 0x504fc5e5
#define LockN_MAGIC 0x6545b557 /* normal nonpersistent locks */
#define LockP_MAGIC 0x755b5456 /* persistent (copied) locks */
/* These are handles for Word sets. CONSTRAINTS: must be (very) small
ints numbered from zero, since < 30-bit versions of them are used to
encode thread-sets and lock-sets in 32-bit shadow words. */
typedef WordSet WordSetID;
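
The sub-30-bit constraint matters because a 32-bit shadow word must hold tag bits as well as set handles. A hypothetical packing, purely to illustrate the arithmetic (this is not Helgrind's actual shadow encoding):

/* Illustrative only: a 2-bit tag plus a 30-bit WordSetID in one
   32-bit shadow word. */
static inline UInt pack_shadow ( UInt tag2, WordSetID wsid ) {
   return ((tag2 & 3u) << 30) | ((UInt)wsid & 0x3FFFFFFFu);
}
static inline WordSetID unpack_wsid ( UInt sv ) {
   return (WordSetID)(sv & 0x3FFFFFFFu);
}
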
/* Synchronisation Objects, exported abstractly by libhb. */
typedef struct _SO SO;
/* Thr, libhb's private thread record, exported abstractly */
typedef struct _Thr Thr;
/* Stores information about a thread. Addresses of these also serve
as unique thread identifiers and so are never freed, so they should
be as small as possible. Freeing Thread structures makes the
storage management just too complex, and most programs don't create
many threads, so tolerating this leak seems like a not-bad
tradeoff.
Since these are never freed, the .coretid field only indicates the
core's ThreadId associated with this Thread whilst it is alive.
Once the thread finishes, the ThreadId is set to
VG_INVALID_THREADID.
The core may later re-use the same ThreadId for what is a logically
completely different thread, which of course must have a different
Thread structure. */
typedef
struct _Thread {
/* ADMIN */
struct _Thread* admin;
UInt magic;
Thr* hbthr;
ThreadId coretid;
/* USEFUL */
WordSetID locksetA; /* WordSet of Lock* currently held by thread */
WordSetID locksetW; /* subset of locksetA held in w-mode */
/* EXPOSITION */
/* Place where parent was when this thread was created. */
ExeContext* created_at;
Bool announced;
/* Index for generating references in error messages. */
Int errmsg_index;
}
Thread;
/* Stores information about a lock's current state. These are
allocated and later freed (when the containing memory becomes
NoAccess). This poses a problem for the XError type, which
contains Lock*s. The solution is to copy any Lock which is to be
incorporated into an XError, so as to make it independent of the
'normal' collection of Locks, which can come and go. When the lock
is copied, its .magic is changed from LockN_MAGIC to
LockP_MAGIC. */
/* Lock kinds. */
typedef
enum {
LK_mbRec=1001, /* normal mutex, possibly recursive */
LK_nonRec, /* normal mutex, definitely non recursive */
LK_rdwr /* reader-writer lock */
}
LockKind;
typedef
struct _Lock {
/* ADMIN */
struct _Lock* admin;
ULong unique; /* used for persistence-hashing */
UInt magic; /* LockN_MAGIC or LockP_MAGIC */
/* EXPOSITION */
/* Place where lock first came to the attention of Helgrind. */
ExeContext* appeared_at;
/* If the lock is held, place where the lock most recently made
an unlocked->locked transition. Must be sync'd with .heldBy:
either both NULL or both non-NULL. */
ExeContext* acquired_at;
/* USEFUL-STATIC */
SO* hbso; /* associated SO */
Addr guestaddr; /* Guest address of lock */
LockKind kind; /* what kind of lock this is */
/* USEFUL-DYNAMIC */
Bool heldW;
WordBag* heldBy; /* bag of threads that hold this lock */
/* .heldBy is NULL: lock is unheld, and .heldW is meaningless
but arbitrarily set to False
.heldBy is non-NULL:
.heldW is True: lock is w-held by threads in heldBy
.heldW is False: lock is r-held by threads in heldBy
Either way, heldBy may not validly be an empty Bag.
for LK_nonRec, r-holdings are not allowed, and w-holdings may
only have sizeTotal(heldBy) == 1
for LK_mbRec, r-holdings are not allowed, and w-holdings may
only have sizeUnique(heldBy) == 1
for LK_rdwr, w-holdings may only have sizeTotal(heldBy) == 1 */
}
Lock;
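
The LockN-to-LockP copy described above struct _Lock lives with the error-management code; purely as a sketch of what it must do so that HG_(is_sane_LockP) (declared below) accepts the result, with the helper name and allocator hypothetical:

/* Hypothetical sketch of the LockN -> LockP copy. A real version
   would also deep-copy or drop .heldBy; that is elided here. */
static Lock* clone_LockN_as_LockP ( Lock* lkn,
                                    void* (*alloc)(HChar*,SizeT) )
{
   Lock* lkp  = alloc( "hg.clone_lock", sizeof(Lock) );
   *lkp       = *lkn;         /* copy all fields */
   lkp->admin = NULL;         /* detach from the admin list */
   lkp->magic = LockP_MAGIC;  /* mark as a persistent copy */
   lkp->hbso  = NULL;         /* persistent copies carry no SO */
   return lkp;
}
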
/*----------------------------------------------------------------*/
/*--- Sanity checking ---*/
/*----------------------------------------------------------------*/
Bool HG_(is_sane_Thread) ( Thread* thr );
Bool HG_(is_sane_LockP) ( Lock* lock );
Bool HG_(is_sane_LockN) ( Lock* lock );
Bool HG_(is_sane_LockNorP) ( Lock* lock );
#endif /* ! __HG_LOCK_N_THREAD_H */
/*--------------------------------------------------------------------*/
/*--- end hg_lock_n_thread.h ---*/
/*--------------------------------------------------------------------*/

(one file's diff suppressed because it is too large)

helgrind/hg_wordset.c
@@ -38,10 +38,11 @@
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_wordfm.h"
#define HG_(str) VGAPPEND(vgHelgrind_,str)
#include "hg_wordset.h"
#include "hg_basics.h"
#include "hg_wordset.h" /* self */
//------------------------------------------------------------------//
//--- Word Cache ---//
@@ -140,7 +141,8 @@ typedef
corresponding ix2vec entry number. The two mappings are mutually
redundant. */
struct _WordSetU {
void* (*alloc)(HChar*, SizeT);
void* (*alloc)(HChar*,SizeT);
HChar* cc;
void (*dealloc)(void*);
WordFM* vec2ix; /* WordVec-to-WordSet mapping tree */
WordVec** ix2vec; /* WordSet-to-WordVec mapping array */
@@ -176,12 +178,12 @@ static WordVec* new_WV_of_size ( WordSetU* wsu, UWord sz )
{
WordVec* wv;
tl_assert(sz >= 0);
wv = wsu->alloc( "hg", sizeof(WordVec) );
wv = wsu->alloc( wsu->cc, sizeof(WordVec) );
wv->owner = wsu;
wv->words = NULL;
wv->size = sz;
if (sz > 0) {
wv->words = wsu->alloc( "hg", (SizeT)sz * sizeof(UWord) );
wv->words = wsu->alloc( wsu->cc, (SizeT)sz * sizeof(UWord) );
}
return wv;
}
@@ -238,7 +240,7 @@ static void ensure_ix2vec_space ( WordSetU* wsu )
return;
new_sz = 2 * wsu->ix2vec_size;
if (new_sz == 0) new_sz = 2;
new_vec = wsu->alloc( "hg", new_sz * sizeof(WordVec*) );
new_vec = wsu->alloc( wsu->cc, new_sz * sizeof(WordVec*) );
tl_assert(new_vec);
for (i = 0; i < wsu->ix2vec_size; i++)
new_vec[i] = wsu->ix2vec[i];
@@ -306,17 +308,19 @@ static WordSet add_or_dealloc_WordVec( WordSetU* wsu, WordVec* wv_new )
WordSetU* HG_(newWordSetU) ( void* (*alloc_nofail)( HChar*, SizeT ),
HChar* cc,
void (*dealloc)(void*),
Word cacheSize )
{
WordSetU* wsu;
WordVec* empty;
wsu = alloc_nofail( "hg", sizeof(WordSetU) );
wsu = alloc_nofail( cc, sizeof(WordSetU) );
VG_(memset)( wsu, 0, sizeof(WordSetU) );
wsu->alloc = alloc_nofail;
wsu->cc = cc;
wsu->dealloc = dealloc;
wsu->vec2ix = VG_(newFM)( alloc_nofail, "hg",
wsu->vec2ix = VG_(newFM)( alloc_nofail, cc,
dealloc, cmp_WordVecs_for_FM );
wsu->ix2vec_used = 0;
wsu->ix2vec_size = 0;

helgrind/hg_wordset.h
@@ -48,6 +48,7 @@ typedef UInt WordSet; /* opaque, small int index */
/* Allocate and initialise a WordSetU */
WordSetU* HG_(newWordSetU) ( void* (*alloc_nofail)( HChar*, SizeT ),
HChar* cc,
void (*dealloc)(void*),
Word cacheSize );

helgrind/libhb.h (new file)
@@ -0,0 +1,154 @@
/*--------------------------------------------------------------------*/
/*--- LibHB: a library for implementing and checking ---*/
/*--- the happens-before relationship in concurrent programs. ---*/
/*--- libhb.h ---*/
/*--------------------------------------------------------------------*/
/*
This file is part of LibHB, a library for implementing and checking
the happens-before relationship in concurrent programs.
Copyright (C) 2008-2008 OpenWorks Ltd
info@open-works.co.uk
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
The GNU General Public License is contained in the file COPYING.
*/
#ifndef __LIBHB_H
#define __LIBHB_H
/* Abstract to user: thread identifiers */
/* typedef struct _Thr Thr; */ /* now in hg_lock_n_thread.h */
/* Abstract to user: synchronisation objects */
/* typedef struct _SO SO; */ /* now in hg_lock_n_thread.h */
/* Abstract to the lib: execution contexts */
/* struct _EC will be defined by user at some point. */
typedef struct _EC EC;
/* Initialise the library; returns the Thr* for the root thread.
'shadow_alloc' should never return NULL; instead it should simply
not return at all if it encounters an out-of-memory condition. */
Thr* libhb_init (
void (*get_stacktrace)( Thr*, Addr*, UWord ),
struct _EC* (*stacktrace_to_EC)( Addr*, UWord ),
struct _EC* (*get_EC)( Thr* )
);
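
A sketch of client-side initialisation, with placeholder callback bodies (all names here are hypothetical):

/* Hypothetical client wiring for libhb_init. */
static void my_get_stacktrace ( Thr* thr, Addr* frames, UWord nRequest )
{
   /* fill frames[0 .. nRequest-1] with thr's current stack */
}
static struct _EC* my_stacktrace_to_EC ( Addr* frames, UWord n )
{
   return NULL; /* intern the trace as an execution context */
}
static struct _EC* my_get_EC ( Thr* thr )
{
   return NULL; /* context for thr's most recent program point */
}
static Thr* start_libhb ( void )
{
   return libhb_init( my_get_stacktrace, my_stacktrace_to_EC, my_get_EC );
}
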
/* Shut down the library, and print stats (in fact that's _all_
this is for.) */
void libhb_shutdown ( Bool show_stats );
/* Thread creation: returns Thr* for new thread */
Thr* libhb_create ( Thr* parent );
/* Thread async exit */
void libhb_async_exit ( Thr* exitter );
/* Synchronisation objects (abstract to caller) */
/* Allocate a new one (alloc'd by library) */
SO* libhb_so_alloc ( void );
/* Dealloc one */
void libhb_so_dealloc ( SO* so );
/* Send a message via a sync object. If strong_send is true, the
resulting inter-thread dependency seen by a future receiver of this
message will be a dependency on this thread only. That is, in a
strong send, the VC inside the SO is replaced by the clock of the
sending thread. For a weak send, the sender's VC is joined into
that already in the SO, if any. This subtlety is needed to model
rwlocks: a strong send corresponds to releasing a rwlock that had
been w-held (or releasing a standard mutex). A weak send
corresponds to releasing a rwlock that has been r-held.
(rationale): Since in general many threads may hold a rwlock in
r-mode, a weak send facility is necessary in order that the final
SO reflects the join of the VCs of all the threads releasing the
rwlock, rather than merely holding the VC of the most recent thread
to release it. */
void libhb_so_send ( Thr* thr, SO* so, Bool strong_send );
/* Recv a message from a sync object. If strong_recv is True, the
resulting inter-thread dependency is considered adequate to induce
a h-b ordering on both reads and writes. If it is False, the
implied h-b ordering exists only for reads, not writes. This
subtlety is required in order to support reader-writer locks: a
thread doing a write-acquire of a rwlock (or acquiring a normal
mutex) models this by doing a strong receive. A thread doing a
read-acquire of a rwlock models this by doing a !strong_recv. */
void libhb_so_recv ( Thr* thr, SO* so, Bool strong_recv );
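
Taken together, send and recv let a client model lock hand-over. A sketch, assuming each lock carries an associated SO (as Helgrind's Lock.hbso does):

/* Sketch: modelling unlock/lock as message passing. */
static void model_unlock ( Thr* thr, SO* so, Bool wasWriteHeld )
{
   /* mutex release or rwlock write-release: strong send;
      rwlock read-release: weak send, so all readers' VCs join */
   libhb_so_send( thr, so, wasWriteHeld );
}
static void model_lock ( Thr* thr, SO* so, Bool asWriter )
{
   /* mutex or write acquire: strong recv (orders reads and writes);
      read acquire: weak recv (orders reads only) */
   libhb_so_recv( thr, so, asWriter );
}
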
/* Has this SO ever been sent on? */
Bool libhb_so_everSent ( SO* so );
/* Memory accesses (1/2/4/8 byte size). They report a race if one is
found. */
#define LIBHB_WRITE_1(_thr,_a) zsm_apply8___msm_write((_thr),(_a))
#define LIBHB_WRITE_2(_thr,_a) zsm_apply16___msm_write((_thr),(_a))
#define LIBHB_WRITE_4(_thr,_a) zsm_apply32___msm_write((_thr),(_a))
#define LIBHB_WRITE_8(_thr,_a) zsm_apply64___msm_write((_thr),(_a))
#define LIBHB_WRITE_N(_thr,_a,_n) zsm_apply_range___msm_write((_thr),(_a),(_n))
#define LIBHB_READ_1(_thr,_a) zsm_apply8___msm_read((_thr),(_a))
#define LIBHB_READ_2(_thr,_a) zsm_apply16___msm_read((_thr),(_a))
#define LIBHB_READ_4(_thr,_a) zsm_apply32___msm_read((_thr),(_a))
#define LIBHB_READ_8(_thr,_a) zsm_apply64___msm_read((_thr),(_a))
#define LIBHB_READ_N(_thr,_a,_n) zsm_apply_range___msm_read((_thr),(_a),(_n))
void zsm_apply8___msm_write ( Thr* thr, Addr a );
void zsm_apply16___msm_write ( Thr* thr, Addr a );
void zsm_apply32___msm_write ( Thr* thr, Addr a );
void zsm_apply64___msm_write ( Thr* thr, Addr a );
void zsm_apply_range___msm_write ( Thr* thr,
Addr a, SizeT len );
void zsm_apply8___msm_read ( Thr* thr, Addr a );
void zsm_apply16___msm_read ( Thr* thr, Addr a );
void zsm_apply32___msm_read ( Thr* thr, Addr a );
void zsm_apply64___msm_read ( Thr* thr, Addr a );
void zsm_apply_range___msm_read ( Thr* thr,
Addr a, SizeT len );
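
Usage is one macro call per instrumented access; for example (wrapper name hypothetical):

/* Hypothetical instrumentation callback for a 4-byte store. */
static void on_store32 ( Thr* thr, Addr a )
{
   LIBHB_WRITE_4( thr, a ); /* reports a race if one is found */
}
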
/* Set memory address ranges to new (freshly allocated), or noaccess
(no longer accessible). */
void libhb_range_new ( Thr*, Addr, SizeT );
void libhb_range_noaccess ( Thr*, Addr, SizeT );
/* For the convenience of callers, we offer to store one void* item in
a Thr, which we ignore, but the caller can get or set any time. */
void* libhb_get_Thr_opaque ( Thr* );
void libhb_set_Thr_opaque ( Thr*, void* );
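
For example, a client can use the opaque slot to map from a Thr back to its own thread record (a sketch, assuming Helgrind's Thread with its .hbthr field):

/* Sketch: two-way linkage between a tool-side Thread and its Thr. */
static void link_Thread_to_Thr ( Thread* thr )
{
   libhb_set_Thr_opaque( thr->hbthr, thr );
}
static Thread* Thread_of_Thr ( Thr* hbthr )
{
   return (Thread*)libhb_get_Thr_opaque( hbthr );
}
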
/* Low level copy of shadow state from [src,src+len) to [dst,dst+len).
Overlapping moves are checked for and asserted against. */
void libhb_copy_shadow_state ( Addr src, Addr dst, SizeT len );
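
The no-overlap condition asserted is the standard disjointness test for two half-open ranges; a sketch:

/* Illustrative check that [src,src+len) and [dst,dst+len) are
   disjoint. */
static Bool ranges_disjoint ( Addr src, Addr dst, SizeT len )
{
   return src + len <= dst || dst + len <= src;
}
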
/* Call this periodically to give libhb the opportunity to
garbage-collect its internal data structures. */
void libhb_maybe_GC ( void );
#endif /* __LIBHB_H */
/*--------------------------------------------------------------------*/
/*--- end libhb.h ---*/
/*--------------------------------------------------------------------*/

helgrind/libhb_core.c (new file; diff suppressed because it is too large)

include/pub_tool_execontext.h
@@ -104,6 +104,8 @@ static inline Bool VG_(is_plausible_ECU)( UInt ecu ) {
return (ecu > 0) && ((ecu & 3) == 0);
}
// Make an ExeContext containing exactly the specified stack frames.
ExeContext* VG_(make_ExeContext_from_StackTrace)( Addr* ips, UInt n_ips );
#endif // __PUB_TOOL_EXECONTEXT_H