Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
next array
  • Loading branch information
VSadov committed Nov 9, 2021
commit 5330325530c4147816b8cad81eb83e87e28664c6
24 changes: 12 additions & 12 deletions src/coreclr/vm/clsload.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1140,18 +1140,18 @@ TypeHandle ClassLoader::LookupTypeHandleForTypeKey(TypeKey *pKey)
// Make an initial lookup without taking any locks.
TypeHandle th = LookupTypeHandleForTypeKeyInner(pKey, FALSE);

// A non-null TypeHandle for the above lookup indicates success
// A null TypeHandle only indicates "well, it might have been there,
// try again with a lock". This kind of negative result will
// only happen while accessing the underlying EETypeHashTable
// during a resize, i.e. very rarely. In such a case, we just
// perform the lookup again, but indicate that appropriate locks
// should be taken.

if (th.IsNull())
{
th = LookupTypeHandleForTypeKeyInner(pKey, TRUE);
}
//// A non-null TypeHandle for the above lookup indicates success
//// A null TypeHandle only indicates "well, it might have been there,
//// try again with a lock". This kind of negative result will
//// only happen while accessing the underlying EETypeHashTable
//// during a resize, i.e. very rarely. In such a case, we just
//// perform the lookup again, but indicate that appropriate locks
//// should be taken.

//if (th.IsNull())
//{
// th = LookupTypeHandleForTypeKeyInner(pKey, TRUE);
//}

return th;
}
Expand Down
3 changes: 3 additions & 0 deletions src/coreclr/vm/dacenumerablehash.h
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@ class DacEnumerableHashTable
TADDR m_pEntry; // The entry the caller is currently looking at (or NULL to begin
// with). This is a VolatileEntry* and should always be a target address
// not a DAC PTR_.
TADDR m_curTable;
};

// This opaque structure provides enumeration context when walking all entries in the table. Initialized
Expand Down Expand Up @@ -187,6 +188,8 @@ class DacEnumerableHashTable
DacEnumerableHashValue m_iHashValue; // The hash value associated with the entry
};

DPTR(VALUE) BaseFindFirstEntryByHashCore(PTR_VolatileEntry* curBuckets, DacEnumerableHashValue iHash, LookupContext* pContext);

#ifndef DACCESS_COMPILE
// Determine loader heap to be used for allocation of entries and bucket lists.
LoaderHeap *GetHeap();
Expand Down
77 changes: 55 additions & 22 deletions src/coreclr/vm/dacenumerablehash.inl
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,10 @@ void DacEnumerableHashTable<DAC_ENUM_HASH_ARGS>::GrowTable()
return;

((size_t*)pNewBuckets)[0] = cNewBuckets;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would it make sense to define constants or something for the fake 0 and 1 bucket indices to make this easier to understand?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think making "Length" and "Next" helpers that take an array could make this more clear.

((PTR_VolatileEntry**)curBuckets)[1] = pNewBuckets;

// the new buckets must be published before we start moving entries.
MemoryBarrier();

// All buckets are initially empty.
// Note: Memory allocated on loader heap is zero filled
Expand Down Expand Up @@ -215,8 +219,7 @@ void DacEnumerableHashTable<DAC_ENUM_HASH_ARGS>::GrowTable()
}

// Make sure that all writes are visible before publishing the new array.
MemoryBarrier();
m_pBuckets = pNewBuckets;
VolatileStore(&m_pBuckets, pNewBuckets);
}

// Returns the next prime larger (or equal to) than the number given.
Expand Down Expand Up @@ -264,36 +267,57 @@ DPTR(VALUE) DacEnumerableHashTable<DAC_ENUM_HASH_ARGS>::BaseFindFirstEntryByHash
if (m_cEntries == 0)
return NULL;

auto curBuckets = GetBuckets();
DWORD cBuckets = (DWORD)dac_cast<size_t>(curBuckets[0]);

// Compute which bucket the entry belongs in based on the hash.
DWORD dwBucket = iHash % cBuckets;
PTR_VolatileEntry* curBuckets = GetBuckets();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I do not think that this works with DAC. The cast from DPTR(PTR_VolatileEntry) to PTR_VolatileEntry* will only marshal the first array entry. This needs to pass DPTR(PTR_VolatileEntry) all the way through to the places we index into it.

return BaseFindFirstEntryByHashCore(curBuckets, iHash, pContext);
}

// Point at the first entry in the bucket chain which would contain any entries with the given hash code.
PTR_VolatileEntry pEntry = curBuckets[dwBucket + 2];
template <DAC_ENUM_HASH_PARAMS>
DPTR(VALUE) DacEnumerableHashTable<DAC_ENUM_HASH_ARGS>::BaseFindFirstEntryByHashCore(PTR_VolatileEntry* curBuckets, DacEnumerableHashValue iHash, LookupContext* pContext)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
PRECONDITION(CheckPointer(pContext));
}
CONTRACTL_END;

// Walk the bucket chain one entry at a time.
while (pEntry)
do
{
if (pEntry->m_iHashValue == iHash)
DWORD cBuckets = (DWORD)dac_cast<size_t>(curBuckets[0]);

// Compute which bucket the entry belongs in based on the hash.
DWORD dwBucket = iHash % cBuckets;

// Point at the first entry in the bucket chain which would contain any entries with the given hash code.
PTR_VolatileEntry pEntry = curBuckets[dwBucket + 2];

// Walk the bucket chain one entry at a time.
while (pEntry)
{
// We've found our match.
if (pEntry->m_iHashValue == iHash)
{
// We've found our match.

// Record our current search state into the provided context so that a subsequent call to
// BaseFindNextEntryByHash can pick up the search where it left off.
pContext->m_pEntry = dac_cast<TADDR>(pEntry);
// Record our current search state into the provided context so that a subsequent call to
// BaseFindNextEntryByHash can pick up the search where it left off.
pContext->m_pEntry = dac_cast<TADDR>(pEntry);
pContext->m_curTable = dac_cast<TADDR>(curBuckets);

// Return the address of the sub-classes' embedded entry structure.
return VALUE_FROM_VOLATILE_ENTRY(pEntry);
// Return the address of the sub-classes' embedded entry structure.
return VALUE_FROM_VOLATILE_ENTRY(pEntry);
}

// Move to the next entry in the chain.
pEntry = pEntry->m_pNextEntry;
}

// Move to the next entry in the chain.
pEntry = pEntry->m_pNextEntry;
}
curBuckets = (DPTR(PTR_VolatileEntry))dac_cast<TADDR>(curBuckets[1]);
} while (curBuckets != nullptr);

// If we get here then none of the entries in the target bucket matched the hash code and we have a miss
// (for this section of the table at least).
return NULL;
}

Expand Down Expand Up @@ -334,6 +358,15 @@ DPTR(VALUE) DacEnumerableHashTable<DAC_ENUM_HASH_ARGS>::BaseFindNextEntryByHash(
}
}

// check for next buckets must happen after we looked through the current one
MemoryBarrier();

auto nextBuckets = ((DPTR(PTR_VolatileEntry)*)pContext->m_curTable)[1];
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@hoyosjs - I assume this is correct? (modulo auto). In this case I want to index on the remote side. Is this the right way to do this?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
auto nextBuckets = ((DPTR(PTR_VolatileEntry)*)pContext->m_curTable)[1];
auto nextBuckets = dac_cast<DPTR(PTR_VolatileEntry)>(pContext->m_curTable)[1];

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I made GetNext a common helper. With that it is more convenient if m_curTable is DPTR(PTR_VolatileEntry).

if (nextBuckets != nullptr)
{
return BaseFindFirstEntryByHashCore(nextBuckets, iHash, pContext);
}

return NULL;
}

Expand Down