Bug 649576: Extricate JSHashTable from JSAtomList death grip. (r=luke)

Chris Leary
2011-06-24 14:22:30 -07:00
parent e24ac418e6
commit e699ce9e64
25 changed files with 1866 additions and 918 deletions
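What the change replaces: JSAtomList kept the parser's atom-to-index bindings in a JSHashTable wired to custom arena-backed allocation ops, so every consumer of the list inherited a JSHashTable dependency. Its successor, AtomIndexMap, is built on js::InlineMap: a few entries live in an inline array scanned linearly, and the map spills into a js::HashMap only when that array fills. The sketch below illustrates that inline-then-spill pattern in self-contained C++; the class name SmallMap, its capacity parameter, and the use of std::unordered_map as the spill table are inventions for this example, not SpiderMonkey's actual InlineMap.

#include <cstddef>
#include <unordered_map>

// Illustrative stand-in for the js::InlineMap shape backing AtomIndexMap.
// The first N entries live in an inline array and are found by linear
// scan; once a put() would exceed N, everything spills into a real hash
// map. SmallMap and std::unordered_map as the spill table are invented
// for this sketch.
template <typename K, typename V, size_t N>
class SmallMap
{
    struct Elem { K key; V value; };

    Elem inlineElems[N];            /* scanned linearly while small */
    size_t inlineCount = 0;
    bool spilled = false;
    std::unordered_map<K, V> map;   /* engaged once inline storage fills */

  public:
    bool isMap() const { return spilled; }
    size_t count() const { return spilled ? map.size() : inlineCount; }

    void put(const K &key, const V &value) {
        if (!spilled) {
            for (size_t i = 0; i < inlineCount; i++) {
                if (inlineElems[i].key == key) {
                    inlineElems[i].value = value;   /* overwrite in place */
                    return;
                }
            }
            if (inlineCount < N) {
                inlineElems[inlineCount++] = Elem{key, value};
                return;
            }
            /* Inline storage exhausted: migrate every entry, then spill. */
            for (size_t i = 0; i < inlineCount; i++)
                map.emplace(inlineElems[i].key, inlineElems[i].value);
            spilled = true;
        }
        map[key] = value;
    }

    const V *lookup(const K &key) const {
        if (spilled) {
            auto it = map.find(key);
            return it == map.end() ? nullptr : &it->second;
        }
        for (size_t i = 0; i < inlineCount; i++) {
            if (inlineElems[i].key == key)
                return &inlineElems[i].value;
        }
        return nullptr;
    }
};

Keys here are compared and hashed by value; for atoms, pointer identity suffices because atoms are interned, which is exactly what the deleted ATOM_HASH-based js_hash_atom_ptr relied on. The payoff is that small scopes never allocate a hash table at all, which is what lets this commit delete the temp_alloc_ops machinery below.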

js/src/jsatom.cpp

@@ -641,13 +641,6 @@ js_DumpAtoms(JSContext *cx, FILE *fp)
 }
 #endif
 
-static JSHashNumber
-js_hash_atom_ptr(const void *key)
-{
-    const JSAtom *atom = (const JSAtom *) key;
-    return ATOM_HASH(atom);
-}
-
 #if JS_BITS_PER_WORD == 32
 # define TEMP_SIZE_START_LOG2 5
 #else
@@ -660,312 +653,31 @@ js_hash_atom_ptr(const void *key)
 JS_STATIC_ASSERT(TEMP_SIZE_START >= sizeof(JSHashTable));
 
-static void *
-js_alloc_temp_space(void *priv, size_t size)
-{
-    Parser *parser = (Parser *) priv;
-
-    void *space;
-    if (size < TEMP_SIZE_LIMIT) {
-        int bin = JS_CeilingLog2(size) - TEMP_SIZE_START_LOG2;
-        JS_ASSERT(unsigned(bin) < NUM_TEMP_FREELISTS);
-        space = parser->tempFreeList[bin];
-        if (space) {
-            parser->tempFreeList[bin] = *(void **)space;
-            return space;
-        }
-    }
-
-    JS_ARENA_ALLOCATE(space, &parser->context->tempPool, size);
-    if (!space)
-        js_ReportOutOfMemory(parser->context);
-    return space;
-}
-
-static void
-js_free_temp_space(void *priv, void *item, size_t size)
-{
-    if (size >= TEMP_SIZE_LIMIT)
-        return;
-
-    Parser *parser = (Parser *) priv;
-    int bin = JS_CeilingLog2(size) - TEMP_SIZE_START_LOG2;
-    JS_ASSERT(unsigned(bin) < NUM_TEMP_FREELISTS);
-    *(void **)item = parser->tempFreeList[bin];
-    parser->tempFreeList[bin] = item;
-}
-
-static JSHashEntry *
-js_alloc_temp_entry(void *priv, const void *key)
-{
-    Parser *parser = (Parser *) priv;
-    JSAtomListElement *ale;
-
-    ale = parser->aleFreeList;
-    if (ale) {
-        parser->aleFreeList = ALE_NEXT(ale);
-        return &ale->entry;
-    }
-    JS_ARENA_ALLOCATE_TYPE(ale, JSAtomListElement, &parser->context->tempPool);
-    if (!ale) {
-        js_ReportOutOfMemory(parser->context);
-        return NULL;
-    }
-    return &ale->entry;
-}
-
-static void
-js_free_temp_entry(void *priv, JSHashEntry *he, uintN flag)
-{
-    Parser *parser = (Parser *) priv;
-    JSAtomListElement *ale = (JSAtomListElement *) he;
-
-    ALE_SET_NEXT(ale, parser->aleFreeList);
-    parser->aleFreeList = ale;
-}
-
-static JSHashAllocOps temp_alloc_ops = {
-    js_alloc_temp_space,    js_free_temp_space,
-    js_alloc_temp_entry,    js_free_temp_entry
-};
-
-JSAtomListElement *
-JSAtomList::rawLookup(JSAtom *atom, JSHashEntry **&hep)
-{
-    if (table) {
-        hep = JS_HashTableRawLookup(table, ATOM_HASH(atom), atom);
-        return (JSAtomListElement *) *hep;
-    }
-
-    JSHashEntry **alep = &list;
-    hep = NULL;
-    JSAtomListElement *ale;
-    while ((ale = (JSAtomListElement *)*alep) != NULL) {
-        if (ALE_ATOM(ale) == atom) {
-            /* Hit, move atom's element to the front of the list. */
-            *alep = ale->entry.next;
-            ale->entry.next = list;
-            list = &ale->entry;
-            break;
-        }
-        alep = &ale->entry.next;
-    }
-    return ale;
-}
-
-#define ATOM_LIST_HASH_THRESHOLD 12
-
-JSAtomListElement *
-JSAtomList::add(Parser *parser, JSAtom *atom, AddHow how)
-{
-    JS_ASSERT(!set);
-
-    JSAtomListElement *ale, *ale2, *next;
-    JSHashEntry **hep;
-
-    ale = rawLookup(atom, hep);
-    if (!ale || how != UNIQUE) {
-        if (count < ATOM_LIST_HASH_THRESHOLD && !table) {
-            /* Few enough for linear search and no hash table yet needed. */
-            ale = (JSAtomListElement *)js_alloc_temp_entry(parser, atom);
-            if (!ale)
-                return NULL;
-            ALE_SET_ATOM(ale, atom);
-
-            if (how == HOIST) {
-                ale->entry.next = NULL;
-                hep = (JSHashEntry **) &list;
-                while (*hep)
-                    hep = &(*hep)->next;
-                *hep = &ale->entry;
-            } else {
-                ale->entry.next = list;
-                list = &ale->entry;
-            }
-        } else {
-            /*
-             * We should hash, or else we already are hashing, but count was
-             * reduced by JSAtomList::rawRemove below ATOM_LIST_HASH_THRESHOLD.
-             * Check whether we should create the table.
-             */
-            if (!table) {
-                /* No hash table yet, so hep had better be null! */
-                JS_ASSERT(!hep);
-                table = JS_NewHashTable(count + 1, js_hash_atom_ptr,
-                                        JS_CompareValues, JS_CompareValues,
-                                        &temp_alloc_ops, parser);
-                if (!table)
-                    return NULL;
-
-                /*
-                 * Set ht->nentries explicitly, because we are moving entries
-                 * from list to ht, not calling JS_HashTable(Raw|)Add.
-                 */
-                table->nentries = count;
-
-                /*
-                 * Insert each ale on list into the new hash table. Append to
-                 * the hash chain rather than inserting at the bucket head, to
-                 * preserve order among entries with the same key.
-                 */
-                for (ale2 = (JSAtomListElement *)list; ale2; ale2 = next) {
-                    next = ALE_NEXT(ale2);
-                    ale2->entry.keyHash = ATOM_HASH(ALE_ATOM(ale2));
-                    hep = JS_HashTableRawLookup(table, ale2->entry.keyHash,
-                                                ale2->entry.key);
-                    while (*hep)
-                        hep = &(*hep)->next;
-                    *hep = &ale2->entry;
-                    ale2->entry.next = NULL;
-                }
-                list = NULL;
-
-                /* Set hep for insertion of atom's ale, immediately below. */
-                hep = JS_HashTableRawLookup(table, ATOM_HASH(atom), atom);
-            }
-
-            /* Finally, add an entry for atom into the hash bucket at hep. */
-            ale = (JSAtomListElement *)
-                  JS_HashTableRawAdd(table, hep, ATOM_HASH(atom), atom, NULL);
-            if (!ale)
-                return NULL;
-
-            /*
-             * If hoisting, move ale to the end of its chain after we called
-             * JS_HashTableRawAdd, since RawAdd may have grown the table and
-             * then recomputed hep to refer to the pointer to the first entry
-             * with the given key.
-             */
-            if (how == HOIST && ale->entry.next) {
-                JS_ASSERT(*hep == &ale->entry);
-                *hep = ale->entry.next;
-                ale->entry.next = NULL;
-                do {
-                    hep = &(*hep)->next;
-                } while (*hep);
-                *hep = &ale->entry;
-            }
-        }
-
-        ALE_SET_INDEX(ale, count++);
-    }
-    return ale;
-}
-
 void
-JSAtomList::rawRemove(Parser *parser, JSAtomListElement *ale, JSHashEntry **hep)
+js_InitAtomMap(JSContext *cx, JSAtomMap *map, AtomIndexMap *indices)
 {
-    JS_ASSERT(!set);
-    JS_ASSERT(count != 0);
-
-    if (table) {
-        JS_ASSERT(hep);
-        JS_HashTableRawRemove(table, hep, &ale->entry);
-    } else {
-        JS_ASSERT(!hep);
-        hep = &list;
-        while (*hep != &ale->entry) {
-            JS_ASSERT(*hep);
-            hep = &(*hep)->next;
-        }
-        *hep = ale->entry.next;
-        js_free_temp_entry(parser, &ale->entry, HT_FREE_ENTRY);
-    }
-    --count;
-}
-
-JSAutoAtomList::~JSAutoAtomList()
-{
-    if (table) {
-        JS_HashTableDestroy(table);
-    } else {
-        JSHashEntry *hep = list;
-        while (hep) {
-            JSHashEntry *next = hep->next;
-            js_free_temp_entry(parser, hep, HT_FREE_ENTRY);
-            hep = next;
-        }
-    }
-}
-
-JSAtomListElement *
-JSAtomListIterator::operator ()()
-{
-    JSAtomListElement *ale;
-    JSHashTable *ht;
-
-    if (index == uint32(-1))
-        return NULL;
-
-    ale = next;
-    if (!ale) {
-        ht = list->table;
-        if (!ht)
-            goto done;
-        do {
-            if (index == JS_BIT(JS_HASH_BITS - ht->shift))
-                goto done;
-            next = (JSAtomListElement *) ht->buckets[index++];
-        } while (!next);
-        ale = next;
-    }
-
-    next = ALE_NEXT(ale);
-    return ale;
-
-  done:
-    index = uint32(-1);
-    return NULL;
-}
-
-static intN
-js_map_atom(JSHashEntry *he, intN i, void *arg)
-{
-    JSAtomListElement *ale = (JSAtomListElement *)he;
-    JSAtom **vector = (JSAtom **) arg;
-
-    vector[ALE_INDEX(ale)] = ALE_ATOM(ale);
-    return HT_ENUMERATE_NEXT;
-}
-
-#ifdef DEBUG
-static jsrefcount js_atom_map_count;
-static jsrefcount js_atom_map_hash_table_count;
-#endif
-
-void
-js_InitAtomMap(JSContext *cx, JSAtomMap *map, JSAtomList *al)
-{
-    JSAtom **vector;
-    JSAtomListElement *ale;
-
     /* Map length must already be initialized. */
-    JS_ASSERT(al->count == map->length);
-#ifdef DEBUG
-    JS_ATOMIC_INCREMENT(&js_atom_map_count);
-#endif
-
-    ale = (JSAtomListElement *)al->list;
-    if (!ale && !al->table) {
-        JS_ASSERT(!map->vector);
-        return;
-    }
+    JS_ASSERT(indices->count() == map->length);
 
-    vector = map->vector;
-    if (al->table) {
-#ifdef DEBUG
-        JS_ATOMIC_INCREMENT(&js_atom_map_hash_table_count);
-#endif
-        JS_HashTableEnumerateEntries(al->table, js_map_atom, vector);
+    if (indices->isMap()) {
+        typedef AtomIndexMap::WordMap WordMap;
+        const WordMap &wm = indices->asMap();
+        for (WordMap::Range r = wm.all(); !r.empty(); r.popFront()) {
+            JSAtom *atom = r.front().key;
+            jsatomid index = r.front().value;
+            JS_ASSERT(index < map->length);
+            map->vector[index] = atom;
+        }
     } else {
-        do {
-            vector[ALE_INDEX(ale)] = ALE_ATOM(ale);
-        } while ((ale = ALE_NEXT(ale)) != NULL);
+        for (const AtomIndexMap::InlineElem *it = indices->asInline(), *end = indices->inlineEnd();
+             it != end; ++it) {
+            JSAtom *atom = it->key;
+            if (!atom)
+                continue;
+            JS_ASSERT(it->value < map->length);
+            map->vector[it->value] = atom;
+        }
     }
-    al->clear();
 }
 
 #if JS_HAS_XML_SUPPORT
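
Read as data flow, the rewritten js_InitAtomMap is a map inversion: each (atom, index) binding accumulated during parsing is written into the script's dense index-to-atom vector, one loop per AtomIndexMap representation. A minimal standalone analogue, assuming a single hash-map representation (invertAtomIndices and the jsatomid typedef stand-in are made up for this sketch):

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

typedef uint32_t jsatomid;   /* same role as jsatomid in the diff */

// Invert an atom -> index map into a dense index -> atom vector, the
// shape of work the new js_InitAtomMap performs for both the inline
// and the spilled representation.
template <typename Atom>
std::vector<Atom *>
invertAtomIndices(const std::unordered_map<Atom *, jsatomid> &indices)
{
    std::vector<Atom *> vector(indices.size(), nullptr);
    for (const auto &entry : indices) {
        assert(entry.second < vector.size());  /* indices must be dense */
        vector[entry.second] = entry.first;
    }
    return vector;
}

The JS_ASSERT(index < map->length) checks in the diff encode the same invariant as the assert here: the parser hands out indices 0 through count - 1, so the inversion fills the vector completely.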