Skip to content

Commit ac823b9

Browse files
committed
Rename table.shrink() reorder parameter to be more descriptive, handle no-op better
1 parent 1b7bc9f commit ac823b9

4 files changed

Lines changed: 49 additions & 22 deletions

File tree

VM/src/ltable.cpp

Lines changed: 25 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1063,11 +1063,11 @@ void luaH_overrideiterorder(lua_State* L, LuaTable* t, int override)
10631063
}
10641064

10651065
// ServerLua: shrink table to optimal size
1066-
void luaH_shrink(lua_State* L, LuaTable* t, bool reorder)
1066+
void luaH_shrink(lua_State* L, LuaTable* t, bool shrink_sparse)
10671067
{
10681068
// Shrink table to reduce memory usage.
1069-
// If reorder=false (default), preserves iteration order by not moving elements to hash.
1070-
// If reorder=true, may shrink to boundary and move sparse elements to hash.
1069+
// If shrink_sparse=false (default), elements stay in their current part (array or hash).
1070+
// If shrink_sparse=true, sparse array elements beyond boundary may move to hash to save memory.
10711071

10721072
// No-op for empty tables
10731073
if (!t->sizearray && !t->lsizenode)
@@ -1098,8 +1098,8 @@ void luaH_shrink(lua_State* L, LuaTable* t, bool reorder)
10981098
hash_count++;
10991099
}
11001100

1101-
// If reorder allowed and there are sparse elements, try shrinking to boundary
1102-
if (reorder && sparse_count > 0)
1101+
// If shrink_sparse, try shrinking array to boundary and moving sparse elements to hash
1102+
if (shrink_sparse && sparse_count > 0)
11031103
{
11041104
// We're allowed to move things to the hash part of the table, check if it would be cheaper
11051105
// memory-wise to do that than it would be for us to just shrink array and node to their minimum
@@ -1108,20 +1108,33 @@ void luaH_shrink(lua_State* L, LuaTable* t, bool reorder)
11081108
int new_hash_count = hash_count + sparse_count;
11091109
// hash portion resizes in pow2 increments, sometimes putting things in the hash is basically
11101110
// free if we're already using it.
1111-
int reorder_hash_capacity = 1 << ceillog2(new_hash_count);
1112-
int no_reorder_hash_capacity = hash_count == 0 ? 0 : (1 << ceillog2(hash_count));
1113-
int reorder_cost = boundary + (2 * reorder_hash_capacity);
1114-
int no_reorder_cost = max_used_idx + (2 * no_reorder_hash_capacity);
1111+
int sparse_hash_capacity = 1 << ceillog2(new_hash_count);
1112+
int keep_hash_capacity = hash_count == 0 ? 0 : (1 << ceillog2(hash_count));
1113+
int sparse_cost = boundary + (2 * sparse_hash_capacity);
1114+
int keep_cost = max_used_idx + (2 * keep_hash_capacity);
11151115

11161116
// Would this be smaller memory-wise if we moved some things to the hash portion?
1117-
if (reorder_cost < no_reorder_cost)
1117+
if (sparse_cost < keep_cost)
11181118
{
11191119
resize(L, t, boundary, new_hash_count);
11201120
return;
11211121
}
11221122
}
11231123

1124-
// Default: shrink to max_used_idx (no elements move, preserves iteration order)
1125-
if (max_used_idx < t->sizearray || hash_count < orig_sizenode)
1124+
// Compute actual new hash capacity to avoid rebuilds that can't shrink
1125+
bool array_shrinks = max_used_idx < t->sizearray;
1126+
int new_node_capacity = hash_count == 0 ? 0 : twoto(ceillog2(hash_count));
1127+
bool hash_shrinks = new_node_capacity < orig_sizenode;
1128+
1129+
if (hash_shrinks)
1130+
{
1131+
// Hash genuinely shrinks (possibly array too), full resize needed
11261132
resize(L, t, max_used_idx, hash_count);
1133+
}
1134+
else if (array_shrinks)
1135+
{
1136+
// Only array needs shrinking. All elements past max_used_idx are nil
1137+
// so no elements move to hash, plain array realloc suffices.
1138+
setarrayvector(L, t, max_used_idx);
1139+
}
11271140
}

VM/src/ltable.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,8 @@ LUAI_FUNC void luaH_clear(LuaTable* tt);
3737
// ServerLua: set whether or not a custom iter order should be used
3838
LUAI_FUNC void luaH_overrideiterorder(lua_State* L, LuaTable* t, int override);
3939
// ServerLua: shrink table to optimal size
40-
// If reorder=true, may move sparse array elements to hash (changes iteration order)
41-
LUAI_FUNC void luaH_shrink(lua_State* L, LuaTable* t, bool reorder);
40+
// If shrink_sparse=true, may move sparse array elements to hash to save memory
41+
LUAI_FUNC void luaH_shrink(lua_State* L, LuaTable* t, bool shrink_sparse);
4242

4343
#define luaH_setslot(L, t, slot, key) (invalidateTMcache(t), (slot == luaO_nilobject ? luaH_newkey(L, t, key) : cast_to(TValue*, slot)))
4444

VM/src/ltablib.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -865,8 +865,8 @@ static int tshrink(lua_State* L)
865865
if (t->memcat < LUA_FIRST_USER_MEMCAT)
866866
luaL_argerror(L, 1, "cannot shrink system table");
867867

868-
bool reorder = lua_toboolean(L, 2) != 0;
869-
luaH_shrink(L, t, reorder);
868+
bool shrink_sparse = lua_toboolean(L, 2) != 0;
869+
luaH_shrink(L, t, shrink_sparse);
870870
lua_pushvalue(L, 1);
871871
return 1;
872872
}

tests/conformance/table_sizing.lua

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,20 @@ t.c = 3
172172
table.shrink(t)
173173
check_sizes(t, 0, 4, "shrunk hash only") -- 4 is smallest power-of-2 >= 3
174174

175+
-- Shrink is no-op when hash is already at minimal capacity (no useless rebuild)
176+
t = {}
177+
for i = 1, 12 do t[`key {i}`] = true end
178+
check_sizes(t, 0, 16, "12 elements in 16-slot hash")
179+
local order_before = {}
180+
for k in t do table.insert(order_before, k) end
181+
table.shrink(t)
182+
check_sizes(t, 0, 16, "hash capacity unchanged after shrink")
183+
local order_after = {}
184+
for k in t do table.insert(order_after, k) end
185+
for i = 1, #order_before do
186+
assert(order_before[i] == order_after[i], "iteration order preserved when hash cannot shrink")
187+
end
188+
175189
-- Shrink mixed array/hash - dense path shrinks to exact boundary
176190
t = make_array(100)
177191
t.foo = "bar"
@@ -218,19 +232,19 @@ assert(t[1] == true, "t[1] preserved after shrink")
218232
assert(t[100] == true, "t[100] preserved after shrink")
219233
assert(t.key == "value", "hash data preserved after shrink")
220234

221-
-- Default shrink preserves iteration order (no elements move to hash)
235+
-- Default shrink keeps elements in their current part (no elements move to hash)
222236
t = table.create(2000)
223237
for i = 1, 600 do t[i] = true end
224238
t[1500] = true -- Sparse element in array at high index
225239
table.shrink(t)
226240
check_sizes(t, 1500, 0, "default shrink - preserves order")
227241

228-
-- With reorder=true, sparse elements move to hash for better memory usage
242+
-- With shrink_sparse=true, sparse elements move to hash for better memory usage
229243
t = table.create(2000)
230244
for i = 1, 600 do t[i] = true end
231245
t[1500] = true
232246
table.shrink(t, true)
233-
check_sizes(t, 600, 1, "reorder shrink - sparse element to hash")
247+
check_sizes(t, 600, 1, "shrink_sparse - sparse element to hash")
234248
assert(t[1500] == true, "sparse element preserved after move to hash")
235249

236250
-- Reorder with element closer to boundary
@@ -239,17 +253,17 @@ for i = 1, 600 do t[i] = true end
239253
t[700] = true -- Sparse element
240254
table.shrink(t, true)
241255
-- boundary=600, element at 700 moves to hash, array shrinks to 600
242-
check_sizes(t, 600, 1, "reorder shrink - element near boundary to hash")
256+
check_sizes(t, 600, 1, "shrink_sparse - element near boundary to hash")
243257

244-
-- Even with reorder=true, if moving elements to hash would grow the table,
258+
-- Even with shrink_sparse=true, if moving elements to hash would grow the table,
245259
-- we fall back to shrinking to max_used_idx instead.
246260
t = table.create(100)
247261
t[1] = true
248262
for i = 3, 52 do t[i] = true end
249263
check_sizes(t, 100, 0, "starts at 100 array elements")
250264
table.shrink(t, true)
251265
-- Falls back to max_used_idx since boundary shrink would grow table
252-
check_sizes(t, 52, 0, "reorder fallback - boundary too expensive")
266+
check_sizes(t, 52, 0, "shrink_sparse fallback - boundary too expensive")
253267

254268
-- Idempotent - second shrink is no-op
255269
t = make_array(1000)

0 commit comments

Comments (0)