Commit df26a2b

allocatorimpl: consult with mma for lease count convergence
Previously, the replicate queue was updated to consult MMA before considering a change as a candidate when LBRebalancingMultiMetricAndCount is set. This commit applies the same logic to the lease queue's lease count convergence, filtering out candidates that MMA deems undesirable.
1 parent 3fcc4cb commit df26a2b
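
For orientation, the shape of the change is small: build an MMA rebalance advisor once for the source store, then drop any lease-transfer candidate that MMA flags as conflicting. Below is a minimal, self-contained Go sketch of that filtering pattern; the names here (advisor, buildAdvisor, isInConflict) are illustrative stand-ins, not the real allocator or mmaintegration API.

// Illustrative stand-ins only; not the CockroachDB API.
package main

import "fmt"

type StoreID int

// advisor stands in for the handle returned by BuildMMARebalanceAdvisor: it is
// built once per decision from the source store and the candidate set.
type advisor struct {
  source     StoreID
  overloaded map[StoreID]bool // stores MMA would rather not load further (assumed input)
}

func buildAdvisor(source StoreID, overloaded map[StoreID]bool) advisor {
  return advisor{source: source, overloaded: overloaded}
}

// isInConflict stands in for IsInConflictWithMMA: true means transferring the
// lease to cand would work against MMA's goals.
func (a advisor) isInConflict(cand StoreID) bool {
  return a.overloaded[cand]
}

func main() {
  source := StoreID(1)
  validTargets := []StoreID{2, 3, 4}
  adv := buildAdvisor(source, map[StoreID]bool{3: true}) // pretend MMA flags s3

  // Same shape as the lease count convergence loop in this commit: a target
  // only becomes a candidate if MMA does not object. If every target is
  // filtered out but the lease must be shed anyway, the caller falls back to
  // the lowest-lease-count option (see the allocator.go diff below).
  var candidates []StoreID
  for _, target := range validTargets {
    if target == source || adv.isInConflict(target) {
      continue
    }
    candidates = append(candidates, target)
  }
  fmt.Println(candidates) // prints [2 4]
}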

7 files changed (+244, -53 lines)

pkg/kv/kvserver/allocator/allocatorimpl/allocator.go

Lines changed: 15 additions & 1 deletion
@@ -2543,9 +2543,17 @@ func (a *Allocator) TransferLeaseTarget(
     return validTargets[a.randGen.Intn(len(validTargets))]
   }
 
+  targetStores := make([]roachpb.StoreID, 0, len(sl.Stores))
+  for _, s := range sl.Stores {
+    targetStores = append(targetStores, s.StoreID)
+  }
+  handle := a.as.BuildMMARebalanceAdvisor(source.StoreID, targetStores)
   var bestOption roachpb.ReplicaDescriptor
   candidates := make([]roachpb.ReplicaDescriptor, 0, len(validTargets))
   bestOptionLeaseCount := int32(math.MaxInt32)
+  // Similar to replicate queue, lease queue only filters out overloaded
+  // stores at the final target selection step. See comments on top of
+  // allocatorSync.BuildMMARebalanceAdvisor for more details.
   for _, repl := range validTargets {
     if leaseRepl.StoreID() == repl.StoreID {
       continue
@@ -2555,7 +2563,13 @@ func (a *Allocator) TransferLeaseTarget(
       continue
     }
     if float64(storeDesc.Capacity.LeaseCount) < candidateLeasesMean-0.5 {
-      candidates = append(candidates, repl)
+      // Only include the candidate if it is not in conflict with mma's goals.
+      // Note that even if all candidates are excluded, the len(candidates) ==
+      // 0 branch below will still return the replica with the lowest lease
+      // count if we are required to shed the lease (excludeLeaseRepl==true).
+      if !a.as.IsInConflictWithMMA(ctx, repl.StoreID, handle, true /*cpuOnly*/) {
+        candidates = append(candidates, repl)
+      }
     }
     if storeDesc.Capacity.LeaseCount < bestOptionLeaseCount {
       bestOption = repl

pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go

Lines changed: 177 additions & 0 deletions
@@ -2205,6 +2205,183 @@ func TestAllocatorTransferLeaseTargetIOOverloadCheck(t *testing.T) {
 
 }
 
+// TestAllocatorTransferLeaseTargetMMAConflict tests that the MMA conflict
+// checking logic in TransferLeaseTarget works correctly. It should check with
+// mma but still allow transfer if the transfer is needed to shed due to IO
+// overload or lease preference violation.
+func TestAllocatorTransferLeaseTargetMMAConflict(t *testing.T) {
+  defer leaktest.AfterTest(t)()
+  defer log.Scope(t).Close(t)
+  ctx := context.Background()
+
+  existing := replicas(1, 2, 3)
+
+  leasePreferences := func() []roachpb.LeasePreference {
+    return []roachpb.LeasePreference{
+      {Constraints: []roachpb.Constraint{{Key: "region", Value: "us-east", Type: roachpb.Constraint_REQUIRED}}},
+    }
+  }
+
+  // Helper function to create default store descriptors
+  createDefaultStores := func() []*roachpb.StoreDescriptor {
+    var stores []*roachpb.StoreDescriptor
+    for i := 1; i <= 3; i++ {
+      region := "us-east"
+      if i == 1 {
+        region = "us-west" // Store 1 violates us-east preference
+      }
+
+      leaseCount := int32(50)
+      if i == 1 {
+        leaseCount = 100 // Store 1 has more leases for convergence testing
+      }
+
+      stores = append(stores, &roachpb.StoreDescriptor{
+        StoreID: roachpb.StoreID(i),
+        Node: roachpb.NodeDescriptor{
+          NodeID: roachpb.NodeID(i),
+          Locality: roachpb.Locality{
+            Tiers: []roachpb.Tier{
+              {Key: "region", Value: region},
+            },
+          },
+        },
+        Capacity: roachpb.StoreCapacity{
+          LeaseCount: leaseCount,
+          IOThresholdMax: TestingIOThresholdWithScore(0.1), // Default low IO score
+        },
+      })
+    }
+    return stores
+  }
+
+  testCases := []struct {
+    name string
+    leaseholder roachpb.StoreID
+    mmaReturnsConflict bool
+    expected roachpb.StoreID
+    conf *roachpb.SpanConfig
+    enforcement IOOverloadEnforcementLevel
+    setupStores func() []*roachpb.StoreDescriptor // Function to create stores for this test case
+  }{
+    {
+      name: "normal lease count convergence respects MMA conflict",
+      leaseholder: 1,
+      mmaReturnsConflict: true,
+      expected: 0, // Should be blocked by MMA conflict
+      conf: emptySpanConfig(),
+      setupStores: createDefaultStores,
+    },
+    {
+      name: "normal lease count convergence proceeds when MMA allows",
+      leaseholder: 1,
+      mmaReturnsConflict: false,
+      // 2, 3 are both valid targets, 2 is picked since this is a random
+      // deterministic choice.
+      expected: 2,
+      conf: emptySpanConfig(),
+      setupStores: createDefaultStores,
+    },
+    {
+      name: "lease preferences bypass MMA conflict",
+      leaseholder: 1, // Store 1 violates preference (us-west)
+      mmaReturnsConflict: true,
+      // Pick store 2 regardless of MMA conflict.
+      expected: 2,
+      conf: &roachpb.SpanConfig{LeasePreferences: leasePreferences()},
+      setupStores: createDefaultStores,
+    },
+    {
+      name: "lease preferences respect MMA conflict when no violation",
+      leaseholder: 2, // Store 2 satisfies preference (us-east)
+      mmaReturnsConflict: true,
+      // Should be blocked by MMA conflict since no preference violation.
+      expected: 0,
+      conf: &roachpb.SpanConfig{LeasePreferences: leasePreferences()},
+      setupStores: createDefaultStores,
+    },
+    {
+      name: "lease preferences work when MMA allows",
+      leaseholder: 1, // Store 1 violates preference (us-west)
+      mmaReturnsConflict: false,
+      // Should move to store 2 (first preferred store).
+      expected: 2,
+      conf: &roachpb.SpanConfig{LeasePreferences: leasePreferences()},
+      setupStores: createDefaultStores,
+    },
+    {
+      name: "IO overload bypasses MMA conflict when needs to be shed",
+      leaseholder: 1,
+      mmaReturnsConflict: true,
+      // Pick store 2 even though it's in conflict with MMA.
+      expected: 2,
+      conf: emptySpanConfig(),
+      enforcement: IOOverloadThresholdShed,
+      setupStores: func() []*roachpb.StoreDescriptor {
+        stores := createDefaultStores()
+        stores[0].Capacity.IOThresholdMax = TestingIOThresholdWithScore(0.5) // Store 1 is IO overloaded
+        return stores
+      },
+    },
+    {
+      name: "transfer allowed by IO overload and mma",
+      leaseholder: 1,
+      mmaReturnsConflict: false,
+      // Should move to store 3. 2 is blocked by
+      // IOOverloadThresholdBlockTransfers. 3 is allowed by check and mma.
+      expected: 3,
+      conf: emptySpanConfig(),
+      enforcement: IOOverloadThresholdBlockTransfers,
+      setupStores: func() []*roachpb.StoreDescriptor {
+        stores := createDefaultStores()
+        stores[1].Capacity.IOThresholdMax = TestingIOThresholdWithScore(0.5) // Store 2 is IO overloaded
+        return stores
+      },
+    },
+  }
+
+  for _, tc := range testCases {
+    t.Run(tc.name, func(t *testing.T) {
+      // Create allocator with custom MMA knobs for this test case
+      stopper, g, sp, a, _ := CreateTestAllocatorWithKnobs(ctx, 10, true /* deterministic */, nil, &mmaintegration.TestingKnobs{
+        OverrideIsInConflictWithMMA: func(cand roachpb.StoreID) bool {
+          return tc.mmaReturnsConflict
+        },
+      })
+      defer stopper.Stop(ctx)
+
+      // Set up stores using the test case's setup function
+      stores := tc.setupStores()
+      sg := gossiputil.NewStoreGossiper(g)
+      sg.GossipStores(stores, t)
+
+      // Set up IO overload enforcement if specified
+      if tc.enforcement != 0 {
+        LeaseIOOverloadThresholdEnforcement.Override(ctx, &a.st.SV, tc.enforcement)
+      }
+
+      EnableLoadBasedLeaseRebalancing.Override(ctx, &a.st.SV, false)
+
+      target := a.TransferLeaseTarget(
+        ctx,
+        sp,
+        &roachpb.RangeDescriptor{},
+        tc.conf,
+        existing,
+        &mockRepl{
+          replicationFactor: 3,
+          storeID: tc.leaseholder,
+        },
+        allocator.RangeUsageInfo{}, /* stats */
+        allocator.TransferLeaseOptions{
+          CheckCandidateFullness: true,
+        },
+      )
+      require.Equal(t, tc.expected, target.StoreID)
+    })
+  }
+}
+
 func TestAllocatorTransferLeaseToReplicasNeedingSnapshot(t *testing.T) {
   defer leaktest.AfterTest(t)()
   defer log.Scope(t).Close(t)
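
Assuming a checkout where this package builds with the standard Go toolchain (CockroachDB's own dev/Bazel wrappers are the usual entry point), the new test can be run on its own with something like:

  go test ./pkg/kv/kvserver/allocator/allocatorimpl -run TestAllocatorTransferLeaseTargetMMAConflict -v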

pkg/kv/kvserver/asim/tests/testdata/non_rand/mma/high_cpu_able_to_shed_leases.txt

Lines changed: 7 additions & 7 deletions
@@ -41,17 +41,17 @@ write_bytes_per_second#1: last: [s1=25989, s2=25993, s3=29967, s4=31991, s5=359
 write_bytes_per_second#1: thrash_pct: [s1=161%, s2=82%, s3=144%, s4=176%, s5=158%] (sum=722%)
 artifacts[mma-only]: 9a1122b07e8a1e71
 ==========================
-cpu#1: last: [s1=3199860194, s2=2799497761, s3=3199674637, s4=2800510026, s5=2999404241] (stddev=178780081.94, mean=2999789371.80, sum=14998946859)
-cpu#1: thrash_pct: [s1=124%, s2=62%, s3=49%, s4=108%, s5=79%] (sum=422%)
+cpu#1: last: [s1=3200015723, s2=2799409624, s3=3199923468, s4=2800455801, s5=2999667444] (stddev=178902277.04, mean=2999894412.00, sum=14999472060)
+cpu#1: thrash_pct: [s1=109%, s2=62%, s3=49%, s4=108%, s5=79%] (sum=408%)
 cpu_util#1: last: [s1=0.36, s2=0.31, s3=0.36, s4=0.31, s5=0.33] (stddev=0.02, mean=0.33, sum=2)
-cpu_util#1: thrash_pct: [s1=124%, s2=62%, s3=49%, s4=108%, s5=79%] (sum=422%)
+cpu_util#1: thrash_pct: [s1=109%, s2=62%, s3=49%, s4=108%, s5=79%] (sum=408%)
 leases#1: first: [s1=25, s2=0, s3=0, s4=0, s5=0] (stddev=10.00, mean=5.00, sum=25)
-leases#1: last: [s1=6, s2=2, s3=5, s4=5, s5=7] (stddev=1.67, mean=5.00, sum=25)
+leases#1: last: [s1=7, s2=3, s3=3, s4=5, s5=7] (stddev=1.79, mean=5.00, sum=25)
 leases#1: thrash_pct: [s1=0%, s2=0%, s3=0%, s4=0%, s5=0%] (sum=0%)
 replicas#1: first: [s1=25, s2=13, s3=13, s4=12, s5=12] (stddev=5.02, mean=15.00, sum=75)
 replicas#1: last: [s1=16, s2=14, s3=16, s4=14, s5=15] (stddev=0.89, mean=15.00, sum=75)
 replicas#1: thrash_pct: [s1=0%, s2=0%, s3=0%, s4=0%, s5=0%] (sum=0%)
-write_bytes_per_second#1: last: [s1=31998, s2=27994, s3=31996, s4=28005, s5=29994] (stddev=1787.74, mean=29997.40, sum=149987)
-write_bytes_per_second#1: thrash_pct: [s1=124%, s2=62%, s3=49%, s4=108%, s5=79%] (sum=422%)
-artifacts[mma-count]: e81950c3e42f43d
+write_bytes_per_second#1: last: [s1=32000, s2=27994, s3=31999, s4=28004, s5=29996] (stddev=1789.08, mean=29998.60, sum=149993)
+write_bytes_per_second#1: thrash_pct: [s1=109%, s2=62%, s3=49%, s4=107%, s5=79%] (sum=408%)
+artifacts[mma-count]: afcb108e54d06e3c
 ==========================

pkg/kv/kvserver/asim/tests/testdata/non_rand/mma/high_write_uniform_cpu.txt

Lines changed: 13 additions & 13 deletions
@@ -47,20 +47,20 @@ write_bytes_per_second#1: last: [s1=5987983, s2=5994822, s3=5992147, s4=6010824
 write_bytes_per_second#1: thrash_pct: [s1=95%, s2=73%, s3=98%, s4=123%, s5=83%, s6=70%, s7=56%, s8=49%, s9=97%, s10=77%] (sum=821%)
 artifacts[mma-only]: a187f7fcdc7520a5
 ==========================
-cpu#1: last: [s1=497905231, s2=499033106, s3=501825000, s4=500466725, s5=498817870, s6=497161276, s7=500212885, s8=499757059, s9=500662500, s10=500093532] (stddev=1312513.07, mean=499593518.40, sum=4995935184)
-cpu#1: thrash_pct: [s1=216%, s2=351%, s3=34%, s4=83%, s5=276%, s6=173%, s7=275%, s8=269%, s9=36%, s10=273%] (sum=1985%)
-cpu_util#1: last: [s1=0.17, s2=0.17, s3=0.17, s4=0.17, s5=0.17, s6=0.17, s7=0.17, s8=0.17, s9=0.17, s10=0.17] (stddev=0.00, mean=0.17, sum=2)
-cpu_util#1: thrash_pct: [s1=216%, s2=351%, s3=34%, s4=83%, s5=276%, s6=173%, s7=275%, s8=269%, s9=36%, s10=273%] (sum=1985%)
+cpu#1: last: [s1=507916789, s2=498501253, s3=500579751, s4=499842814, s5=501335541, s6=494153659, s7=496397433, s8=499418762, s9=499983556, s10=504531465] (stddev=3665905.75, mean=500266102.30, sum=5002661023)
+cpu#1: thrash_pct: [s1=492%, s2=358%, s3=429%, s4=435%, s5=479%, s6=229%, s7=232%, s8=428%, s9=435%, s10=417%] (sum=3933%)
+cpu_util#1: last: [s1=0.17, s2=0.17, s3=0.17, s4=0.17, s5=0.17, s6=0.16, s7=0.17, s8=0.17, s9=0.17, s10=0.17] (stddev=0.00, mean=0.17, sum=2)
+cpu_util#1: thrash_pct: [s1=492%, s2=358%, s3=429%, s4=435%, s5=479%, s6=229%, s7=232%, s8=428%, s9=435%, s10=417%] (sum=3933%)
 disk_fraction_used#1: first: [s1=0.00, s2=0.00, s3=0.00, s4=0.00, s5=0.00, s6=0.00, s7=0.00, s8=0.00, s9=0.00, s10=0.00] (stddev=0.00, mean=0.00, sum=0)
-disk_fraction_used#1: last: [s1=0.03, s2=0.03, s3=0.03, s4=0.03, s5=0.03, s6=0.03, s7=0.03, s8=0.03, s9=0.03, s10=0.03] (stddev=0.00, mean=0.03, sum=0)
-disk_fraction_used#1: thrash_pct: [s1=35%, s2=131%, s3=61%, s4=65%, s5=50%, s6=69%, s7=59%, s8=132%, s9=40%, s10=64%] (sum=707%)
+disk_fraction_used#1: last: [s1=0.04, s2=0.04, s3=0.03, s4=0.03, s5=0.03, s6=0.02, s7=0.04, s8=0.03, s9=0.03, s10=0.04] (stddev=0.01, mean=0.03, sum=0)
+disk_fraction_used#1: thrash_pct: [s1=440%, s2=282%, s3=270%, s4=437%, s5=536%, s6=284%, s7=139%, s8=248%, s9=473%, s10=235%] (sum=3344%)
 leases#1: first: [s1=19, s2=11, s3=6, s4=3, s5=3, s6=3, s7=4, s8=3, s9=4, s10=4] (stddev=4.92, mean=6.00, sum=60)
-leases#1: last: [s1=9, s2=6, s3=5, s4=6, s5=4, s6=5, s7=6, s8=6, s9=5, s10=8] (stddev=1.41, mean=6.00, sum=60)
-leases#1: thrash_pct: [s1=57%, s2=130%, s3=64%, s4=53%, s5=76%, s6=77%, s7=115%, s8=116%, s9=51%, s10=154%] (sum=894%)
+leases#1: last: [s1=6, s2=8, s3=5, s4=7, s5=9, s6=6, s7=7, s8=3, s9=6, s10=3] (stddev=1.84, mean=6.00, sum=60)
+leases#1: thrash_pct: [s1=245%, s2=181%, s3=201%, s4=304%, s5=284%, s6=114%, s7=148%, s8=189%, s9=302%, s10=201%] (sum=2169%)
 replicas#1: first: [s1=39, s2=33, s3=23, s4=16, s5=13, s6=12, s7=11, s8=11, s9=11, s10=11] (stddev=9.76, mean=18.00, sum=180)
-replicas#1: last: [s1=20, s2=18, s3=20, s4=18, s5=18, s6=18, s7=17, s8=17, s9=17, s10=17] (stddev=1.10, mean=18.00, sum=180)
-replicas#1: thrash_pct: [s1=87%, s2=274%, s3=116%, s4=159%, s5=96%, s6=111%, s7=190%, s8=254%, s9=82%, s10=182%] (sum=1551%)
-write_bytes_per_second#1: last: [s1=6011172, s2=5993736, s3=5993997, s4=5993701, s5=5994247, s6=6010778, s7=5991339, s8=6011998, s9=5993515, s10=5994635] (stddev=8166.94, mean=5998911.80, sum=59989118)
-write_bytes_per_second#1: thrash_pct: [s1=122%, s2=299%, s3=169%, s4=214%, s5=140%, s6=179%, s7=155%, s8=217%, s9=140%, s10=140%] (sum=1775%)
-artifacts[mma-count]: d9bbbb822e478251
+replicas#1: last: [s1=18, s2=19, s3=18, s4=18, s5=18, s6=14, s7=18, s8=18, s9=18, s10=21] (stddev=1.61, mean=18.00, sum=180)
+replicas#1: thrash_pct: [s1=621%, s2=423%, s3=396%, s4=730%, s5=682%, s6=259%, s7=283%, s8=426%, s9=769%, s10=321%] (sum=4911%)
+write_bytes_per_second#1: last: [s1=6665505, s2=6679332, s3=4655369, s4=5343865, s5=5999973, s6=3993692, s7=7993314, s8=5328834, s9=6010782, s10=7317305] (stddev=1154442.08, mean=5998797.10, sum=59987971)
+write_bytes_per_second#1: thrash_pct: [s1=592%, s2=450%, s3=421%, s4=587%, s5=529%, s6=409%, s7=333%, s8=413%, s9=607%, s10=371%] (sum=4712%)
+artifacts[mma-count]: 5f5d90a8fe809362
 ==========================

pkg/kv/kvserver/asim/tests/testdata/non_rand/mma/skewed_cpu_even_ranges_mma.txt

Lines changed: 11 additions & 11 deletions
@@ -72,17 +72,17 @@ write_bytes_per_second#1: last: [s1=3751, s2=3916, s3=4019, s4=6903, s5=6911, s
 write_bytes_per_second#1: thrash_pct: [s1=565%, s2=570%, s3=599%, s4=134%, s5=162%, s6=134%, s7=162%, s8=159%, s9=173%] (sum=2659%)
 artifacts[mma-only]: 8b414911c677f8f9
 ==========================
-cpu#1: last: [s1=564321656, s2=570703549, s3=567256633, s4=561501935, s5=560527044, s6=555833144, s7=574286961, s8=558080746, s9=557632305] (stddev=5946449.12, mean=563349330.33, sum=5070143973)
-cpu#1: thrash_pct: [s1=67%, s2=123%, s3=155%, s4=40%, s5=102%, s6=49%, s7=72%, s8=26%, s9=56%] (sum=689%)
-cpu_util#1: last: [s1=0.11, s2=0.11, s3=0.11, s4=0.11, s5=0.11, s6=0.11, s7=0.11, s8=0.11, s9=0.11] (stddev=0.00, mean=0.11, sum=1)
-cpu_util#1: thrash_pct: [s1=67%, s2=123%, s3=155%, s4=40%, s5=102%, s6=49%, s7=72%, s8=26%, s9=56%] (sum=689%)
+cpu#1: last: [s1=705611295, s2=851106563, s3=567039636, s4=429874660, s5=555084030, s6=565901683, s7=415672487, s8=555517775, s9=414198713] (stddev=135421522.68, mean=562222982.44, sum=5060006842)
+cpu#1: thrash_pct: [s1=127%, s2=175%, s3=165%, s4=68%, s5=99%, s6=64%, s7=76%, s8=48%, s9=78%] (sum=900%)
+cpu_util#1: last: [s1=0.14, s2=0.17, s3=0.11, s4=0.09, s5=0.11, s6=0.11, s7=0.08, s8=0.11, s9=0.08] (stddev=0.03, mean=0.11, sum=1)
+cpu_util#1: thrash_pct: [s1=127%, s2=175%, s3=165%, s4=68%, s5=99%, s6=64%, s7=76%, s8=48%, s9=78%] (sum=900%)
 leases#1: first: [s1=36, s2=0, s3=0, s4=36, s5=0, s6=0, s7=36, s8=0, s9=0] (stddev=16.97, mean=12.00, sum=108)
-leases#1: last: [s1=8, s2=7, s3=7, s4=16, s5=14, s6=11, s7=15, s8=13, s9=17] (stddev=3.68, mean=12.00, sum=108)
-leases#1: thrash_pct: [s1=72%, s2=94%, s3=123%, s4=94%, s5=184%, s6=108%, s7=106%, s8=74%, s9=127%] (sum=981%)
+leases#1: last: [s1=10, s2=8, s3=7, s4=13, s5=12, s6=14, s7=16, s8=13, s9=15] (stddev=2.91, mean=12.00, sum=108)
+leases#1: thrash_pct: [s1=113%, s2=129%, s3=129%, s4=106%, s5=148%, s6=97%, s7=111%, s8=68%, s9=109%] (sum=1011%)
 replicas#1: first: [s1=36, s2=36, s3=36, s4=36, s5=36, s6=36, s7=36, s8=36, s9=36] (stddev=0.00, mean=36.00, sum=324)
-replicas#1: last: [s1=35, s2=36, s3=34, s4=38, s5=33, s6=36, s7=38, s8=38, s9=36] (stddev=1.70, mean=36.00, sum=324)
-replicas#1: thrash_pct: [s1=348%, s2=213%, s3=216%, s4=589%, s5=1204%, s6=840%, s7=643%, s8=643%, s9=987%] (sum=5682%)
-write_bytes_per_second#1: last: [s1=5514, s2=5506, s3=5089, s4=6396, s5=5508, s6=6117, s7=6365, s8=6580, s9=6210] (stddev=492.12, mean=5920.56, sum=53285)
-write_bytes_per_second#1: thrash_pct: [s1=1214%, s2=1091%, s3=1145%, s4=1119%, s5=1628%, s6=1261%, s7=1349%, s8=1334%, s9=1459%] (sum=11600%)
-artifacts[mma-count]: 6790eb7f413fc1f4
+replicas#1: last: [s1=37, s2=33, s3=33, s4=38, s5=36, s6=38, s7=38, s8=35, s9=36] (stddev=1.89, mean=36.00, sum=324)
+replicas#1: thrash_pct: [s1=295%, s2=124%, s3=257%, s4=483%, s5=813%, s6=549%, s7=536%, s8=361%, s9=520%] (sum=3938%)
+write_bytes_per_second#1: last: [s1=5845, s2=4977, s3=5056, s4=6368, s5=6060, s6=6557, s7=6546, s8=6035, s9=6137] (stddev=548.69, mean=5953.44, sum=53581)
+write_bytes_per_second#1: thrash_pct: [s1=1367%, s2=1468%, s3=1377%, s4=1191%, s5=1312%, s6=1216%, s7=1216%, s8=990%, s9=1263%] (sum=11400%)
+artifacts[mma-count]: 521e92db29757c36
 ==========================

pkg/kv/kvserver/asim/tests/testdata/non_rand/mma/skewed_cpu_skewed_write.txt

Lines changed: 10 additions & 10 deletions
@@ -57,19 +57,19 @@ write_bytes_per_second#1: last: [s1=8367672, s2=10580757, s3=10581322, s4=10531
 write_bytes_per_second#1: thrash_pct: [s1=1%, s2=5%, s3=7%, s4=2%, s5=115%, s6=110%] (sum=241%)
 artifacts[mma-only]: 31964600a8a5db0a
 ==========================
-cpu#1: last: [s1=830551694, s2=843605039, s3=831306454, s4=831088101, s5=832966580, s6=830458451] (stddev=4668942.17, mean=833329386.50, sum=4999976319)
-cpu#1: thrash_pct: [s1=15%, s2=83%, s3=74%, s4=39%, s5=46%, s6=51%] (sum=309%)
+cpu#1: last: [s1=832135023, s2=829718568, s3=829738010, s4=845587865, s5=833948228, s6=829560377] (stddev=5658868.56, mean=833448011.83, sum=5000688071)
+cpu#1: thrash_pct: [s1=59%, s2=84%, s3=90%, s4=46%, s5=47%, s6=44%] (sum=370%)
 cpu_util#1: last: [s1=0.17, s2=0.17, s3=0.17, s4=0.17, s5=0.17, s6=0.17] (stddev=0.00, mean=0.17, sum=1)
-cpu_util#1: thrash_pct: [s1=15%, s2=83%, s3=74%, s4=39%, s5=46%, s6=51%] (sum=309%)
+cpu_util#1: thrash_pct: [s1=59%, s2=84%, s3=90%, s4=46%, s5=47%, s6=44%] (sum=370%)
 leases#1: first: [s1=36, s2=0, s3=0, s4=36, s5=0, s6=0] (stddev=16.97, mean=12.00, sum=72)
-leases#1: last: [s1=11, s2=16, s3=11, s4=15, s5=11, s6=8] (stddev=2.71, mean=12.00, sum=72)
-leases#1: thrash_pct: [s1=53%, s2=75%, s3=79%, s4=76%, s5=79%, s6=89%] (sum=451%)
+leases#1: last: [s1=10, s2=13, s3=16, s4=13, s5=11, s6=9] (stddev=2.31, mean=12.00, sum=72)
+leases#1: thrash_pct: [s1=83%, s2=85%, s3=80%, s4=77%, s5=90%, s6=84%] (sum=500%)
 replicas#1: first: [s1=36, s2=36, s3=36, s4=36, s5=36, s6=36] (stddev=0.00, mean=36.00, sum=216)
-replicas#1: last: [s1=38, s2=35, s3=35, s4=37, s5=35, s6=36] (stddev=1.15, mean=36.00, sum=216)
-replicas#1: thrash_pct: [s1=336%, s2=388%, s3=388%, s4=401%, s5=415%, s6=293%] (sum=2221%)
-write_bytes_per_second#1: last: [s1=10028292, s2=10525501, s3=9475047, s4=10022859, s5=9422060, s6=10524839] (stddev=440174.25, mean=9999766.33, sum=59998598)
-write_bytes_per_second#1: thrash_pct: [s1=62%, s2=88%, s3=98%, s4=192%, s5=204%, s6=186%] (sum=830%)
-artifacts[mma-count]: b42b5562c69d2ae7
+replicas#1: last: [s1=36, s2=35, s3=37, s4=34, s5=37, s6=37] (stddev=1.15, mean=36.00, sum=216)
+replicas#1: thrash_pct: [s1=815%, s2=525%, s3=571%, s4=680%, s5=632%, s6=402%] (sum=3624%)
+write_bytes_per_second#1: last: [s1=8867846, s2=10027943, s3=10524259, s4=10023228, s5=9973422, s6=10582360] (stddev=562404.06, mean=9999843.00, sum=59999058)
+write_bytes_per_second#1: thrash_pct: [s1=160%, s2=129%, s3=99%, s4=244%, s5=250%, s6=228%] (sum=1111%)
+artifacts[mma-count]: d266bb6748d76c02
 ==========================
 Cluster Set Up
 n1(AU_EAST,AU_EAST_1,5vcpu): {s1:(256GiB)}
