Mirror of https://activitypub.software/TransFem-org/Sharkey, synced 2024-12-18 22:40:07 +00:00

merge: Fix rate limits under multi-node environments (!809)

View MR for information: https://activitypub.software/TransFem-org/Sharkey/-/merge_requests/809
Approved-by: dakkar <dakkar@thenautilus.net>
Approved-by: Marie <github@yuugi.dev>

This commit is contained in: fd0ecb22cf
16 changed files with 719 additions and 400 deletions
@@ -117,12 +117,27 @@ export interface LimitInfo {
 	fullResetMs: number;
 }
 
+export const disabledLimitInfo: Readonly<LimitInfo> = Object.freeze({
+	blocked: false,
+	remaining: Number.MAX_SAFE_INTEGER,
+	resetSec: 0,
+	resetMs: 0,
+	fullResetSec: 0,
+	fullResetMs: 0,
+});
+
 export function isLegacyRateLimit(limit: RateLimit): limit is LegacyRateLimit {
 	return limit.type === undefined;
 }
 
-export function hasMinLimit(limit: LegacyRateLimit): limit is LegacyRateLimit & { minInterval: number } {
-	return !!limit.minInterval;
+export type MaxLegacyLimit = LegacyRateLimit & { duration: number, max: number };
+export function hasMaxLimit(limit: LegacyRateLimit): limit is MaxLegacyLimit {
+	return limit.max != null && limit.duration != null;
+}
+
+export type MinLegacyLimit = LegacyRateLimit & { minInterval: number };
+export function hasMinLimit(limit: LegacyRateLimit): limit is MinLegacyLimit {
+	return limit.minInterval != null;
 }
 
 export function sendRateLimitHeaders(reply: FastifyReply, info: LimitInfo): void {
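For context, a quick sketch of how the new type guards are meant to be used when inspecting a limit definition. The helper function below is hypothetical; only the imported names come from `rate-limit-utils.ts`.

```ts
import { hasMaxLimit, hasMinLimit, isLegacyRateLimit } from '@/misc/rate-limit-utils.js';
import type { RateLimit } from '@/misc/rate-limit-utils.js';

// Hypothetical helper - shows the intended narrowing order for the new guards.
function describeLimit(limit: RateLimit): string {
	if (!isLegacyRateLimit(limit)) {
		return `bucket limit of size ${limit.size}`;                 // BucketRateLimit
	}
	if (hasMaxLimit(limit)) {
		return `legacy: ${limit.max} calls per ${limit.duration}ms`; // MaxLegacyLimit
	}
	if (hasMinLimit(limit)) {
		return `legacy: one call per ${limit.minInterval}ms`;        // MinLegacyLimit
	}
	return 'no effective limit (callers fall back to disabledLimitInfo)';
}
```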
143  packages/backend/src/server/SkRateLimiterService.md  Normal file
@@ -0,0 +1,143 @@
# SkRateLimiterService - Leaky Bucket Rate Limit Implementation

SkRateLimiterService replaces Misskey's RateLimiterService for all use cases.
It offers a simplified API, detailed metrics, and support for Rate Limit headers.
The prime feature is an implementation of Leaky Bucket - a flexible rate limiting scheme that better supports bursty request patterns common with human interaction.

## Compatibility

The API is backwards-compatible with existing limit definitions, but it's preferred to use the new BucketRateLimit interface.
Legacy limits will be "translated" into a bucket limit in a way that attempts to respect max, duration, and minInterval (if present).
SkRateLimiterService is not quite plug-and-play compatible with existing call sites, as it no longer throws when a limit is exceeded.
Instead, the returned LimitInfo object will have `blocked` set to true.
Callers are responsible for checking this property and taking any desired action, such as rejecting a request or returning limit details.
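As an illustration of this contract, here is a minimal sketch of a call site that checks `blocked` instead of catching an exception. The endpoint name, limit values, and wiring are hypothetical; only `SkRateLimiterService.limit()`, `LimitInfo`, and `sendRateLimitHeaders()` come from this MR.

```ts
import type { FastifyReply } from 'fastify';
import { sendRateLimitHeaders } from '@/misc/rate-limit-utils.js';
import type { SkRateLimiterService } from '@/server/api/SkRateLimiterService.js';

// Hypothetical call site - burst up to 10 calls, then refill one slot per second.
async function enforceNoteCreateLimit(limiter: SkRateLimiterService, reply: FastifyReply, actor: string): Promise<boolean> {
	const info = await limiter.limit({ key: 'note-create', type: 'bucket', size: 10, dripRate: 1000 }, actor);

	// Optionally expose the standard headers documented below.
	sendRateLimitHeaders(reply, info);

	if (info.blocked) {
		reply.code(429); // Too Many Requests - the service no longer throws.
		return false;
	}
	return true;
}
```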
## Headers

LimitInfo objects (returned by `SkRateLimiterService.limit()`) can be passed to `rate-limit-utils.sendRateLimitHeaders()` to send standard rate limit headers with an HTTP response.
The defined headers are:

| Header                  | Definition                                                                                                                                                                                         | Example                    |
|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------|
| `X-RateLimit-Remaining` | Number of calls that can be made without triggering the rate limit. Will be zero if the limit is already exceeded, or will be exceeded by the next request.                                        | `X-RateLimit-Remaining: 1` |
| `X-RateLimit-Clear`     | Time in seconds required to completely clear the rate limit "bucket".                                                                                                                              | `X-RateLimit-Clear: 1.5`   |
| `X-RateLimit-Reset`     | Number of seconds to wait before retrying the current request. Clients should delay for at least this long before making another call. Only included if the rate limit has already been exceeded. | `X-RateLimit-Reset: 0.755` |
| `Retry-After`           | Like `X-RateLimit-Reset`, but rounded up to whole seconds. Preserved for backwards compatibility, and only included if the rate limit has already been exceeded.                                   | `Retry-After: 2`           |

Note: rate limit headers are not standardized, except for `Retry-After`.
Header meanings and usage have been devised by adapting common patterns to work with a leaky bucket rate limit model.
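The authoritative header construction lives in `sendRateLimitHeaders()` (not shown in this excerpt). Based on the table above, a plausible mapping from `LimitInfo` fields looks roughly like the sketch below; treat the exact field-to-header pairing as an assumption, not the actual implementation.

```ts
import type { FastifyReply } from 'fastify';
import type { LimitInfo } from '@/misc/rate-limit-utils.js';

// Illustrative only - sketch of how LimitInfo could map onto the headers above.
function sketchRateLimitHeaders(reply: FastifyReply, info: LimitInfo): void {
	reply.header('X-RateLimit-Remaining', info.remaining.toString());
	reply.header('X-RateLimit-Clear', (info.fullResetMs / 1000).toString()); // e.g. "1.5"

	if (info.blocked) {
		// Only sent once the limit has been exceeded.
		reply.header('X-RateLimit-Reset', (info.resetMs / 1000).toString()); // e.g. "0.755"
		reply.header('Retry-After', info.resetSec.toString());               // whole seconds, rounded up
	}
}
```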
## Performance

SkRateLimiterService makes between 1 and 4 redis transactions per rate limit check.
The first call is read-only, while the others perform at least one write operation.
Two integer keys are stored per client/subject, and both expire together after the maximum duration of the limit.
While performance has not been formally tested, it's expected that SkRateLimiterService has an impact roughly on par with the legacy RateLimiterService.
Redis memory usage should be notably lower due to the reduced number of keys and avoidance of set / array constructions.
## Concurrency and Multi-Node Correctness

To provide consistency across multi-node environments, leaky bucket is implemented with only atomic operations (`Increment`, `Decrement`, `Add`, and `Subtract`).
This allows the use of Optimistic Locking with read-modify-check logic.
If a data conflict is detected during the "drip" phase, then it's safely reverted by executing its inverse (`Increment` <-> `Decrement`, `Add` <-> `Subtract`).
We don't need to check for conflicts when adding the current request to the bucket, as all other logic already accounts for the case where the bucket has been "overfilled".
Should an extra request slip through, the limit delay will be extended until the bucket size is back within limits.

There is one non-atomic `Set` operation used to populate the initial Timestamp value, but we can safely ignore data races there.
Any possible conflict would have to occur within a few-milliseconds window, which means that the final value can be no more than a few milliseconds off from the expected value.
This error does not compound, as all further operations are relative (Increment and Add).
Thus, it's considered an acceptable tradeoff given the limitations imposed by Redis and ioredis.
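To make the inverse-operation rollback concrete, here is a minimal ioredis sketch of one conflict check during the drip phase. Key names, the client wiring, and the function itself are illustrative; the real logic (including expirations and re-reads) lives in `SkRateLimiterService.limitBucket()`.

```ts
import Redis from 'ioredis';

// Minimal sketch of the optimistic drip-and-rollback step (key names are illustrative).
// expectedTimestamp is the value observed in the earlier read-only transaction.
async function dripOnce(redis: Redis.Redis, expectedTimestamp: number, deltaCounter: number, deltaTimestamp: number): Promise<void> {
	// Read the canary and apply the drip in one atomic transaction.
	const results = await redis.multi([
		['get', 'timestamp'],
		['incrby', 'timestamp', deltaTimestamp],
		['decrby', 'counter', deltaCounter],
	]).exec();
	if (!results) throw new Error('Redis error: transaction conflict');

	const canaryRaw = results[0][1] as string | null;
	const canaryTimestamp = canaryRaw ? parseInt(canaryRaw, 10) : 0;

	// Conflict: another node dripped first. Revert by executing the inverse operations.
	if (canaryTimestamp !== expectedTimestamp) {
		await redis.multi([
			['decrby', 'timestamp', deltaTimestamp],
			['incrby', 'counter', deltaCounter],
		]).exec();
	}
}
```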
## Algorithm Pseudocode

The Atomic Leaky Bucket algorithm is described here, in pseudocode:

```
# Terms
# * Now - UNIX timestamp of the current moment
# * Bucket Size - Maximum number of requests allowed in the bucket
# * Counter - Number of requests in the bucket
# * Drip Rate - How often to decrement counter
# * Drip Size - How much to decrement the counter
# * Timestamp - UNIX timestamp of last bucket drip
# * Delta Counter - Difference between current and expected counter value
# * Delta Timestamp - Difference between current and expected timestamp value

# 0 - Calculations
dripRate = ceil(limit.dripRate ?? 1000);
dripSize = ceil(limit.dripSize ?? 1);
bucketSize = max(ceil(limit.size / factor), 1);
maxExpiration = max(ceil((dripRate * ceil(bucketSize / dripSize)) / 1000), 1);

# 1 - Read
MULTI
  GET 'counter' INTO counter
  GET 'timestamp' INTO timestamp
EXEC

# 2 - Drip
if (counter > 0) {
  # Deltas
  deltaCounter = floor((now - timestamp) / dripRate) * dripSize;
  deltaCounter = min(deltaCounter, counter);
  deltaTimestamp = deltaCounter * dripRate;
  if (deltaCounter > 0) {
    # Update
    expectedTimestamp = timestamp
    MULTI
      GET 'timestamp' INTO canaryTimestamp
      INCRBY 'timestamp' deltaTimestamp
      EXPIRE 'timestamp' maxExpiration
      GET 'timestamp' INTO timestamp
      DECRBY 'counter' deltaCounter
      EXPIRE 'counter' maxExpiration
      GET 'counter' INTO counter
    EXEC
    # Rollback
    if (canaryTimestamp != expectedTimestamp) {
      MULTI
        DECRBY 'timestamp' deltaTimestamp
        GET 'timestamp' INTO timestamp
        INCRBY 'counter' deltaCounter
        GET 'counter' INTO counter
      EXEC
    }
  }
}

# 3 - Check
blocked = counter >= bucketSize
if (!blocked) {
  if (timestamp == 0) {
    # Edge case - set the initial value for timestamp.
    # Otherwise the first request will immediately drip away.
    MULTI
      SET 'timestamp', now
      EXPIRE 'timestamp' maxExpiration
      INCR 'counter'
      EXPIRE 'counter' maxExpiration
      GET 'counter' INTO counter
    EXEC
  } else {
    MULTI
      INCR 'counter'
      EXPIRE 'counter' maxExpiration
      GET 'counter' INTO counter
    EXEC
  }
}

# 4 - Handle
if (blocked) {
  # Application-specific code goes here.
  # At this point blocked, counter, and timestamp are all accurate and synced to redis.
  # Caller can apply limits, calculate headers, log audit failure, or anything else.
}
```
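For reference, the limit status surfaced at step 4 (the values returned through `LimitInfo` and the headers above) is derived from the final counter and last-drip timestamp. The sketch below is simplified from the `limitBucket()` changes in this MR, with parameters passed explicitly instead of being read from the limit definition.

```ts
// Simplified from SkRateLimiterService.limitBucket(): derive client-facing
// limit status from the post-check counter and last-drip timestamp.
function calcLimitStatus(counter: number, timestamp: number, now: number, bucketSize: number, dripRate: number, dripSize: number) {
	// Time needed to free up one bucket slot for the *next* request.
	const overflow = Math.max((counter + 1) - bucketSize, 0);
	const dripsNeeded = Math.ceil(overflow / dripSize);
	const timeNeeded = Math.max((dripRate * dripsNeeded) - (now - timestamp), 0);

	const remaining = Math.max(bucketSize - counter, 0);
	const resetMs = timeNeeded;
	const resetSec = Math.ceil(resetMs / 1000);
	const fullResetMs = Math.ceil(counter / dripSize) * dripRate;
	const fullResetSec = Math.ceil(fullResetMs / 1000);

	return { remaining, resetSec, resetMs, fullResetSec, fullResetMs };
}

// Example: a full bucket of 10 with dripRate = 1000ms and dripSize = 1, just after a drip:
// calcLimitStatus(10, 0, 0, 10, 1000, 1)
//   => { remaining: 0, resetSec: 1, resetMs: 1000, fullResetSec: 10, fullResetMs: 10000 }
```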
## Notes, Resources, and Further Reading

* https://en.wikipedia.org/wiki/Leaky_bucket#As_a_meter
* https://ietf-wg-httpapi.github.io/ratelimit-headers/darrelmiller-policyname/draft-ietf-httpapi-ratelimit-headers.txt
* https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
* https://stackoverflow.com/a/16022625
|
@ -5,16 +5,13 @@
|
|||
|
||||
import { Inject, Injectable } from '@nestjs/common';
|
||||
import Redis from 'ioredis';
|
||||
import { LoggerService } from '@/core/LoggerService.js';
|
||||
import { TimeService } from '@/core/TimeService.js';
|
||||
import { EnvService } from '@/core/EnvService.js';
|
||||
import { BucketRateLimit, LegacyRateLimit, LimitInfo, RateLimit, hasMinLimit, isLegacyRateLimit, Keyed, hasMaxLimit, disabledLimitInfo, MaxLegacyLimit, MinLegacyLimit } from '@/misc/rate-limit-utils.js';
|
||||
import { DI } from '@/di-symbols.js';
|
||||
import type Logger from '@/logger.js';
|
||||
import { BucketRateLimit, LegacyRateLimit, LimitInfo, RateLimit, hasMinLimit, isLegacyRateLimit, Keyed } from '@/misc/rate-limit-utils.js';
|
||||
|
||||
@Injectable()
|
||||
export class SkRateLimiterService {
|
||||
private readonly logger: Logger;
|
||||
private readonly disabled: boolean;
|
||||
|
||||
constructor(
|
||||
|
@ -24,32 +21,31 @@ export class SkRateLimiterService {
|
|||
@Inject(DI.redis)
|
||||
private readonly redisClient: Redis.Redis,
|
||||
|
||||
@Inject(LoggerService)
|
||||
loggerService: LoggerService,
|
||||
|
||||
@Inject(EnvService)
|
||||
envService: EnvService,
|
||||
) {
|
||||
this.logger = loggerService.getLogger('limiter');
|
||||
this.disabled = envService.env.NODE_ENV !== 'production'; // TODO disable in TEST *only*
|
||||
this.disabled = envService.env.NODE_ENV === 'test';
|
||||
}
|
||||
|
||||
/**
|
||||
* Check & increment a rate limit
|
||||
* @param limit The limit definition
|
||||
* @param actor Client who is calling this limit
|
||||
* @param factor Scaling factor - smaller = larger limit (less restrictive)
|
||||
*/
|
||||
public async limit(limit: Keyed<RateLimit>, actor: string, factor = 1): Promise<LimitInfo> {
|
||||
if (this.disabled || factor === 0) {
|
||||
return {
|
||||
blocked: false,
|
||||
remaining: Number.MAX_SAFE_INTEGER,
|
||||
resetSec: 0,
|
||||
resetMs: 0,
|
||||
fullResetSec: 0,
|
||||
fullResetMs: 0,
|
||||
};
|
||||
return disabledLimitInfo;
|
||||
}
|
||||
|
||||
if (factor < 0) {
|
||||
throw new Error(`Rate limit factor is zero or negative: ${factor}`);
|
||||
}
|
||||
|
||||
return await this.tryLimit(limit, actor, factor);
|
||||
}
|
||||
|
||||
private async tryLimit(limit: Keyed<RateLimit>, actor: string, factor: number): Promise<LimitInfo> {
|
||||
if (isLegacyRateLimit(limit)) {
|
||||
return await this.limitLegacy(limit, actor, factor);
|
||||
} else {
|
||||
|
@ -58,141 +54,200 @@ export class SkRateLimiterService {
|
|||
}
|
||||
|
||||
private async limitLegacy(limit: Keyed<LegacyRateLimit>, actor: string, factor: number): Promise<LimitInfo> {
|
||||
const promises: Promise<LimitInfo | null>[] = [];
|
||||
|
||||
// The "min" limit - if present - is handled directly.
|
||||
if (hasMinLimit(limit)) {
|
||||
promises.push(
|
||||
this.limitMin(limit, actor, factor),
|
||||
);
|
||||
if (hasMaxLimit(limit)) {
|
||||
return await this.limitLegacyMinMax(limit, actor, factor);
|
||||
} else if (hasMinLimit(limit)) {
|
||||
return await this.limitLegacyMinOnly(limit, actor, factor);
|
||||
} else {
|
||||
return disabledLimitInfo;
|
||||
}
|
||||
|
||||
// Convert the "max" limit into a leaky bucket with 1 drip / second rate.
|
||||
if (limit.max != null && limit.duration != null) {
|
||||
promises.push(
|
||||
this.limitBucket({
|
||||
type: 'bucket',
|
||||
key: limit.key,
|
||||
size: limit.max,
|
||||
dripRate: Math.max(Math.round(limit.duration / limit.max), 1),
|
||||
}, actor, factor),
|
||||
);
|
||||
}
|
||||
|
||||
const [lim1, lim2] = await Promise.all(promises);
|
||||
return {
|
||||
blocked: (lim1?.blocked || lim2?.blocked) ?? false,
|
||||
remaining: Math.min(lim1?.remaining ?? Number.MAX_SAFE_INTEGER, lim2?.remaining ?? Number.MAX_SAFE_INTEGER),
|
||||
resetSec: Math.max(lim1?.resetSec ?? 0, lim2?.resetSec ?? 0),
|
||||
resetMs: Math.max(lim1?.resetMs ?? 0, lim2?.resetMs ?? 0),
|
||||
fullResetSec: Math.max(lim1?.fullResetSec ?? 0, lim2?.fullResetSec ?? 0),
|
||||
fullResetMs: Math.max(lim1?.fullResetMs ?? 0, lim2?.fullResetMs ?? 0),
|
||||
};
|
||||
}
|
||||
|
||||
private async limitMin(limit: Keyed<LegacyRateLimit> & { minInterval: number }, actor: string, factor: number): Promise<LimitInfo | null> {
|
||||
if (limit.minInterval === 0) return null;
|
||||
private async limitLegacyMinMax(limit: Keyed<MaxLegacyLimit>, actor: string, factor: number): Promise<LimitInfo> {
|
||||
if (limit.duration === 0) return disabledLimitInfo;
|
||||
if (limit.duration < 0) throw new Error(`Invalid rate limit ${limit.key}: duration is negative (${limit.duration})`);
|
||||
if (limit.max < 1) throw new Error(`Invalid rate limit ${limit.key}: max is less than 1 (${limit.max})`);
|
||||
|
||||
// Derive initial dripRate from minInterval OR duration/max.
|
||||
const initialDripRate = Math.max(limit.minInterval ?? Math.round(limit.duration / limit.max), 1);
|
||||
|
||||
// Calculate dripSize to reach max at exactly duration
|
||||
const dripSize = Math.max(Math.round(limit.max / (limit.duration / initialDripRate)), 1);
|
||||
|
||||
// Calculate final dripRate from dripSize and duration/max
|
||||
const dripRate = Math.max(Math.round(limit.duration / (limit.max / dripSize)), 1);
|
||||
|
||||
const bucketLimit: Keyed<BucketRateLimit> = {
|
||||
type: 'bucket',
|
||||
key: limit.key,
|
||||
size: limit.max,
|
||||
dripRate,
|
||||
dripSize,
|
||||
};
|
||||
return await this.limitBucket(bucketLimit, actor, factor);
|
||||
}
|
||||
|
||||
private async limitLegacyMinOnly(limit: Keyed<MinLegacyLimit>, actor: string, factor: number): Promise<LimitInfo> {
|
||||
if (limit.minInterval === 0) return disabledLimitInfo;
|
||||
if (limit.minInterval < 0) throw new Error(`Invalid rate limit ${limit.key}: minInterval is negative (${limit.minInterval})`);
|
||||
|
||||
const counter = await this.getLimitCounter(limit, actor, 'min');
|
||||
const minInterval = Math.max(Math.ceil(limit.minInterval * factor), 0);
|
||||
|
||||
// Update expiration
|
||||
if (counter.c > 0) {
|
||||
const isCleared = this.timeService.now - counter.t >= minInterval;
|
||||
if (isCleared) {
|
||||
counter.c = 0;
|
||||
}
|
||||
}
|
||||
|
||||
const blocked = counter.c > 0;
|
||||
if (!blocked) {
|
||||
counter.c++;
|
||||
counter.t = this.timeService.now;
|
||||
}
|
||||
|
||||
// Calculate limit status
|
||||
const resetMs = Math.max(Math.ceil(minInterval - (this.timeService.now - counter.t)), 0);
|
||||
const resetSec = Math.ceil(resetMs / 1000);
|
||||
const limitInfo: LimitInfo = { blocked, remaining: 0, resetSec, resetMs, fullResetSec: resetSec, fullResetMs: resetMs };
|
||||
|
||||
// Update the limit counter, but not if blocked
|
||||
if (!blocked) {
|
||||
// Don't await, or we will slow down the API.
|
||||
this.setLimitCounter(limit, actor, counter, resetSec, 'min')
|
||||
.catch(err => this.logger.error(`Failed to update limit ${limit.key}:min for ${actor}:`, err));
|
||||
}
|
||||
|
||||
return limitInfo;
|
||||
const dripRate = Math.max(Math.round(limit.minInterval), 1);
|
||||
const bucketLimit: Keyed<BucketRateLimit> = {
|
||||
type: 'bucket',
|
||||
key: limit.key,
|
||||
size: 1,
|
||||
dripRate,
|
||||
dripSize: 1,
|
||||
};
|
||||
return await this.limitBucket(bucketLimit, actor, factor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation of Leaky Bucket rate limiting - see SkRateLimiterService.md for details.
|
||||
*/
|
||||
private async limitBucket(limit: Keyed<BucketRateLimit>, actor: string, factor: number): Promise<LimitInfo> {
|
||||
if (limit.size < 1) throw new Error(`Invalid rate limit ${limit.key}: size is less than 1 (${limit.size})`);
|
||||
if (limit.dripRate != null && limit.dripRate < 1) throw new Error(`Invalid rate limit ${limit.key}: dripRate is less than 1 (${limit.dripRate})`);
|
||||
if (limit.dripSize != null && limit.dripSize < 1) throw new Error(`Invalid rate limit ${limit.key}: dripSize is less than 1 (${limit.dripSize})`);
|
||||
|
||||
const counter = await this.getLimitCounter(limit, actor, 'bucket');
|
||||
// 0 - Calculate
|
||||
const now = this.timeService.now;
|
||||
const bucketSize = Math.max(Math.ceil(limit.size / factor), 1);
|
||||
const dripRate = Math.ceil(limit.dripRate ?? 1000);
|
||||
const dripSize = Math.ceil(limit.dripSize ?? 1);
|
||||
const expirationSec = Math.max(Math.ceil((dripRate * Math.ceil(bucketSize / dripSize)) / 1000), 1);
|
||||
|
||||
// Update drips
|
||||
if (counter.c > 0) {
|
||||
const dripsSinceLastTick = Math.floor((this.timeService.now - counter.t) / dripRate) * dripSize;
|
||||
counter.c = Math.max(counter.c - dripsSinceLastTick, 0);
|
||||
// 1 - Read
|
||||
const counterKey = createLimitKey(limit, actor, 'c');
|
||||
const timestampKey = createLimitKey(limit, actor, 't');
|
||||
const counter = await this.getLimitCounter(counterKey, timestampKey);
|
||||
|
||||
// 2 - Drip
|
||||
const dripsSinceLastTick = Math.floor((now - counter.timestamp) / dripRate) * dripSize;
|
||||
const deltaCounter = Math.min(dripsSinceLastTick, counter.counter);
|
||||
const deltaTimestamp = dripsSinceLastTick * dripRate;
|
||||
if (deltaCounter > 0) {
|
||||
// Execute the next drip(s)
|
||||
const results = await this.executeRedisMulti(
|
||||
['get', timestampKey],
|
||||
['incrby', timestampKey, deltaTimestamp],
|
||||
['expire', timestampKey, expirationSec],
|
||||
['get', timestampKey],
|
||||
['decrby', counterKey, deltaCounter],
|
||||
['expire', counterKey, expirationSec],
|
||||
['get', counterKey],
|
||||
);
|
||||
const expectedTimestamp = counter.timestamp;
|
||||
const canaryTimestamp = results[0] ? parseInt(results[0]) : 0;
|
||||
counter.timestamp = results[3] ? parseInt(results[3]) : 0;
|
||||
counter.counter = results[6] ? parseInt(results[6]) : 0;
|
||||
|
||||
// Check for a data collision and rollback
|
||||
if (canaryTimestamp !== expectedTimestamp) {
|
||||
const rollbackResults = await this.executeRedisMulti(
|
||||
['decrby', timestampKey, deltaTimestamp],
|
||||
['get', timestampKey],
|
||||
['incrby', counterKey, deltaCounter],
|
||||
['get', counterKey],
|
||||
);
|
||||
counter.timestamp = rollbackResults[1] ? parseInt(rollbackResults[1]) : 0;
|
||||
counter.counter = rollbackResults[3] ? parseInt(rollbackResults[3]) : 0;
|
||||
}
|
||||
}
|
||||
|
||||
const blocked = counter.c >= bucketSize;
|
||||
// 3 - Check
|
||||
const blocked = counter.counter >= bucketSize;
|
||||
if (!blocked) {
|
||||
counter.c++;
|
||||
counter.t = this.timeService.now;
|
||||
if (counter.timestamp === 0) {
|
||||
const results = await this.executeRedisMulti(
|
||||
['set', timestampKey, now],
|
||||
['expire', timestampKey, expirationSec],
|
||||
['incr', counterKey],
|
||||
['expire', counterKey, expirationSec],
|
||||
['get', counterKey],
|
||||
);
|
||||
counter.timestamp = now;
|
||||
counter.counter = results[4] ? parseInt(results[4]) : 0;
|
||||
} else {
|
||||
const results = await this.executeRedisMulti(
|
||||
['incr', counterKey],
|
||||
['expire', counterKey, expirationSec],
|
||||
['get', counterKey],
|
||||
);
|
||||
counter.counter = results[2] ? parseInt(results[2]) : 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate how much time is needed to free up a bucket slot
|
||||
const overflow = Math.max((counter.counter + 1) - bucketSize, 0);
|
||||
const dripsNeeded = Math.ceil(overflow / dripSize);
|
||||
const timeNeeded = Math.max((dripRate * dripsNeeded) - (this.timeService.now - counter.timestamp), 0);
|
||||
|
||||
// Calculate limit status
|
||||
const remaining = Math.max(bucketSize - counter.c, 0);
|
||||
const resetMs = remaining > 0 ? 0 : Math.max(dripRate - (this.timeService.now - counter.t), 0);
|
||||
const remaining = Math.max(bucketSize - counter.counter, 0);
|
||||
const resetMs = timeNeeded;
|
||||
const resetSec = Math.ceil(resetMs / 1000);
|
||||
const fullResetMs = Math.ceil(counter.c / dripSize) * dripRate;
|
||||
const fullResetMs = Math.ceil(counter.counter / dripSize) * dripRate;
|
||||
const fullResetSec = Math.ceil(fullResetMs / 1000);
|
||||
const limitInfo: LimitInfo = { blocked, remaining, resetSec, resetMs, fullResetSec, fullResetMs };
|
||||
return { blocked, remaining, resetSec, resetMs, fullResetSec, fullResetMs };
|
||||
}
|
||||
|
||||
// Update the limit counter, but not if blocked
|
||||
if (!blocked) {
|
||||
// Don't await, or we will slow down the API.
|
||||
this.setLimitCounter(limit, actor, counter, fullResetSec, 'bucket')
|
||||
.catch(err => this.logger.error(`Failed to update limit ${limit.key} for ${actor}:`, err));
|
||||
private async getLimitCounter(counterKey: string, timestampKey: string): Promise<LimitCounter> {
|
||||
const [counter, timestamp] = await this.executeRedisMulti(
|
||||
['get', counterKey],
|
||||
['get', timestampKey],
|
||||
);
|
||||
|
||||
return {
|
||||
counter: counter ? parseInt(counter) : 0,
|
||||
timestamp: timestamp ? parseInt(timestamp) : 0,
|
||||
};
|
||||
}
|
||||
|
||||
private async executeRedisMulti(...batch: RedisCommand[]): Promise<RedisResult[]> {
|
||||
const results = await this.redisClient.multi(batch).exec();
|
||||
|
||||
// Transaction conflict (retryable)
|
||||
if (!results) {
|
||||
throw new ConflictError('Redis error: transaction conflict');
|
||||
}
|
||||
|
||||
return limitInfo;
|
||||
}
|
||||
|
||||
private async getLimitCounter(limit: Keyed<RateLimit>, actor: string, subject: string): Promise<LimitCounter> {
|
||||
const key = createLimitKey(limit, actor, subject);
|
||||
|
||||
const value = await this.redisClient.get(key);
|
||||
if (value == null) {
|
||||
return { t: 0, c: 0 };
|
||||
// Transaction failed (fatal)
|
||||
if (results.length !== batch.length) {
|
||||
throw new Error('Redis error: failed to execute batch');
|
||||
}
|
||||
|
||||
return JSON.parse(value);
|
||||
}
|
||||
// Map responses
|
||||
const errors: Error[] = [];
|
||||
const responses: RedisResult[] = [];
|
||||
for (const [error, response] of results) {
|
||||
if (error) errors.push(error);
|
||||
responses.push(response as RedisResult);
|
||||
}
|
||||
|
||||
private async setLimitCounter(limit: Keyed<RateLimit>, actor: string, counter: LimitCounter, expiration: number, subject: string): Promise<void> {
|
||||
const key = createLimitKey(limit, actor, subject);
|
||||
const value = JSON.stringify(counter);
|
||||
const expirationSec = Math.max(expiration, 1);
|
||||
await this.redisClient.set(key, value, 'EX', expirationSec);
|
||||
// Command failed (fatal)
|
||||
if (errors.length > 0) {
|
||||
const errorMessages = errors
|
||||
.map((e, i) => `Error in command ${i}: ${e}`)
|
||||
.join('\', \'');
|
||||
throw new AggregateError(errors, `Redis error: failed to execute command(s): '${errorMessages}'`);
|
||||
}
|
||||
|
||||
return responses;
|
||||
}
|
||||
}
|
||||
|
||||
function createLimitKey(limit: Keyed<RateLimit>, actor: string, subject: string): string {
|
||||
return `rl_${actor}_${limit.key}_${subject}`;
|
||||
// Not correct, but good enough for the basic commands we use.
|
||||
type RedisResult = string | null;
|
||||
type RedisCommand = [command: string, ...args: unknown[]];
|
||||
|
||||
function createLimitKey(limit: Keyed<RateLimit>, actor: string, value: string): string {
|
||||
return `rl_${actor}_${limit.key}_${value}`;
|
||||
}
|
||||
|
||||
export interface LimitCounter {
|
||||
/** Timestamp */
|
||||
t: number;
|
||||
class ConflictError extends Error {}
|
||||
|
||||
/** Counter */
|
||||
c: number;
|
||||
interface LimitCounter {
|
||||
timestamp: number;
|
||||
counter: number;
|
||||
}
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -17,10 +17,11 @@ export const meta = {
|
|||
allowGet: true,
|
||||
cacheSec: 60 * 60,
|
||||
|
||||
// 10 calls per 5 seconds
|
||||
// Burst up to 100, then 2/sec average
|
||||
limit: {
|
||||
duration: 1000 * 5,
|
||||
max: 10,
|
||||
type: 'bucket',
|
||||
size: 100,
|
||||
dripRate: 500,
|
||||
},
|
||||
} as const;
|
||||
|
||||
|
|
|
@ -3,25 +3,19 @@
|
|||
* SPDX-License-Identifier: AGPL-3.0-only
|
||||
*/
|
||||
|
||||
import { KEYWORD } from 'color-convert/conversions.js';
|
||||
import { jest } from '@jest/globals';
|
||||
import type Redis from 'ioredis';
|
||||
import { LimitCounter, SkRateLimiterService } from '@/server/api/SkRateLimiterService.js';
|
||||
import { LoggerService } from '@/core/LoggerService.js';
|
||||
import { SkRateLimiterService } from '@/server/api/SkRateLimiterService.js';
|
||||
import { BucketRateLimit, Keyed, LegacyRateLimit } from '@/misc/rate-limit-utils.js';
|
||||
|
||||
/* eslint-disable @typescript-eslint/no-non-null-assertion */
|
||||
/* eslint-disable @typescript-eslint/no-unnecessary-condition */
|
||||
|
||||
describe(SkRateLimiterService, () => {
|
||||
let mockTimeService: { now: number, date: Date } = null!;
|
||||
let mockRedisGet: ((key: string) => string | null) | undefined = undefined;
|
||||
let mockRedisSet: ((args: unknown[]) => void) | undefined = undefined;
|
||||
let mockRedis: Array<(command: [string, ...unknown[]]) => [Error | null, unknown] | null> = null!;
|
||||
let mockRedisExec: (batch: [string, ...unknown[]][]) => Promise<[Error | null, unknown][] | null> = null!;
|
||||
let mockEnvironment: Record<string, string | undefined> = null!;
|
||||
let serviceUnderTest: () => SkRateLimiterService = null!;
|
||||
|
||||
let loggedMessages: { level: string, data: unknown[] }[] = [];
|
||||
|
||||
beforeEach(() => {
|
||||
mockTimeService = {
|
||||
now: 0,
|
||||
|
@ -30,15 +24,41 @@ describe(SkRateLimiterService, () => {
|
|||
},
|
||||
};
|
||||
|
||||
mockRedisGet = undefined;
|
||||
mockRedisSet = undefined;
|
||||
function callMockRedis(command: [string, ...unknown[]]) {
|
||||
const handlerResults = mockRedis.map(handler => handler(command));
|
||||
const finalResult = handlerResults.findLast(result => result != null);
|
||||
return finalResult ?? [null, null];
|
||||
}
|
||||
|
||||
// I apologize to anyone who tries to read this later 🥲
|
||||
mockRedis = [];
|
||||
mockRedisExec = (batch) => {
|
||||
const results: [Error | null, unknown][] = batch.map(command => {
|
||||
return callMockRedis(command);
|
||||
});
|
||||
return Promise.resolve(results);
|
||||
};
|
||||
const mockRedisClient = {
|
||||
get(key: string) {
|
||||
if (mockRedisGet) return Promise.resolve(mockRedisGet(key));
|
||||
else return Promise.resolve(null);
|
||||
watch(...args: unknown[]) {
|
||||
const result = callMockRedis(['watch', ...args]);
|
||||
return Promise.resolve(result[0] ?? result[1]);
|
||||
},
|
||||
set(...args: unknown[]): Promise<void> {
|
||||
if (mockRedisSet) mockRedisSet(args);
|
||||
get(...args: unknown[]) {
|
||||
const result = callMockRedis(['get', ...args]);
|
||||
return Promise.resolve(result[0] ?? result[1]);
|
||||
},
|
||||
set(...args: unknown[]) {
|
||||
const result = callMockRedis(['set', ...args]);
|
||||
return Promise.resolve(result[0] ?? result[1]);
|
||||
},
|
||||
multi(batch: [string, ...unknown[]][]) {
|
||||
return {
|
||||
exec() {
|
||||
return mockRedisExec(batch);
|
||||
},
|
||||
};
|
||||
},
|
||||
reset() {
|
||||
return Promise.resolve();
|
||||
},
|
||||
} as unknown as Redis.Redis;
|
||||
|
@ -49,89 +69,98 @@ describe(SkRateLimiterService, () => {
|
|||
env: mockEnvironment,
|
||||
};
|
||||
|
||||
loggedMessages = [];
|
||||
const mockLogService = {
|
||||
getLogger() {
|
||||
return {
|
||||
createSubLogger(context: string, color?: KEYWORD) {
|
||||
return mockLogService.getLogger(context, color);
|
||||
},
|
||||
error(...data: unknown[]) {
|
||||
loggedMessages.push({ level: 'error', data });
|
||||
},
|
||||
warn(...data: unknown[]) {
|
||||
loggedMessages.push({ level: 'warn', data });
|
||||
},
|
||||
succ(...data: unknown[]) {
|
||||
loggedMessages.push({ level: 'succ', data });
|
||||
},
|
||||
debug(...data: unknown[]) {
|
||||
loggedMessages.push({ level: 'debug', data });
|
||||
},
|
||||
info(...data: unknown[]) {
|
||||
loggedMessages.push({ level: 'info', data });
|
||||
},
|
||||
};
|
||||
},
|
||||
} as unknown as LoggerService;
|
||||
|
||||
let service: SkRateLimiterService | undefined = undefined;
|
||||
serviceUnderTest = () => {
|
||||
return service ??= new SkRateLimiterService(mockTimeService, mockRedisClient, mockLogService, mockEnvService);
|
||||
return service ??= new SkRateLimiterService(mockTimeService, mockRedisClient, mockEnvService);
|
||||
};
|
||||
});
|
||||
|
||||
function expectNoUnhandledErrors() {
|
||||
const unhandledErrors = loggedMessages.filter(m => m.level === 'error');
|
||||
if (unhandledErrors.length > 0) {
|
||||
throw new Error(`Test failed: got unhandled errors ${unhandledErrors.join('\n')}`);
|
||||
}
|
||||
}
|
||||
|
||||
describe('limit', () => {
|
||||
const actor = 'actor';
|
||||
const key = 'test';
|
||||
|
||||
let counter: LimitCounter | undefined = undefined;
|
||||
let minCounter: LimitCounter | undefined = undefined;
|
||||
let limitCounter: number | undefined = undefined;
|
||||
let limitTimestamp: number | undefined = undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
counter = undefined;
|
||||
minCounter = undefined;
|
||||
limitCounter = undefined;
|
||||
limitTimestamp = undefined;
|
||||
|
||||
mockRedisGet = (key: string) => {
|
||||
if (key === 'rl_actor_test_bucket' && counter) {
|
||||
return JSON.stringify(counter);
|
||||
mockRedis.push(([command, ...args]) => {
|
||||
if (command === 'get') {
|
||||
if (args[0] === 'rl_actor_test_c') {
|
||||
const data = limitCounter?.toString() ?? null;
|
||||
return [null, data];
|
||||
}
|
||||
if (args[0] === 'rl_actor_test_t') {
|
||||
const data = limitTimestamp?.toString() ?? null;
|
||||
return [null, data];
|
||||
}
|
||||
}
|
||||
|
||||
if (key === 'rl_actor_test_min' && minCounter) {
|
||||
return JSON.stringify(minCounter);
|
||||
if (command === 'set') {
|
||||
if (args[0] === 'rl_actor_test_c') {
|
||||
limitCounter = parseInt(args[1] as string);
|
||||
return [null, args[1]];
|
||||
}
|
||||
if (args[0] === 'rl_actor_test_t') {
|
||||
limitTimestamp = parseInt(args[1] as string);
|
||||
return [null, args[1]];
|
||||
}
|
||||
}
|
||||
|
||||
if (command === 'incr') {
|
||||
if (args[0] === 'rl_actor_test_c') {
|
||||
limitCounter = (limitCounter ?? 0) + 1;
|
||||
return [null, null];
|
||||
}
|
||||
if (args[0] === 'rl_actor_test_t') {
|
||||
limitTimestamp = (limitTimestamp ?? 0) + 1;
|
||||
return [null, null];
|
||||
}
|
||||
}
|
||||
|
||||
if (command === 'incrby') {
|
||||
if (args[0] === 'rl_actor_test_c') {
|
||||
limitCounter = (limitCounter ?? 0) + parseInt(args[1] as string);
|
||||
return [null, null];
|
||||
}
|
||||
if (args[0] === 'rl_actor_test_t') {
|
||||
limitTimestamp = (limitTimestamp ?? 0) + parseInt(args[1] as string);
|
||||
return [null, null];
|
||||
}
|
||||
}
|
||||
|
||||
if (command === 'decr') {
|
||||
if (args[0] === 'rl_actor_test_c') {
|
||||
limitCounter = (limitCounter ?? 0) - 1;
|
||||
return [null, null];
|
||||
}
|
||||
if (args[0] === 'rl_actor_test_t') {
|
||||
limitTimestamp = (limitTimestamp ?? 0) - 1;
|
||||
return [null, null];
|
||||
}
|
||||
}
|
||||
|
||||
if (command === 'decrby') {
|
||||
if (args[0] === 'rl_actor_test_c') {
|
||||
limitCounter = (limitCounter ?? 0) - parseInt(args[1] as string);
|
||||
return [null, null];
|
||||
}
|
||||
if (args[0] === 'rl_actor_test_t') {
|
||||
limitTimestamp = (limitTimestamp ?? 0) - parseInt(args[1] as string);
|
||||
return [null, null];
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
mockRedisSet = (args: unknown[]) => {
|
||||
const [key, value] = args;
|
||||
|
||||
if (key === 'rl_actor_test_bucket') {
|
||||
if (value == null) counter = undefined;
|
||||
else if (typeof(value) === 'string') counter = JSON.parse(value);
|
||||
else throw new Error('invalid redis call');
|
||||
}
|
||||
|
||||
if (key === 'rl_actor_test_min') {
|
||||
if (value == null) minCounter = undefined;
|
||||
else if (typeof(value) === 'string') minCounter = JSON.parse(value);
|
||||
else throw new Error('invalid redis call');
|
||||
}
|
||||
};
|
||||
});
|
||||
});
|
||||
|
||||
it('should bypass in non-production', async () => {
|
||||
it('should bypass in test environment', async () => {
|
||||
mockEnvironment.NODE_ENV = 'test';
|
||||
|
||||
const info = await serviceUnderTest().limit({ key: 'l', type: undefined, max: 0 }, 'actor');
|
||||
const info = await serviceUnderTest().limit({ key: 'l', type: undefined, max: 0 }, actor);
|
||||
|
||||
expect(info.blocked).toBeFalsy();
|
||||
expect(info.remaining).toBe(Number.MAX_SAFE_INTEGER);
|
||||
|
@ -158,15 +187,10 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should not error when allowed', async () => {
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expectNoUnhandledErrors();
|
||||
});
|
||||
|
||||
it('should return correct info when allowed', async () => {
|
||||
limit.size = 2;
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -180,8 +204,7 @@ describe(SkRateLimiterService, () => {
|
|||
it('should increment counter when called', async () => {
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter).not.toBeUndefined();
|
||||
expect(counter?.c).toBe(1);
|
||||
expect(limitCounter).toBe(1);
|
||||
});
|
||||
|
||||
it('should set timestamp when called', async () => {
|
||||
|
@ -189,29 +212,28 @@ describe(SkRateLimiterService, () => {
|
|||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter).not.toBeUndefined();
|
||||
expect(counter?.t).toBe(1000);
|
||||
expect(limitTimestamp).toBe(1000);
|
||||
});
|
||||
|
||||
it('should decrement counter when dripRate has passed', async () => {
|
||||
counter = { c: 2, t: 0 };
|
||||
limitCounter = 2;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now = 2000;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter).not.toBeUndefined();
|
||||
expect(counter?.c).toBe(1); // 2 (starting) - 2 (2x1 drip) + 1 (call) = 1
|
||||
expect(limitCounter).toBe(1); // 2 (starting) - 2 (2x1 drip) + 1 (call) = 1
|
||||
});
|
||||
|
||||
it('should decrement counter by dripSize', async () => {
|
||||
counter = { c: 2, t: 0 };
|
||||
limitCounter = 2;
|
||||
limitTimestamp = 0;
|
||||
limit.dripSize = 2;
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter).not.toBeUndefined();
|
||||
expect(counter?.c).toBe(1); // 2 (starting) - 2 (1x2 drip) + 1 (call) = 1
|
||||
expect(limitCounter).toBe(1); // 2 (starting) - 2 (1x2 drip) + 1 (call) = 1
|
||||
});
|
||||
|
||||
it('should maintain counter between calls over time', async () => {
|
||||
|
@ -226,25 +248,13 @@ describe(SkRateLimiterService, () => {
|
|||
mockTimeService.now += 1000; // 2 - 1 = 1
|
||||
await serviceUnderTest().limit(limit, actor); // 1 + 1 = 2
|
||||
|
||||
expect(counter?.c).toBe(2);
|
||||
expect(counter?.t).toBe(3000);
|
||||
});
|
||||
|
||||
it('should log error and continue when update fails', async () => {
|
||||
mockRedisSet = () => {
|
||||
throw new Error('test error');
|
||||
};
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
const matchingError = loggedMessages
|
||||
.find(m => m.level === 'error' && m.data
|
||||
.some(d => typeof(d) === 'string' && d.includes('Failed to update limit')));
|
||||
expect(matchingError).toBeTruthy();
|
||||
expect(limitCounter).toBe(2);
|
||||
expect(limitTimestamp).toBe(3000);
|
||||
});
|
||||
|
||||
it('should block when bucket is filled', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -252,7 +262,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should calculate correct info when blocked', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -263,7 +274,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should allow when bucket is filled but should drip', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
@ -272,7 +284,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should scale limit by factor', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const i1 = await serviceUnderTest().limit(limit, actor, 0.5); // 1 + 1 = 2
|
||||
const i2 = await serviceUnderTest().limit(limit, actor, 0.5); // 2 + 1 = 3
|
||||
|
@ -281,23 +294,39 @@ describe(SkRateLimiterService, () => {
|
|||
expect(i2.blocked).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should set key expiration', async () => {
|
||||
const mock = jest.fn(mockRedisSet);
|
||||
mockRedisSet = mock;
|
||||
it('should set counter expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(mock).toHaveBeenCalledWith(['rl_actor_test_bucket', '{"t":0,"c":1}', 'EX', 1]);
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 1]);
|
||||
});
|
||||
|
||||
it('should set timestamp expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 1]);
|
||||
});
|
||||
|
||||
it('should not increment when already blocked', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 100;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter?.c).toBe(1);
|
||||
expect(counter?.t).toBe(0);
|
||||
expect(limitCounter).toBe(1);
|
||||
expect(limitTimestamp).toBe(0);
|
||||
});
|
||||
|
||||
it('should skip if factor is zero', async () => {
|
||||
|
@ -384,6 +413,19 @@ describe(SkRateLimiterService, () => {
|
|||
|
||||
await expect(promise).rejects.toThrow(/dripSize is less than 1/);
|
||||
});
|
||||
|
||||
it('should apply correction if extra calls slip through', async () => {
|
||||
limitCounter = 2;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeTruthy();
|
||||
expect(info.remaining).toBe(0);
|
||||
expect(info.resetMs).toBe(2000);
|
||||
expect(info.resetSec).toBe(2);
|
||||
expect(info.fullResetMs).toBe(2000);
|
||||
expect(info.fullResetSec).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('with min interval', () => {
|
||||
|
@ -403,12 +445,6 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should not error when allowed', async () => {
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expectNoUnhandledErrors();
|
||||
});
|
||||
|
||||
it('should calculate correct info when allowed', async () => {
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -422,8 +458,8 @@ describe(SkRateLimiterService, () => {
|
|||
it('should increment counter when called', async () => {
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(minCounter).not.toBeUndefined();
|
||||
expect(minCounter?.c).toBe(1);
|
||||
expect(limitCounter).not.toBeUndefined();
|
||||
expect(limitCounter).toBe(1);
|
||||
});
|
||||
|
||||
it('should set timestamp when called', async () => {
|
||||
|
@ -431,28 +467,19 @@ describe(SkRateLimiterService, () => {
|
|||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(minCounter).not.toBeUndefined();
|
||||
expect(minCounter?.t).toBe(1000);
|
||||
expect(limitCounter).not.toBeUndefined();
|
||||
expect(limitTimestamp).toBe(1000);
|
||||
});
|
||||
|
||||
it('should decrement counter when minInterval has passed', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(minCounter).not.toBeUndefined();
|
||||
expect(minCounter?.c).toBe(1); // 1 (starting) - 1 (interval) + 1 (call) = 1
|
||||
});
|
||||
|
||||
it('should reset counter entirely', async () => {
|
||||
minCounter = { c: 2, t: 0 };
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(minCounter).not.toBeUndefined();
|
||||
expect(minCounter?.c).toBe(1); // 2 (starting) - 2 (interval) + 1 (call) = 1
|
||||
expect(limitCounter).not.toBeUndefined();
|
||||
expect(limitCounter).toBe(1); // 1 (starting) - 1 (interval) + 1 (call) = 1
|
||||
});
|
||||
|
||||
it('should maintain counter between calls over time', async () => {
|
||||
|
@ -463,27 +490,16 @@ describe(SkRateLimiterService, () => {
|
|||
await serviceUnderTest().limit(limit, actor); // blocked
|
||||
mockTimeService.now += 1000; // 1 - 1 = 0
|
||||
mockTimeService.now += 1000; // 0 - 1 = 0
|
||||
await serviceUnderTest().limit(limit, actor); // 0 + 1 = 1
|
||||
const info = await serviceUnderTest().limit(limit, actor); // 0 + 1 = 1
|
||||
|
||||
expect(minCounter?.c).toBe(1);
|
||||
expect(minCounter?.t).toBe(3000);
|
||||
});
|
||||
|
||||
it('should log error and continue when update fails', async () => {
|
||||
mockRedisSet = () => {
|
||||
throw new Error('test error');
|
||||
};
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
const matchingError = loggedMessages
|
||||
.find(m => m.level === 'error' && m.data
|
||||
.some(d => typeof(d) === 'string' && d.includes('Failed to update limit')));
|
||||
expect(matchingError).toBeTruthy();
|
||||
expect(info.blocked).toBeFalsy();
|
||||
expect(limitCounter).toBe(1);
|
||||
expect(limitTimestamp).toBe(3000);
|
||||
});
|
||||
|
||||
it('should block when interval exceeded', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -491,7 +507,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should calculate correct info when blocked', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -502,7 +519,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should allow when bucket is filled but interval has passed', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
@ -511,7 +529,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should scale interval by factor', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 500;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor, 0.5);
|
||||
|
@ -519,23 +538,39 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should set key expiration', async () => {
|
||||
const mock = jest.fn(mockRedisSet);
|
||||
mockRedisSet = mock;
|
||||
it('should set counter expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(mock).toHaveBeenCalledWith(['rl_actor_test_min', '{"t":0,"c":1}', 'EX', 1]);
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 1]);
|
||||
});
|
||||
|
||||
it('should set timer expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 1]);
|
||||
});
|
||||
|
||||
it('should not increment when already blocked', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 100;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(minCounter?.c).toBe(1);
|
||||
expect(minCounter?.t).toBe(0);
|
||||
expect(limitCounter).toBe(1);
|
||||
expect(limitTimestamp).toBe(0);
|
||||
});
|
||||
|
||||
it('should skip if factor is zero', async () => {
|
||||
|
@ -567,6 +602,19 @@ describe(SkRateLimiterService, () => {
|
|||
|
||||
await expect(promise).rejects.toThrow(/minInterval is negative/);
|
||||
});
|
||||
|
||||
it('should apply correction if extra calls slip through', async () => {
|
||||
limitCounter = 2;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeTruthy();
|
||||
expect(info.remaining).toBe(0);
|
||||
expect(info.resetMs).toBe(2000);
|
||||
expect(info.resetSec).toBe(2);
|
||||
expect(info.fullResetMs).toBe(2000);
|
||||
expect(info.fullResetSec).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
describe('with legacy limit', () => {
|
||||
|
@ -587,16 +635,11 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should not error when allowed', async () => {
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expectNoUnhandledErrors();
|
||||
});
|
||||
|
||||
it('should infer dripRate from duration', async () => {
|
||||
limit.max = 10;
|
||||
limit.duration = 10000;
|
||||
counter = { c: 10, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const i1 = await serviceUnderTest().limit(limit, actor);
|
||||
mockTimeService.now += 1000;
|
||||
|
@ -619,7 +662,8 @@ describe(SkRateLimiterService, () => {
|
|||
it('should calculate correct info when allowed', async () => {
|
||||
limit.max = 10;
|
||||
limit.duration = 10000;
|
||||
counter = { c: 10, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 2000;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
@ -634,7 +678,8 @@ describe(SkRateLimiterService, () => {
|
|||
it('should calculate correct info when blocked', async () => {
|
||||
limit.max = 10;
|
||||
limit.duration = 10000;
|
||||
counter = { c: 10, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -646,7 +691,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should allow when bucket is filled but interval has passed', async () => {
|
||||
counter = { c: 10, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
@ -655,37 +701,55 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should scale limit by factor', async () => {
|
||||
counter = { c: 10, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor, 0.5); // 10 + 1 = 11
|
||||
|
||||
expect(info.blocked).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should set key expiration', async () => {
|
||||
const mock = jest.fn(mockRedisSet);
|
||||
mockRedisSet = mock;
|
||||
it('should set counter expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(mock).toHaveBeenCalledWith(['rl_actor_test_bucket', '{"t":0,"c":1}', 'EX', 1]);
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 1]);
|
||||
});
|
||||
|
||||
it('should set timestamp expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 1]);
|
||||
});
|
||||
|
||||
it('should not increment when already blocked', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
limitCounter = 1;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 100;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter?.c).toBe(1);
|
||||
expect(counter?.t).toBe(0);
|
||||
expect(limitCounter).toBe(1);
|
||||
expect(limitTimestamp).toBe(0);
|
||||
});
|
||||
|
||||
it('should not allow dripRate to be lower than 0', async () => {
|
||||
// real-world case; taken from StreamingApiServerService
|
||||
limit.max = 4096;
|
||||
limit.duration = 2000;
|
||||
counter = { c: 4096, t: 0 };
|
||||
limitCounter = 4096;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const i1 = await serviceUnderTest().limit(limit, actor);
|
||||
mockTimeService.now = 1;
|
||||
|
@ -708,12 +772,21 @@ describe(SkRateLimiterService, () => {
|
|||
await expect(promise).rejects.toThrow(/factor is zero or negative/);
|
||||
});
|
||||
|
||||
it('should skip if duration is zero', async () => {
|
||||
limit.duration = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeFalsy();
|
||||
expect(info.remaining).toBe(Number.MAX_SAFE_INTEGER);
|
||||
});
|
||||
|
||||
it('should throw if max is zero', async () => {
|
||||
limit.max = 0;
|
||||
|
||||
const promise = serviceUnderTest().limit(limit, actor);
|
||||
|
||||
await expect(promise).rejects.toThrow(/size is less than 1/);
|
||||
await expect(promise).rejects.toThrow(/max is less than 1/);
|
||||
});
|
||||
|
||||
it('should throw if max is negative', async () => {
|
||||
|
@ -721,7 +794,20 @@ describe(SkRateLimiterService, () => {
|
|||
|
||||
const promise = serviceUnderTest().limit(limit, actor);
|
||||
|
||||
await expect(promise).rejects.toThrow(/size is less than 1/);
|
||||
await expect(promise).rejects.toThrow(/max is less than 1/);
|
||||
});
|
||||
|
||||
it('should apply correction if extra calls slip through', async () => {
|
||||
limitCounter = 2;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeTruthy();
|
||||
expect(info.remaining).toBe(0);
|
||||
expect(info.resetMs).toBe(2000);
|
||||
expect(info.resetSec).toBe(2);
|
||||
expect(info.fullResetMs).toBe(2000);
|
||||
expect(info.fullResetSec).toBe(2);
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -732,7 +818,7 @@ describe(SkRateLimiterService, () => {
|
|||
limit = {
|
||||
type: undefined,
|
||||
key,
|
||||
max: 5,
|
||||
max: 10,
|
||||
duration: 5000,
|
||||
minInterval: 1000,
|
||||
};
|
||||
|
@ -744,22 +830,9 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should not error when allowed', async () => {
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expectNoUnhandledErrors();
|
||||
});
|
||||
|
||||
it('should block when limit exceeded', async () => {
|
||||
counter = { c: 5, t: 0 };
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should block when minInterval exceeded', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -767,19 +840,8 @@ describe(SkRateLimiterService, () => {
|
|||
});
|
||||
|
||||
it('should calculate correct info when allowed', async () => {
|
||||
counter = { c: 1, t: 0 };
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.remaining).toBe(0);
|
||||
expect(info.resetSec).toBe(1);
|
||||
expect(info.resetMs).toBe(1000);
|
||||
expect(info.fullResetSec).toBe(2);
|
||||
expect(info.fullResetMs).toBe(2000);
|
||||
});
|
||||
|
||||
it('should calculate correct info when blocked by limit', async () => {
|
||||
counter = { c: 5, t: 0 };
|
||||
limitCounter = 9;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
|
@ -790,20 +852,22 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.fullResetMs).toBe(5000);
|
||||
});
|
||||
|
||||
it('should calculate correct info when blocked by minInterval', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
it('should calculate correct info when blocked', async () => {
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.remaining).toBe(0);
|
||||
expect(info.resetSec).toBe(1);
|
||||
expect(info.resetMs).toBe(1000);
|
||||
expect(info.fullResetSec).toBe(1);
|
||||
expect(info.fullResetMs).toBe(1000);
|
||||
expect(info.fullResetSec).toBe(5);
|
||||
expect(info.fullResetMs).toBe(5000);
|
||||
});
|
||||
|
||||
it('should allow when counter is filled but interval has passed', async () => {
|
||||
counter = { c: 5, t: 0 };
|
||||
limitCounter = 5;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now = 1000;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
@ -811,18 +875,23 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should allow when minCounter is filled but interval has passed', async () => {
|
||||
minCounter = { c: 1, t: 0 };
|
||||
mockTimeService.now = 1000;
|
||||
it('should drip according to minInterval', async () => {
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 1000;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
const i1 = await serviceUnderTest().limit(limit, actor);
|
||||
const i2 = await serviceUnderTest().limit(limit, actor);
|
||||
const i3 = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeFalsy();
|
||||
expect(i1.blocked).toBeFalsy();
|
||||
expect(i2.blocked).toBeFalsy();
|
||||
expect(i3.blocked).toBeTruthy();
|
||||
});
|
||||
|
||||
it('should scale limit and interval by factor', async () => {
|
||||
counter = { c: 5, t: 0 };
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 5;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 500;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor, 0.5);
|
||||
|
@ -830,27 +899,52 @@ describe(SkRateLimiterService, () => {
|
|||
expect(info.blocked).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should set key expiration', async () => {
|
||||
const mock = jest.fn(mockRedisSet);
|
||||
mockRedisSet = mock;
|
||||
it('should set counter expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(mock).toHaveBeenCalledWith(['rl_actor_test_bucket', '{"t":0,"c":1}', 'EX', 1]);
|
||||
expect(mock).toHaveBeenCalledWith(['rl_actor_test_min', '{"t":0,"c":1}', 'EX', 1]);
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_c', 5]);
|
||||
});
|
||||
|
||||
it('should set timestamp expiration', async () => {
|
||||
const commands: unknown[][] = [];
|
||||
mockRedis.push(command => {
|
||||
commands.push(command);
|
||||
return null;
|
||||
});
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(commands).toContainEqual(['expire', 'rl_actor_test_t', 5]);
|
||||
});
|
||||
|
||||
it('should not increment when already blocked', async () => {
|
||||
counter = { c: 5, t: 0 };
|
||||
minCounter = { c: 1, t: 0 };
|
||||
limitCounter = 10;
|
||||
limitTimestamp = 0;
|
||||
mockTimeService.now += 100;
|
||||
|
||||
await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(counter?.c).toBe(5);
|
||||
expect(counter?.t).toBe(0);
|
||||
expect(minCounter?.c).toBe(1);
|
||||
expect(minCounter?.t).toBe(0);
|
||||
expect(limitCounter).toBe(10);
|
||||
expect(limitTimestamp).toBe(0);
|
||||
});
|
||||
|
||||
it('should apply correction if extra calls slip through', async () => {
|
||||
limitCounter = 12;
|
||||
|
||||
const info = await serviceUnderTest().limit(limit, actor);
|
||||
|
||||
expect(info.blocked).toBeTruthy();
|
||||
expect(info.remaining).toBe(0);
|
||||
expect(info.resetMs).toBe(2000);
|
||||
expect(info.resetSec).toBe(2);
|
||||
expect(info.fullResetMs).toBe(6000);
|
||||
expect(info.fullResetSec).toBe(6);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|