[feature] add rate limit middleware (#741)

* feat: add rate limit middleware

* chore: update vendor dir

* chore: update readme with new dependency

* chore: add rate limit infos to swagger.md file

* refactor: add ipv6 mask limiter option

Add IPv6 CIDR /64 mask

* refactor: increase rate limit to 1000

Address https://github.com/superseriousbusiness/gotosocial/pull/741#discussion_r945584800

Co-authored-by: tobi <31960611+tsmethurst@users.noreply.github.com>
nya1 2022-08-31 12:06:14 +02:00 committed by GitHub
commit bee8458a2d
43 changed files with 4692 additions and 443 deletions
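For orientation, here is a minimal sketch (not the actual GoToSocial wiring) of how the vendored limiter and its Gin driver fit together with the options mentioned in the commit messages above, i.e. a 1000-request limit and a /64 IPv6 mask; the period, store, and port are illustrative assumptions:

```go
package main

import (
	"net"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/ulule/limiter/v3"
	limitergin "github.com/ulule/limiter/v3/drivers/middleware/gin"
	"github.com/ulule/limiter/v3/drivers/store/memory"
)

func main() {
	// 1000 requests per period; the period used here is an assumption.
	rate := limiter.Rate{Period: 5 * time.Minute, Limit: 1000}
	store := memory.NewStore()

	// Group IPv6 clients by their /64 prefix, as introduced in this PR.
	instance := limiter.New(store, rate, limiter.WithIPv6Mask(net.CIDRMask(64, 128)))

	router := gin.Default()
	router.Use(limitergin.NewMiddleware(instance))
	_ = router.Run(":8080")
}
```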

5
vendor/github.com/ulule/limiter/v3/.dockerignore generated vendored Normal file

@ -0,0 +1,5 @@
# Circle CI directory
.circleci
# Example directory
examples

25
vendor/github.com/ulule/limiter/v3/.editorconfig generated vendored Normal file

@ -0,0 +1,25 @@
root = true
[*]
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
[*.{yml,yaml}]
indent_size = 2
[*.go]
indent_size = 8
indent_style = tab
[*.json]
indent_size = 4
indent_style = space
[Makefile]
indent_style = tab
indent_size = 4

2
vendor/github.com/ulule/limiter/v3/.gitignore generated vendored Normal file

@ -0,0 +1,2 @@
/vendor
.idea

79
vendor/github.com/ulule/limiter/v3/.golangci.yml generated vendored Normal file

@ -0,0 +1,79 @@
run:
concurrency: 4
deadline: 1m
issues-exit-code: 1
tests: true
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
linters-settings:
errcheck:
check-type-assertions: false
check-blank: false
govet:
check-shadowing: false
use-installed-packages: false
golint:
min-confidence: 0.8
gofmt:
simplify: true
gocyclo:
min-complexity: 10
maligned:
suggest-new: true
dupl:
threshold: 80
goconst:
min-len: 3
min-occurrences: 3
misspell:
locale: US
lll:
line-length: 140
unused:
check-exported: false
unparam:
algo: cha
check-exported: false
nakedret:
max-func-lines: 30
linters:
enable:
- megacheck
- govet
- errcheck
- gas
- structcheck
- varcheck
- ineffassign
- deadcode
- typecheck
- unconvert
- gocyclo
- gofmt
- misspell
- lll
- nakedret
enable-all: false
disable:
- depguard
- prealloc
- dupl
- maligned
disable-all: false
issues:
exclude-use-default: false
max-per-linter: 1024
max-same: 1024
exclude:
- "G304"
- "G101"
- "G104"

5
vendor/github.com/ulule/limiter/v3/AUTHORS generated vendored Normal file

@ -0,0 +1,5 @@
Primary contributors:
Gilles FABIO <gilles@ulule.com>
Florent MESSA <florent@ulule.com>
Thomas LE ROUX <thomas@leroux.io>

21
vendor/github.com/ulule/limiter/v3/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015-2018 Ulule
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

7
vendor/github.com/ulule/limiter/v3/Makefile generated vendored Normal file

@ -0,0 +1,7 @@
.PHONY: test lint
test:
@(scripts/test)
lint:
@(scripts/lint)

255
vendor/github.com/ulule/limiter/v3/README.md generated vendored Normal file

@ -0,0 +1,255 @@
# Limiter
[![Documentation][godoc-img]][godoc-url]
![License][license-img]
[![Build Status][circle-img]][circle-url]
[![Go Report Card][goreport-img]][goreport-url]
_Dead simple rate limit middleware for Go._
- Simple API
- "Store" approach for backend
- Redis support (but not tied to it)
- Middlewares: HTTP, [FastHTTP][6] and [Gin][4]
## Installation
Using [Go Modules](https://github.com/golang/go/wiki/Modules)
```bash
$ go get github.com/ulule/limiter/v3@v3.10.0
```
## Usage
In five steps:
- Create a `limiter.Rate` instance _(the number of requests per period)_
- Create a `limiter.Store` instance _(see [Redis](https://github.com/ulule/limiter/blob/master/drivers/store/redis/store.go) or [In-Memory](https://github.com/ulule/limiter/blob/master/drivers/store/memory/store.go))_
- Create a `limiter.Limiter` instance that takes store and rate instances as arguments
- Create a middleware instance using the middleware of your choice
- Give the limiter instance to your middleware initializer
**Example:**
```go
// Create a rate with the given limit (number of requests) for the given
// period (a time.Duration of your choice).
import "github.com/ulule/limiter/v3"
rate := limiter.Rate{
Period: 1 * time.Hour,
Limit: 1000,
}
// You can also use the simplified format "<limit>-<period>", with the given
// periods:
//
// * "S": second
// * "M": minute
// * "H": hour
// * "D": day
//
// Examples:
//
// * 5 reqs/second: "5-S"
// * 10 reqs/minute: "10-M"
// * 1000 reqs/hour: "1000-H"
// * 2000 reqs/day: "2000-D"
//
rate, err := limiter.NewRateFromFormatted("1000-H")
if err != nil {
panic(err)
}
// Then, create a store. Here, we use the bundled Redis store. Any store
// compliant with the limiter.Store interface will do the job. The defaults are
// "limiter" as Redis key prefix and a maximum of 3 retries for the key under
// race condition.
import "github.com/ulule/limiter/v3/drivers/store/redis"
store, err := redis.NewStore(client)
if err != nil {
panic(err)
}
// Alternatively, you can pass options to the store with the "WithOptions"
// function. For example, for Redis store:
import "github.com/ulule/limiter/v3/drivers/store/redis"
store, err := redis.NewStoreWithOptions(pool, limiter.StoreOptions{
Prefix: "your_own_prefix",
})
if err != nil {
panic(err)
}
// Or use an in-memory store with a goroutine which clears expired keys.
import "github.com/ulule/limiter/v3/drivers/store/memory"
store := memory.NewStore()
// Then, create the limiter instance which takes the store and the rate as arguments.
// Now, you can give this instance to any supported middleware.
instance := limiter.New(store, rate)
// Alternatively, you can configure the limiter instance with several options.
instance := limiter.New(store, rate, limiter.WithClientIPHeader("True-Client-IP"), limiter.WithIPv6Mask(mask))
// Finally, give the limiter instance to your middleware initializer.
import "github.com/ulule/limiter/v3/drivers/middleware/stdlib"
middleware := stdlib.NewMiddleware(instance)
```
See middleware examples:
- [HTTP](https://github.com/ulule/limiter-examples/tree/master/http/main.go)
- [Gin](https://github.com/ulule/limiter-examples/tree/master/gin/main.go)
- [Beego](https://github.com/ulule/limiter-examples/blob/master//beego/main.go)
- [Chi](https://github.com/ulule/limiter-examples/tree/master/chi/main.go)
- [Echo](https://github.com/ulule/limiter-examples/tree/master/echo/main.go)
- [Fasthttp](https://github.com/ulule/limiter-examples/tree/master/fasthttp/main.go)
## How it works
The IP address of the request is used as a key in the store.
If the key does not exist in the store, we set a default
value with an expiration period.
You will find two stores:
- Redis: relies on [TTL](http://redis.io/commands/ttl) and increments the rate limit on each request.
- In-Memory: relies on a fork of [go-cache](https://github.com/patrickmn/go-cache) with a goroutine that clears expired keys at a default interval.
When the limit is reached, a `429` HTTP status code is sent.
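As a sketch of that behaviour (in-memory store and a deliberately tiny limit for illustration), repeatedly querying the limiter for the same key decrements `Remaining` until `Reached` flips to true:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ulule/limiter/v3"
	"github.com/ulule/limiter/v3/drivers/store/memory"
)

func main() {
	rate := limiter.Rate{Period: 1 * time.Minute, Limit: 3}
	instance := limiter.New(memory.NewStore(), rate)

	// The same key (normally the masked client IP) is incremented on each call.
	for i := 0; i < 5; i++ {
		lctx, err := instance.Get(context.Background(), "198.51.100.7")
		if err != nil {
			panic(err)
		}
		fmt.Printf("remaining=%d reached=%v\n", lctx.Remaining, lctx.Reached)
	}
	// Prints remaining 2, 1, 0, 0, 0 with reached=true from the fourth call on.
}
```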
## Limiter behind a reverse proxy
### Introduction
If your limiter is behind a reverse proxy, it could be difficult to obtain the "real" client IP.
Some reverse proxies, like AWS ALB, let through any header values that they don't set themselves,
for example `True-Client-IP` and `X-Real-IP`.
Similarly, `X-Forwarded-For` is a list of comma-separated IPs that gets appended to by each traversed proxy.
The idea is that the first IP _(added by the first proxy)_ is the true client IP. Each subsequent IP is another proxy along the path.
An attacker can spoof any of those headers, and the spoofed value could then be reported as the client IP.
By default, limiter doesn't trust any of those headers: you have to explicitly enable them in order to use them.
If you enable them, **you must always be aware** that any header added by any _(reverse)_ proxy not controlled
by you **is completely unreliable.**
### X-Forwarded-For
For example, if you make this request to your load balancer:
```bash
curl -X POST https://example.com/login -H "X-Forwarded-For: 1.2.3.4, 11.22.33.44"
```
And your server behind the load balancer obtains this:
```
X-Forwarded-For: 1.2.3.4, 11.22.33.44, <actual client IP>
```
That means you can't use the `X-Forwarded-For` header, because it's **unreliable** and **untrustworthy**.
So keep `TrustForwardHeader` disabled in your limiter options.
However, if you have configured your reverse proxy to always remove/overwrite the `X-Forwarded-For` and/or `X-Real-IP` headers,
then when you execute this _(same)_ request:
```bash
curl -X POST https://example.com/login -H "X-Forwarded-For: 1.2.3.4, 11.22.33.44"
```
And your server behind the load balancer obtains this:
```
X-Forwarded-For: <actual client IP>
```
Then, you can enable `TrustForwardHeader` in your limiter options.
### Custom header
Many CDN and cloud providers add a custom header to convey the client IP. For example (non-exhaustive list):
* `Fastly-Client-IP` from Fastly
* `CF-Connecting-IP` from Cloudflare
* `X-Azure-ClientIP` from Azure
You can use one of these headers by setting `ClientIPHeader` in your limiter options.
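For example, a sketch using one of the headers above (the rate and store are placeholders):

```go
// ClientIPHeader takes precedence over TrustForwardHeader when both are set.
store := memory.NewStore()
rate, _ := limiter.NewRateFromFormatted("1000-H")
instance := limiter.New(store, rate, limiter.WithClientIPHeader("CF-Connecting-IP"))
```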
### None of the above
If none of the above solutions work, use a custom `KeyGetter` in your middleware.
You can use this excellent article to help you define the best strategy depending on your network topology and your security needs:
https://adam-p.ca/blog/2022/03/x-forwarded-for/
If you have any ideas or suggestions on how we could simplify these steps, don't hesitate to raise an issue.
We would like some feedback on how we could implement these steps in the Limiter API.
Thank you.
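As a sketch of the `KeyGetter` approach with the bundled Gin middleware (it assumes an existing `instance` and `router`; keying on the `Authorization` header is just an example):

```go
import (
	"github.com/gin-gonic/gin"
	limitergin "github.com/ulule/limiter/v3/drivers/middleware/gin"
)

// Rate-limit per API token when present, falling back to the client IP.
keyGetter := func(c *gin.Context) string {
	if token := c.GetHeader("Authorization"); token != "" {
		return token
	}
	return c.ClientIP()
}

router.Use(limitergin.NewMiddleware(instance, limitergin.WithKeyGetter(keyGetter)))
```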
## Why Yet Another Package
You could ask us: why yet another rate limit package?
Because existing packages did not suit our needs.
We tried a lot of alternatives:
1. [Throttled][1]. This package uses the generic cell-rate algorithm. To cite the
documentation: _"The algorithm has been slightly modified from its usual form to
support limiting with an additional quantity parameter, such as for limiting the
number of bytes uploaded"_. It is brilliant in terms of algorithm, but the
documentation is quite unclear at the moment, we don't need the _burst_ feature for
now, it's impossible to get a correct `Retry-After` (when the limit is exceeded, we can still
make a few requests because of the max burst), and it only supports `http.Handler`
middleware (we use [Gin][4]). Currently, we only need to return `429`
and `X-Ratelimit-*` headers for `n reqs/duration`.
2. [Speedbump][3]. Good package but maybe too lightweight. No `Reset` support,
only one middleware for the [Gin][4] framework, and too tightly coupled to Redis. We
prefer a "store" approach.
3. [Tollbooth][5]. Good one too, but it does both too much and too little. It limits by
remote IP, path, methods, custom headers and basic auth usernames... but provides
neither Redis support (only _in-memory_) nor a ready-to-go middleware that sets
`X-Ratelimit-*` headers. `tollbooth.LimitByRequest(limiter, r)` only returns an HTTP
code.
4. [ratelimit][2]. Probably the closest to our needs but, once again, too
lightweight, with no middleware available, and not active (last commit was in August
2014). Some parts of our code (Redis) come from this project. It deserves much
more love.
There are many other packages on GitHub, but most are either too lightweight, too
old (supporting only old Go versions), or unmaintained. That's why we decided to
create yet another one.
## Contributing
- Ping us on twitter:
- [@oibafsellig](https://twitter.com/oibafsellig)
- [@thoas](https://twitter.com/thoas)
- [@novln\_](https://twitter.com/novln_)
- Fork the [project](https://github.com/ulule/limiter)
- Fix [bugs](https://github.com/ulule/limiter/issues)
Don't hesitate ;)
[1]: https://github.com/throttled/throttled
[2]: https://github.com/r8k/ratelimit
[3]: https://github.com/etcinit/speedbump
[4]: https://github.com/gin-gonic/gin
[5]: https://github.com/didip/tollbooth
[6]: https://github.com/valyala/fasthttp
[godoc-url]: https://pkg.go.dev/github.com/ulule/limiter/v3
[godoc-img]: https://pkg.go.dev/badge/github.com/ulule/limiter/v3
[license-img]: https://img.shields.io/badge/license-MIT-blue.svg
[goreport-url]: https://goreportcard.com/report/github.com/ulule/limiter
[goreport-img]: https://goreportcard.com/badge/github.com/ulule/limiter
[circle-url]: https://circleci.com/gh/ulule/limiter/tree/master
[circle-img]: https://circleci.com/gh/ulule/limiter.svg?style=shield&circle-token=baf62ec320dd871b3a4a7e67fa99530fbc877c99

15
vendor/github.com/ulule/limiter/v3/defaults.go generated vendored Normal file

@ -0,0 +1,15 @@
package limiter
import "time"
const (
// DefaultPrefix is the default prefix to use for the key in the store.
DefaultPrefix = "limiter"
// DefaultMaxRetry is the default maximum number of key retries under
// race condition (mainly used with database-based stores).
DefaultMaxRetry = 3
// DefaultCleanUpInterval is the default time duration for cleanup.
DefaultCleanUpInterval = 30 * time.Second
)

65
vendor/github.com/ulule/limiter/v3/drivers/middleware/gin/middleware.go generated vendored Normal file

@ -0,0 +1,65 @@
package gin
import (
"strconv"
"github.com/gin-gonic/gin"
"github.com/ulule/limiter/v3"
)
// Middleware is the middleware for gin.
type Middleware struct {
Limiter *limiter.Limiter
OnError ErrorHandler
OnLimitReached LimitReachedHandler
KeyGetter KeyGetter
ExcludedKey func(string) bool
}
// NewMiddleware returns a new instance of a gin middleware.
func NewMiddleware(limiter *limiter.Limiter, options ...Option) gin.HandlerFunc {
middleware := &Middleware{
Limiter: limiter,
OnError: DefaultErrorHandler,
OnLimitReached: DefaultLimitReachedHandler,
KeyGetter: DefaultKeyGetter,
ExcludedKey: nil,
}
for _, option := range options {
option.apply(middleware)
}
return func(ctx *gin.Context) {
middleware.Handle(ctx)
}
}
// Handle gin request.
func (middleware *Middleware) Handle(c *gin.Context) {
key := middleware.KeyGetter(c)
if middleware.ExcludedKey != nil && middleware.ExcludedKey(key) {
c.Next()
return
}
context, err := middleware.Limiter.Get(c, key)
if err != nil {
middleware.OnError(c, err)
c.Abort()
return
}
c.Header("X-RateLimit-Limit", strconv.FormatInt(context.Limit, 10))
c.Header("X-RateLimit-Remaining", strconv.FormatInt(context.Remaining, 10))
c.Header("X-RateLimit-Reset", strconv.FormatInt(context.Reset, 10))
if context.Reached {
middleware.OnLimitReached(c)
c.Abort()
return
}
c.Next()
}

71
vendor/github.com/ulule/limiter/v3/drivers/middleware/gin/options.go generated vendored Normal file

@ -0,0 +1,71 @@
package gin
import (
"net/http"
"github.com/gin-gonic/gin"
)
// Option is used to define Middleware configuration.
type Option interface {
apply(*Middleware)
}
type option func(*Middleware)
func (o option) apply(middleware *Middleware) {
o(middleware)
}
// ErrorHandler is a handler used to inform when an error has occurred.
type ErrorHandler func(c *gin.Context, err error)
// WithErrorHandler will configure the Middleware to use the given ErrorHandler.
func WithErrorHandler(handler ErrorHandler) Option {
return option(func(middleware *Middleware) {
middleware.OnError = handler
})
}
// DefaultErrorHandler is the default ErrorHandler used by a new Middleware.
func DefaultErrorHandler(c *gin.Context, err error) {
panic(err)
}
// LimitReachedHandler is a handler used to inform when the limit has been exceeded.
type LimitReachedHandler func(c *gin.Context)
// WithLimitReachedHandler will configure the Middleware to use the given LimitReachedHandler.
func WithLimitReachedHandler(handler LimitReachedHandler) Option {
return option(func(middleware *Middleware) {
middleware.OnLimitReached = handler
})
}
// DefaultLimitReachedHandler is the default LimitReachedHandler used by a new Middleware.
func DefaultLimitReachedHandler(c *gin.Context) {
c.String(http.StatusTooManyRequests, "Limit exceeded")
}
// KeyGetter will define the rate limiter key given the gin Context.
type KeyGetter func(c *gin.Context) string
// WithKeyGetter will configure the Middleware to use the given KeyGetter.
func WithKeyGetter(handler KeyGetter) Option {
return option(func(middleware *Middleware) {
middleware.KeyGetter = handler
})
}
// DefaultKeyGetter is the default KeyGetter used by a new Middleware.
// It returns the Client IP address.
func DefaultKeyGetter(c *gin.Context) string {
return c.ClientIP()
}
// WithExcludedKey will configure the Middleware to ignore key(s) using the given function.
func WithExcludedKey(handler func(string) bool) Option {
return option(func(middleware *Middleware) {
middleware.ExcludedKey = handler
})
}
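A sketch combining these options (it assumes an existing `instance *limiter.Limiter`; the JSON body and the loopback exclusion are illustrative choices, not library defaults):

```go
import (
	"net/http"

	"github.com/gin-gonic/gin"
	limitergin "github.com/ulule/limiter/v3/drivers/middleware/gin"
)

middleware := limitergin.NewMiddleware(instance,
	// Return a JSON error instead of the default plain-text "Limit exceeded".
	limitergin.WithLimitReachedHandler(func(c *gin.Context) {
		c.JSON(http.StatusTooManyRequests, gin.H{"error": "rate limit exceeded"})
	}),
	// Answer with a 500 instead of panicking if the store returns an error.
	limitergin.WithErrorHandler(func(c *gin.Context, err error) {
		c.AbortWithStatus(http.StatusInternalServerError)
	}),
	// Never rate-limit loopback health checks.
	limitergin.WithExcludedKey(func(key string) bool {
		return key == "127.0.0.1"
	}),
)
```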

28
vendor/github.com/ulule/limiter/v3/drivers/store/common/context.go generated vendored Normal file

@ -0,0 +1,28 @@
package common
import (
"time"
"github.com/ulule/limiter/v3"
)
// GetContextFromState generates a new limiter.Context from the given state.
func GetContextFromState(now time.Time, rate limiter.Rate, expiration time.Time, count int64) limiter.Context {
limit := rate.Limit
remaining := int64(0)
reached := true
if count <= limit {
remaining = limit - count
reached = false
}
reset := expiration.Unix()
return limiter.Context{
Limit: limit,
Remaining: remaining,
Reset: reset,
Reached: reached,
}
}
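A short worked example of this helper (values chosen for illustration): with a limit of 1000, a count of 400 yields 600 remaining, while a count of 1001 yields 0 remaining with `Reached` set.

```go
rate := limiter.Rate{Period: time.Hour, Limit: 1000}
expiration := time.Now().Add(rate.Period)

open := common.GetContextFromState(time.Now(), rate, expiration, 400)
// open.Remaining == 600, open.Reached == false

blocked := common.GetContextFromState(time.Now(), rate, expiration, 1001)
// blocked.Remaining == 0, blocked.Reached == true, blocked.Reset == expiration.Unix()
```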

240
vendor/github.com/ulule/limiter/v3/drivers/store/memory/cache.go generated vendored Normal file

@ -0,0 +1,240 @@
package memory
import (
"runtime"
"sync"
"time"
)
// Forked from https://github.com/patrickmn/go-cache
// CacheWrapper is used to ensure that the underlying cleaner goroutine used to clean expired keys will not prevent
// Cache from being garbage collected.
type CacheWrapper struct {
*Cache
}
// A cleaner will periodically delete expired keys from cache.
type cleaner struct {
interval time.Duration
stop chan bool
}
// Run will periodically delete expired keys from the given cache until the GC notifies that it should stop.
func (cleaner *cleaner) Run(cache *Cache) {
ticker := time.NewTicker(cleaner.interval)
for {
select {
case <-ticker.C:
cache.Clean()
case <-cleaner.stop:
ticker.Stop()
return
}
}
}
// stopCleaner is a callback from GC used to stop cleaner goroutine.
func stopCleaner(wrapper *CacheWrapper) {
wrapper.cleaner.stop <- true
wrapper.cleaner = nil
}
// startCleaner will start a cleaner goroutine for given cache.
func startCleaner(cache *Cache, interval time.Duration) {
cleaner := &cleaner{
interval: interval,
stop: make(chan bool),
}
cache.cleaner = cleaner
go cleaner.Run(cache)
}
// Counter is a simple counter with an expiration.
type Counter struct {
mutex sync.RWMutex
value int64
expiration int64
}
// Value returns the counter current value.
func (counter *Counter) Value() int64 {
counter.mutex.RLock()
defer counter.mutex.RUnlock()
return counter.value
}
// Expiration returns the counter expiration.
func (counter *Counter) Expiration() int64 {
counter.mutex.RLock()
defer counter.mutex.RUnlock()
return counter.expiration
}
// Expired returns true if the counter has expired.
func (counter *Counter) Expired() bool {
counter.mutex.RLock()
defer counter.mutex.RUnlock()
return counter.expiration == 0 || time.Now().UnixNano() > counter.expiration
}
// Load returns the value and the expiration of this counter.
// If the counter is expired, it will use the given expiration.
func (counter *Counter) Load(expiration int64) (int64, int64) {
counter.mutex.RLock()
defer counter.mutex.RUnlock()
if counter.expiration == 0 || time.Now().UnixNano() > counter.expiration {
return 0, expiration
}
return counter.value, counter.expiration
}
// Increment increments given value on this counter.
// If the counter is expired, it will use the given expiration.
// It returns its current value and expiration.
func (counter *Counter) Increment(value int64, expiration int64) (int64, int64) {
counter.mutex.Lock()
defer counter.mutex.Unlock()
if counter.expiration == 0 || time.Now().UnixNano() > counter.expiration {
counter.value = value
counter.expiration = expiration
return counter.value, counter.expiration
}
counter.value += value
return counter.value, counter.expiration
}
// Cache contains a collection of counters.
type Cache struct {
counters sync.Map
cleaner *cleaner
}
// NewCache returns a new cache.
func NewCache(cleanInterval time.Duration) *CacheWrapper {
cache := &Cache{}
wrapper := &CacheWrapper{Cache: cache}
if cleanInterval > 0 {
startCleaner(cache, cleanInterval)
runtime.SetFinalizer(wrapper, stopCleaner)
}
return wrapper
}
// LoadOrStore returns the existing counter for the key if present.
// Otherwise, it stores and returns the given counter.
// The loaded result is true if the counter was loaded, false if stored.
func (cache *Cache) LoadOrStore(key string, counter *Counter) (*Counter, bool) {
val, loaded := cache.counters.LoadOrStore(key, counter)
if val == nil {
return counter, false
}
actual := val.(*Counter)
return actual, loaded
}
// Load returns the counter stored in the map for a key, or nil if no counter is present.
// The ok result indicates whether counter was found in the map.
func (cache *Cache) Load(key string) (*Counter, bool) {
val, ok := cache.counters.Load(key)
if val == nil || !ok {
return nil, false
}
actual := val.(*Counter)
return actual, true
}
// Store sets the counter for a key.
func (cache *Cache) Store(key string, counter *Counter) {
cache.counters.Store(key, counter)
}
// Delete deletes the value for a key.
func (cache *Cache) Delete(key string) {
cache.counters.Delete(key)
}
// Range calls handler sequentially for each key and value present in the cache.
// If handler returns false, range stops the iteration.
func (cache *Cache) Range(handler func(key string, counter *Counter)) {
cache.counters.Range(func(k interface{}, v interface{}) bool {
if v == nil {
return true
}
key := k.(string)
counter := v.(*Counter)
handler(key, counter)
return true
})
}
// Increment increments given value on key.
// If key is undefined or expired, it will create it.
func (cache *Cache) Increment(key string, value int64, duration time.Duration) (int64, time.Time) {
expiration := time.Now().Add(duration).UnixNano()
// If counter is in cache, try to load it first.
counter, loaded := cache.Load(key)
if loaded {
value, expiration = counter.Increment(value, expiration)
return value, time.Unix(0, expiration)
}
// If it's not in cache, try to atomically create it.
// We do that in two steps to reduce memory allocation.
counter, loaded = cache.LoadOrStore(key, &Counter{
mutex: sync.RWMutex{},
value: value,
expiration: expiration,
})
if loaded {
value, expiration = counter.Increment(value, expiration)
return value, time.Unix(0, expiration)
}
// Otherwise, it has been created, return given value.
return value, time.Unix(0, expiration)
}
// Get returns key's value and expiration.
func (cache *Cache) Get(key string, duration time.Duration) (int64, time.Time) {
expiration := time.Now().Add(duration).UnixNano()
counter, ok := cache.Load(key)
if !ok {
return 0, time.Unix(0, expiration)
}
value, expiration := counter.Load(expiration)
return value, time.Unix(0, expiration)
}
// Clean will delete any expired keys.
func (cache *Cache) Clean() {
cache.Range(func(key string, counter *Counter) {
if counter.Expired() {
cache.Delete(key)
}
})
}
// Reset changes the key's value and resets the expiration.
func (cache *Cache) Reset(key string, duration time.Duration) (int64, time.Time) {
cache.Delete(key)
expiration := time.Now().Add(duration).UnixNano()
return 0, time.Unix(0, expiration)
}

82
vendor/github.com/ulule/limiter/v3/drivers/store/memory/store.go generated vendored Normal file

@ -0,0 +1,82 @@
package memory
import (
"context"
"time"
"github.com/ulule/limiter/v3"
"github.com/ulule/limiter/v3/drivers/store/common"
"github.com/ulule/limiter/v3/internal/bytebuffer"
)
// Store is the in-memory store.
type Store struct {
// Prefix used for the key.
Prefix string
// cache used to store values in-memory.
cache *CacheWrapper
}
// NewStore creates a new instance of memory store with defaults.
func NewStore() limiter.Store {
return NewStoreWithOptions(limiter.StoreOptions{
Prefix: limiter.DefaultPrefix,
CleanUpInterval: limiter.DefaultCleanUpInterval,
})
}
// NewStoreWithOptions creates a new instance of memory store with options.
func NewStoreWithOptions(options limiter.StoreOptions) limiter.Store {
return &Store{
Prefix: options.Prefix,
cache: NewCache(options.CleanUpInterval),
}
}
// Get returns the limit for given identifier.
func (store *Store) Get(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
buffer := bytebuffer.New()
defer buffer.Close()
buffer.Concat(store.Prefix, ":", key)
count, expiration := store.cache.Increment(buffer.String(), 1, rate.Period)
lctx := common.GetContextFromState(time.Now(), rate, expiration, count)
return lctx, nil
}
// Increment increments the limit by given count & returns the new limit value for given identifier.
func (store *Store) Increment(ctx context.Context, key string, count int64, rate limiter.Rate) (limiter.Context, error) {
buffer := bytebuffer.New()
defer buffer.Close()
buffer.Concat(store.Prefix, ":", key)
newCount, expiration := store.cache.Increment(buffer.String(), count, rate.Period)
lctx := common.GetContextFromState(time.Now(), rate, expiration, newCount)
return lctx, nil
}
// Peek returns the limit for given identifier, without modification on current values.
func (store *Store) Peek(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
buffer := bytebuffer.New()
defer buffer.Close()
buffer.Concat(store.Prefix, ":", key)
count, expiration := store.cache.Get(buffer.String(), rate.Period)
lctx := common.GetContextFromState(time.Now(), rate, expiration, count)
return lctx, nil
}
// Reset returns the limit for given identifier.
func (store *Store) Reset(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
buffer := bytebuffer.New()
defer buffer.Close()
buffer.Concat(store.Prefix, ":", key)
count, expiration := store.cache.Reset(buffer.String(), rate.Period)
lctx := common.GetContextFromState(time.Now(), rate, expiration, count)
return lctx, nil
}
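A sketch of tuning this store via `limiter.StoreOptions` (the prefix and interval values are illustrative, not what GoToSocial uses):

```go
import (
	"time"

	"github.com/ulule/limiter/v3"
	"github.com/ulule/limiter/v3/drivers/store/memory"
)

// A shorter cleanup interval frees expired counters sooner at the cost of more
// frequent passes over the cache.
store := memory.NewStoreWithOptions(limiter.StoreOptions{
	Prefix:          "ratelimit",
	CleanUpInterval: 10 * time.Second,
})
instance := limiter.New(store, limiter.Rate{Period: 5 * time.Minute, Limit: 1000})
```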

58
vendor/github.com/ulule/limiter/v3/internal/bytebuffer/bytebuffer.go generated vendored Normal file

@ -0,0 +1,58 @@
package bytebuffer
import (
"sync"
"unsafe"
)
// ByteBuffer is a wrapper around a slice to reduce memory allocation while handling blob of data.
type ByteBuffer struct {
blob []byte
}
// New creates a new ByteBuffer instance.
func New() *ByteBuffer {
entry := bufferPool.Get().(*ByteBuffer)
entry.blob = entry.blob[:0]
return entry
}
// Bytes returns the content buffer.
func (buffer *ByteBuffer) Bytes() []byte {
return buffer.blob
}
// String returns the content buffer.
func (buffer *ByteBuffer) String() string {
// Copied from strings.(*Builder).String
return *(*string)(unsafe.Pointer(&buffer.blob)) // nolint: gosec
}
// Concat appends given arguments to blob content
func (buffer *ByteBuffer) Concat(args ...string) {
for i := range args {
buffer.blob = append(buffer.blob, args[i]...)
}
}
// Close recycles the underlying resources of the buffer.
func (buffer *ByteBuffer) Close() {
// Proper usage of a sync.Pool requires each entry to have approximately
// the same memory cost. To obtain this property when the stored type
// contains a variably-sized buffer, we add a hard limit on the maximum buffer
// to place back in the pool.
//
// See https://golang.org/issue/23199
if buffer != nil && cap(buffer.blob) < (1<<16) {
bufferPool.Put(buffer)
}
}
// A byte buffer pool to reduce memory allocation pressure.
var bufferPool = &sync.Pool{
New: func() interface{} {
return &ByteBuffer{
blob: make([]byte, 0, 1024),
}
},
}

65
vendor/github.com/ulule/limiter/v3/limiter.go generated vendored Normal file

@ -0,0 +1,65 @@
package limiter
import (
"context"
)
// -----------------------------------------------------------------
// Context
// -----------------------------------------------------------------
// Context is the limit context.
type Context struct {
Limit int64
Remaining int64
Reset int64
Reached bool
}
// -----------------------------------------------------------------
// Limiter
// -----------------------------------------------------------------
// Limiter is the limiter instance.
type Limiter struct {
Store Store
Rate Rate
Options Options
}
// New returns an instance of Limiter.
func New(store Store, rate Rate, options ...Option) *Limiter {
opt := Options{
IPv4Mask: DefaultIPv4Mask,
IPv6Mask: DefaultIPv6Mask,
TrustForwardHeader: false,
}
for _, o := range options {
o(&opt)
}
return &Limiter{
Store: store,
Rate: rate,
Options: opt,
}
}
// Get returns the limit for given identifier.
func (limiter *Limiter) Get(ctx context.Context, key string) (Context, error) {
return limiter.Store.Get(ctx, key, limiter.Rate)
}
// Peek returns the limit for given identifier, without modification on current values.
func (limiter *Limiter) Peek(ctx context.Context, key string) (Context, error) {
return limiter.Store.Peek(ctx, key, limiter.Rate)
}
// Reset sets the limit for given identifier to zero.
func (limiter *Limiter) Reset(ctx context.Context, key string) (Context, error) {
return limiter.Store.Reset(ctx, key, limiter.Rate)
}
// Increment increments the limit by given count & gives back the new limit for given identifier
func (limiter *Limiter) Increment(ctx context.Context, key string, count int64) (Context, error) {
return limiter.Store.Increment(ctx, key, count, limiter.Rate)
}
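A sketch of how these four methods differ (in-memory store and a small limit for illustration):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ulule/limiter/v3"
	"github.com/ulule/limiter/v3/drivers/store/memory"
)

func main() {
	ctx := context.Background()
	instance := limiter.New(memory.NewStore(), limiter.Rate{Period: time.Minute, Limit: 10})

	got, _ := instance.Get(ctx, "key")           // consumes one request: Remaining == 9
	peeked, _ := instance.Peek(ctx, "key")       // read-only: Remaining still == 9
	bulk, _ := instance.Increment(ctx, "key", 4) // consumes four more: Remaining == 5
	cleared, _ := instance.Reset(ctx, "key")     // clears the counter: Remaining == 10

	fmt.Println(got.Remaining, peeked.Remaining, bulk.Remaining, cleared.Remaining)
}
```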

137
vendor/github.com/ulule/limiter/v3/network.go generated vendored Normal file

@ -0,0 +1,137 @@
package limiter
import (
"net"
"net/http"
"strings"
)
var (
// DefaultIPv4Mask defines the default IPv4 mask used to obtain user IP.
DefaultIPv4Mask = net.CIDRMask(32, 32)
// DefaultIPv6Mask defines the default IPv6 mask used to obtain user IP.
DefaultIPv6Mask = net.CIDRMask(128, 128)
)
// GetIP returns IP address from request.
// If options is defined and either TrustForwardHeader is true or ClientIPHeader is defined,
// it will lookup IP in HTTP headers.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func (limiter *Limiter) GetIP(r *http.Request) net.IP {
return GetIP(r, limiter.Options)
}
// GetIPWithMask returns IP address from request by applying a mask.
// If options is defined and either TrustForwardHeader is true or ClientIPHeader is defined,
// it will lookup IP in HTTP headers.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func (limiter *Limiter) GetIPWithMask(r *http.Request) net.IP {
return GetIPWithMask(r, limiter.Options)
}
// GetIPKey extracts IP from request and returns hashed IP to use as store key.
// If options is defined and either TrustForwardHeader is true or ClientIPHeader is defined,
// it will lookup IP in HTTP headers.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func (limiter *Limiter) GetIPKey(r *http.Request) string {
return limiter.GetIPWithMask(r).String()
}
// GetIP returns IP address from request.
// If options is defined and either TrustForwardHeader is true or ClientIPHeader is defined,
// it will lookup IP in HTTP headers.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func GetIP(r *http.Request, options ...Options) net.IP {
if len(options) >= 1 {
if options[0].ClientIPHeader != "" {
ip := getIPFromHeader(r, options[0].ClientIPHeader)
if ip != nil {
return ip
}
}
if options[0].TrustForwardHeader {
ip := getIPFromXFFHeader(r)
if ip != nil {
return ip
}
ip = getIPFromHeader(r, "X-Real-IP")
if ip != nil {
return ip
}
}
}
remoteAddr := strings.TrimSpace(r.RemoteAddr)
host, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
return net.ParseIP(remoteAddr)
}
return net.ParseIP(host)
}
// GetIPWithMask returns IP address from request by applying a mask.
// If options is defined and either TrustForwardHeader is true or ClientIPHeader is defined,
// it will lookup IP in HTTP headers.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func GetIPWithMask(r *http.Request, options ...Options) net.IP {
if len(options) == 0 {
return GetIP(r)
}
ip := GetIP(r, options[0])
if ip.To4() != nil {
return ip.Mask(options[0].IPv4Mask)
}
if ip.To16() != nil {
return ip.Mask(options[0].IPv6Mask)
}
return ip
}
func getIPFromXFFHeader(r *http.Request) net.IP {
headers := r.Header.Values("X-Forwarded-For")
if len(headers) == 0 {
return nil
}
parts := []string{}
for _, header := range headers {
parts = append(parts, strings.Split(header, ",")...)
}
for i := range parts {
part := strings.TrimSpace(parts[i])
ip := net.ParseIP(part)
if ip != nil {
return ip
}
}
return nil
}
func getIPFromHeader(r *http.Request, name string) net.IP {
header := strings.TrimSpace(r.Header.Get(name))
if header == "" {
return nil
}
ip := net.ParseIP(header)
if ip != nil {
return ip
}
return nil
}
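A sketch of how these helpers resolve and mask the client IP (request values are illustrative):

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"

	"github.com/ulule/limiter/v3"
)

func main() {
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.RemoteAddr = "203.0.113.9:54321"
	req.Header.Set("X-Forwarded-For", "198.51.100.1, 203.0.113.9")

	// Without options, only RemoteAddr is used.
	fmt.Println(limiter.GetIP(req)) // 203.0.113.9

	// With TrustForwardHeader, the first X-Forwarded-For entry wins.
	fmt.Println(limiter.GetIP(req, limiter.Options{TrustForwardHeader: true})) // 198.51.100.1

	// GetIPWithMask additionally applies the configured mask, grouping nearby clients.
	opts := limiter.Options{
		TrustForwardHeader: true,
		IPv4Mask:           net.CIDRMask(24, 32),
		IPv6Mask:           net.CIDRMask(64, 128),
	}
	fmt.Println(limiter.GetIPWithMask(req, opts)) // 198.51.100.0
}
```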

61
vendor/github.com/ulule/limiter/v3/options.go generated vendored Normal file

@ -0,0 +1,61 @@
package limiter
import (
"net"
)
// Option is a functional option.
type Option func(*Options)
// Options are limiter options.
type Options struct {
// IPv4Mask defines the mask used to obtain an IPv4 address.
IPv4Mask net.IPMask
// IPv6Mask defines the mask used to obtain an IPv6 address.
IPv6Mask net.IPMask
// TrustForwardHeader enables parsing of the X-Real-IP and X-Forwarded-For headers to obtain the user IP.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
TrustForwardHeader bool
// ClientIPHeader defines a custom header (likely defined by your CDN or Cloud provider) to obtain user IP.
// If configured, this option will override "TrustForwardHeader" option.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
ClientIPHeader string
}
// WithIPv4Mask will configure the limiter to use given mask for IPv4 address.
func WithIPv4Mask(mask net.IPMask) Option {
return func(o *Options) {
o.IPv4Mask = mask
}
}
// WithIPv6Mask will configure the limiter to use given mask for IPv6 address.
func WithIPv6Mask(mask net.IPMask) Option {
return func(o *Options) {
o.IPv6Mask = mask
}
}
// WithTrustForwardHeader will configure the limiter to trust X-Real-IP and X-Forwarded-For headers.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func WithTrustForwardHeader(enable bool) Option {
return func(o *Options) {
o.TrustForwardHeader = enable
}
}
// WithClientIPHeader will configure the limiter to use a custom header to obtain user IP.
// Please be advised that using this option could be insecure (ie: spoofed) if your reverse
// proxy is not configured properly to forward a trustworthy client IP.
// Please read the section "Limiter behind a reverse proxy" in the README for further information.
func WithClientIPHeader(header string) Option {
return func(o *Options) {
o.ClientIPHeader = header
}
}

53
vendor/github.com/ulule/limiter/v3/rate.go generated vendored Normal file

@ -0,0 +1,53 @@
package limiter
import (
"strconv"
"strings"
"time"
"github.com/pkg/errors"
)
// Rate is the rate.
type Rate struct {
Formatted string
Period time.Duration
Limit int64
}
// NewRateFromFormatted returns the rate from the formatted version.
func NewRateFromFormatted(formatted string) (Rate, error) {
rate := Rate{}
values := strings.Split(formatted, "-")
if len(values) != 2 {
return rate, errors.Errorf("incorrect format '%s'", formatted)
}
periods := map[string]time.Duration{
"S": time.Second, // Second
"M": time.Minute, // Minute
"H": time.Hour, // Hour
"D": time.Hour * 24, // Day
}
limit, period := values[0], strings.ToUpper(values[1])
p, ok := periods[period]
if !ok {
return rate, errors.Errorf("incorrect period '%s'", period)
}
l, err := strconv.ParseInt(limit, 10, 64)
if err != nil {
return rate, errors.Errorf("incorrect limit '%s'", limit)
}
rate = Rate{
Formatted: formatted,
Period: p,
Limit: l,
}
return rate, nil
}
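A short sketch of the format in action:

```go
rate, err := limiter.NewRateFromFormatted("200-M")
// err == nil, rate.Limit == 200, rate.Period == time.Minute

_, err = limiter.NewRateFromFormatted("fast-H")
// err != nil: incorrect limit 'fast'
```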

34
vendor/github.com/ulule/limiter/v3/store.go generated vendored Normal file

@ -0,0 +1,34 @@
package limiter
import (
"context"
"time"
)
// Store is the common interface for limiter stores.
type Store interface {
// Get returns the limit for given identifier.
Get(ctx context.Context, key string, rate Rate) (Context, error)
// Peek returns the limit for given identifier, without modification on current values.
Peek(ctx context.Context, key string, rate Rate) (Context, error)
// Reset resets the limit to zero for given identifier.
Reset(ctx context.Context, key string, rate Rate) (Context, error)
// Increment increments the limit by given count & gives back the new limit for given identifier
Increment(ctx context.Context, key string, count int64, rate Rate) (Context, error)
}
// StoreOptions are options for store.
type StoreOptions struct {
// Prefix is the prefix to use for the key.
Prefix string
// MaxRetry is the maximum number of retry under race conditions on redis store.
// Deprecated: this option is no longer required since all operations are atomic now.
MaxRetry int
// CleanUpInterval is the interval for cleanup (run garbage collection) on stale entries on memory store.
// Setting this to a low value will optimize memory consumption, but will likely
// reduce performance and increase lock contention.
// Setting this to a high value will maximize throughput, but will increase the memory footprint.
CleanUpInterval time.Duration
}
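Any backend can be plugged in by implementing this interface. Below is a minimal, hypothetical no-op store that never limits anything, shown only to illustrate the required shape (it is not part of the library):

```go
package noop

import (
	"context"
	"time"

	"github.com/ulule/limiter/v3"
)

// Store satisfies limiter.Store but never counts anything, so the limit is
// never reached. Useful only as a template for real implementations.
type Store struct{}

func (Store) Get(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
	return unlimited(rate), nil
}

func (Store) Peek(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
	return unlimited(rate), nil
}

func (Store) Reset(ctx context.Context, key string, rate limiter.Rate) (limiter.Context, error) {
	return unlimited(rate), nil
}

func (Store) Increment(ctx context.Context, key string, count int64, rate limiter.Rate) (limiter.Context, error) {
	return unlimited(rate), nil
}

func unlimited(rate limiter.Rate) limiter.Context {
	return limiter.Context{
		Limit:     rate.Limit,
		Remaining: rate.Limit,
		Reset:     time.Now().Add(rate.Period).Unix(),
		Reached:   false,
	}
}
```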
}