Merging upstream version 1.9.4.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent 7ac9951505
commit db5ed8b1cc
131 changed files with 3811 additions and 826 deletions
@@ -1,9 +1,8 @@
 [bumpversion]
-current_version = 1.9.1
+current_version = 1.9.4
 commit = True
 tag = True

 [bumpversion:file:iredis/__init__.py]

 [bumpversion:file:pyproject.toml]
.github/workflows/release.yaml (vendored, 10 changes)
@@ -89,7 +89,7 @@ jobs:
       run: |
         python3 -m venv venv
         . venv/bin/activate
-        pip install -U pip
+        pip install pip==21.1
         pip install poetry
         poetry install
         python -c "import sys; print(sys.version)"
@@ -107,18 +107,12 @@ jobs:
         iredis -h
         iredis help GET

-    - name: Cache cargo registry
-      uses: actions/cache@v1
-      with:
-        path: ~/.cargo/registry
-        key: ${{ runner.os }}-cargo-registry
-
     - name: Executable Build
       run: |
         # pyoxidizer doesn't know the wheel path, and it doesn't support passing env vars
         export WHEEL_PATH=`ls ./dist/iredis*.whl`
         envsubst '$WHEEL_PATH' < pyoxidizer.template.bzl > pyoxidizer.bzl
-        cargo install pyoxidizer --vers 0.6.0
+        pip install pyoxidizer
         pyoxidizer build --release install
         cd ./build/x86*/release/install
         tar -zcf ../../../iredis.tar.gz lib/ iredis
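The `envsubst` step above only exists to inject the freshly built wheel's path into the PyOxidizer config template. For readers unfamiliar with `envsubst`, a rough Python equivalent of that single substitution is sketched below; the file names come from the workflow above and the script itself is illustrative, not part of the repository:

```python
import glob
from string import Template

# Find the wheel produced by `poetry build`, mirroring `ls ./dist/iredis*.whl`.
wheel_path = glob.glob("./dist/iredis*.whl")[0]

# Render pyoxidizer.template.bzl -> pyoxidizer.bzl, substituting only $WHEEL_PATH,
# which is exactly what `envsubst '$WHEEL_PATH'` restricts itself to.
with open("pyoxidizer.template.bzl") as src:
    template = Template(src.read())

with open("pyoxidizer.bzl", "w") as dst:
    dst.write(template.safe_substitute(WHEEL_PATH=wheel_path))
```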
.github/workflows/test-binary-build.yaml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
+name: Test binary build.
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+
+jobs:
+  test-release-binary:
+    name: Test Build Executable Binary. You can download from Artifact after building.
+    runs-on: ubuntu-16.04
+
+    # FIXME
+    # help test shouldn't depends on this to run
+    services:
+      redis:
+        image: redis
+        ports:
+          - 6379:6379
+        options: --entrypoint redis-server
+
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
+        with:
+          python-version: 3.7
+          architecture: 'x64'
+      - name: Cache venv
+        uses: actions/cache@v1
+        with:
+          path: venv
+          # Look to see if there is a cache hit for the corresponding requirements file
+          key: ubuntu-16.04-poetryenv-${{ hashFiles('poetry.lock') }}
+      - name: Install Dependencies
+        run: |
+          python3 -m venv venv
+          . venv/bin/activate
+          pip install pip==21.1
+          pip install poetry
+          poetry install
+          python -c "import sys; print(sys.version)"
+          pip list
+      - name: Poetry Build
+        run: |
+          . venv/bin/activate
+          poetry build
+      - name: Test Build
+        run: |
+          python3 -m venv fresh_env
+          . fresh_env/bin/activate
+          pip install dist/*.whl
+
+          iredis -h
+          iredis help GET
+
+      - name: Executable Build
+        run: |
+          # pyoxidizer doesn't know the wheel path, and it doesn't support passing env vars
+          export WHEEL_PATH=`ls ./dist/iredis*.whl`
+          envsubst '$WHEEL_PATH' < pyoxidizer.template.bzl > pyoxidizer.bzl
+          pip install pyoxidizer
+          pyoxidizer build --release install
+          cd ./build/x86*/release/install
+          tar -zcf ../../../iredis.tar.gz lib/ iredis
+          cd -
+
+      - name: Test Executable
+        run: |
+          ./build/x86*/release/install/iredis -h
+          ./build/x86*/release/install/iredis help GET
+
+      - name: Upload Release Asset to Github Artifact
+        uses: actions/upload-artifact@v2
+        with:
+          name: iredis-${{github.sha}}.tar.gz
+          path: ./build/iredis.tar.gz
.github/workflows/test.yaml (vendored, 14 changes)
@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-16.04]
-        python: ['3.6', '3.7', '3.8']
+        python: ['3.6', '3.7', '3.8', '3.9']
         redis: [5, 6]
     runs-on: ${{ matrix.os }}

@@ -25,21 +25,21 @@ jobs:

     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python }}
          architecture: 'x64'
       - name: Cache venv
-        uses: actions/cache@v1
+        uses: actions/cache@v2
        with:
          path: venv
          # Look to see if there is a cache hit for the corresponding requirements file
-          key: ${{ matrix.os }}-poetryenv-${{ hashFiles('poetry.lock') }}
+          key: poetryenv-${{ matrix.os }}-${{ matrix.python }}-${{ hashFiles('poetry.lock') }}
       - name: Install Dependencies
        run: |
          python3 -m venv venv
          . venv/bin/activate
-          pip install -U pip
+          pip install -U pip==21.1 setuptools
          pip install poetry
          poetry install
          python -c "import sys; print(sys.version)"

@@ -49,7 +49,7 @@ jobs:
        REDIS_VERSION: ${{ matrix.redis }}
       run: |
         . venv/bin/activate
-        pytest
+        pytest || cat cli_test.log
   lint:
     name: flake8 & black
     runs-on: ubuntu-16.04

@@ -61,7 +61,7 @@ jobs:
          python-version: 3.7
          architecture: 'x64'
       - name: Cache venv
-        uses: actions/cache@v1
+        uses: actions/cache@v2
        with:
          path: venv
          # Look to see if there is a cache hit for the corresponding requirements file
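The reworked cache key above now includes the Python version from the matrix, so a virtualenv built for one interpreter is never restored for another one. A hedged sketch of the idea in Python; this only illustrates what goes into the key, since `actions/cache` computes `hashFiles()` on its own:

```python
import hashlib
import platform
import sys

def venv_cache_key(lockfile: str = "poetry.lock") -> str:
    """Mimic the workflow's key: reuse the venv only when the OS,
    the Python version, and poetry.lock all match."""
    with open(lockfile, "rb") as f:
        lock_hash = hashlib.sha256(f.read()).hexdigest()[:16]
    python = "{}.{}".format(*sys.version_info[:2])
    return f"poetryenv-{platform.system().lower()}-{python}-{lock_hash}"

# e.g. 'poetryenv-linux-3.9-1a2b3c4d5e6f7a8b'
print(venv_cache_key())
```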
.gitignore (vendored, 3 changes)
@@ -105,4 +105,5 @@ venv.bak/
 *.aof

 # IDE
-.vscode
+.vscode
+.idea/
CHANGELOG.md (25 changes)
@@ -1,3 +1,26 @@
+## 1.10
+
+- Feature: more human readable output for `HELP` command like `ACL HELP` and
+  `MEMORY HELP`.
+- Feature: you can use <kbd>Ctrl</kbd> + <kbd>C</kbd> to cancel a blocking
+  command like `BLPOP`.
+
+### 1.9.4
+
+- Bugfix: respect newbie_mode set in config, if cli flag is missing. thanks to [sid-maddy]
+
+### 1.9.3
+
+- Bugfix: When IRedis start with `--decode=utf-8`, command with shell pipe will
+  fail. ( [#383](https://github.com/laixintao/iredis/issues/383)). Thanks to
+  [hanaasagi].
+
+### 1.9.2
+
+- Bugfix: before `cluster` commands' `node-id` only accept numbers, not it's
+  fixed. `node-id` can be `\w+`.
+- Feature: support set client name for iredis connections via `--client-name`.
+
 ### 1.9.1

 - Feature: support auto-reissue command to another Redis server, when got a
@@ -216,3 +239,5 @@
 [lyqscmy]: https://github.com/lyqscmy
 [brianmaissy]: https://github.com/brianmaissy
 [otms61]: https://github.com/otms61
+[hanaasagi]: https://github.com/Hanaasagi
+[sid-maddy]: https://github.com/sid-maddy
README.md (10 changes)
@@ -2,12 +2,12 @@
   <img width="100" height="100" src="https://raw.githubusercontent.com/laixintao/iredis/master/docs/assets/logo.png" />
 </p>

-<h3 align="center">Interactive Redis: A Cli for Redis with AutoCompletion and Syntax Highlighting.</h4>
+<h3 align="center">Interactive Redis: A Cli for Redis with AutoCompletion and Syntax Highlighting.</h3>

 <p align="center">
   <a href="https://github.com/laixintao/iredis/actions"><img src="https://github.com/laixintao/iredis/workflows/Test/badge.svg" alt="Github Action"></a>
   <a href="https://badge.fury.io/py/iredis"><img src="https://badge.fury.io/py/iredis.svg" alt="PyPI version"></a>
-  <img src="https://badgen.net/badge/python/3.6%20|%203.7%20|%203.8/" alt="Python version">
+  <img src="https://badgen.net/badge/python/3.6%20%7C%203.7%20%7C%203.8%20%7C%203.9/" alt="Python version">
   <a href="https://pepy.tech/project/iredis"><img src="https://pepy.tech/badge/iredis" alt="Download stats"></a>
   <a href="https://t.me/iredis_users"><img src="https://badgen.net/badge/icon/join?icon=telegram&label=usergroup" alt="Chat on telegram"></a>
   <a href="https://console.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/laixintao/iredis&cloudshell_print=docs/cloudshell/run-in-docker.txt"><img src="https://badgen.net/badge/run/GoogleCloudShell/blue?icon=terminal" alt="Open in Cloud Shell"></a>
@@ -145,7 +145,7 @@ like <kbd>Ctrl</kbd> + <kbd>F</kbd> to forward work.

 Also:

-- <kbd>Ctrl</kbd> + <kbd>F</kbd> (i.e. EOF) to exit; you can also use the `exit`
+- <kbd>Ctrl</kbd> + <kbd>D</kbd> (i.e. EOF) to exit; you can also use the `exit`
   command.
 - <kbd>Ctrl</kbd> + <kbd>L</kbd> to clear screen; you can also use the `clear`
   command.
@@ -156,8 +156,8 @@ Also:

 ### Release Strategy

-IRedis is built and released by CircleCI. Whenever a tag is pushed to the
-`master` branch, a new release is built and uploaded to pypi.org, it's very
+IRedis is built and released by `GitHub Actions`. Whenever a tag is pushed to
+the `master` branch, a new release is built and uploaded to pypi.org, it's very
 convenient.

 Thus, we release as often as possible, so that users can always enjoy the new
@@ -1 +1 @@
-__version__ = "1.9.1"
+__version__ = "1.9.4"
@@ -24,12 +24,12 @@ class BottomToolbar
     def render(self):
         text = BUTTOM_TEXT
-        # add command help if valide
+        # add command help if valid
         if self.command_holder.command:
             try:
                 command_info = commands_summary[self.command_holder.command]
                 text = command_syntax(self.command_holder.command, command_info)
             except KeyError as e:
                 logger.exception(e)
                 pass

         return text
@@ -62,6 +62,7 @@ class Client:
         path=None,
         scheme="redis",
         username=None,
+        client_name=None,
     ):
         self.host = host
         self.port = port
@@ -69,17 +70,11 @@ class Client:
         self.path = path
         # FIXME username is not using...
         self.username = username
+        self.client_name = client_name
         self.scheme = scheme
         self.password = password

-        self.connection = self.create_connection(
-            host,
-            port,
-            db,
-            password,
-            path,
-            scheme,
-            username,
-        )
+        self.build_connection()

         # all command upper case
         self.answer_callbacks = command2callback
@@ -101,6 +96,21 @@ class Client:
         if config.version and re.match(r"([\d\.]+)", config.version):
             self.auth_compat(config.version)

+    def build_connection(self):
+        """
+        create a new connection and replace ``self.connection``
+        """
+        self.connection = self.create_connection(
+            self.host,
+            self.port,
+            self.db,
+            self.password,
+            self.path,
+            self.scheme,
+            self.username,
+            client_name=self.client_name,
+        )
+
     def create_connection(
         self,
         host=None,
@@ -110,6 +120,7 @@ class Client:
         path=None,
         scheme="redis",
         username=None,
+        client_name=None,
     ):
         if scheme in ("redis", "rediss"):
             connection_kwargs = {
@@ -118,13 +129,19 @@ class Client:
                 "db": db,
                 "password": password,
                 "socket_keepalive": config.socket_keepalive,
+                "client_name": client_name,
             }
             if scheme == "rediss":
                 connection_class = SSLConnection
             else:
                 connection_class = Connection
         else:
-            connection_kwargs = {"db": db, "password": password, "path": path}
+            connection_kwargs = {
+                "db": db,
+                "password": password,
+                "path": path,
+                "client_name": client_name,
+            }
             connection_class = UnixDomainSocketConnection

         if config.decode:
@@ -242,6 +259,15 @@ class Client:
         except redis.exceptions.ExecAbortError:
             config.transaction = False
             raise
+        except KeyboardInterrupt:
+            logger.warning("received KeyboardInterrupt... rebuild connection...")
+            connection.disconnect()
+            connection.connect()
+            print(
+                "KeyboardInterrupt received! User canceled reading response!",
+                file=sys.stderr,
+            )
+            return None
         else:
             return response
         raise last_error
@@ -338,7 +364,7 @@ class Client:
         grammar = completer.get_completer(input_text=rawinput).compiled_grammar
         matched = grammar.match(rawinput)
         if not matched:
-            # invalide command!
+            # invalid command!
             return rawinput, None
         variables = matched.variables()
         shell_command = variables.get("shellcommand")
@@ -397,12 +423,7 @@ class Client:
         # subcommand's stdout/stderr
         if shell_command and config.shell:
             # pass the raw response of redis to shell command
-            if isinstance(redis_resp, list):
-                # FIXME not handling nested list, use renders.render_raw
-                # instead
-                stdin = b"\n".join(redis_resp)
-            else:
-                stdin = redis_resp
+            stdin = OutputRender.render_raw(redis_resp)
             run(shell_command, input=stdin, shell=True)
             return

@@ -486,7 +507,7 @@ class Client:
         redis_grammar = completer.get_completer(command).compiled_grammar
         m = redis_grammar.match(command)
         if not m:
-            # invalide command!
+            # invalid command!
             return
         variables = m.variables()
         # zset withscores
@@ -501,7 +522,7 @@ class Client:
             doc = read_text(commands_data, f"{command_docs_name}.md")
         except FileNotFoundError:
             raise NotRedisCommand(
-                f"{command_summary_name} is not a valide Redis command."
+                f"{command_summary_name} is not a valid Redis command."
             )
         rendered_detail = markdown.render(doc)
         summary_dict = commands_summary[command_summary_name]
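The new `KeyboardInterrupt` branch above lets the user cancel a blocking command (such as `BLPOP`) without leaving the connection in a half-read state. A minimal sketch of the same pattern using plain redis-py primitives, not the iredis code itself:

```python
import sys

from redis.connection import Connection

def read_response_or_cancel(connection: Connection):
    """Read one reply; on Ctrl+C, reset the connection so the next
    command starts from a clean protocol state, and return None."""
    try:
        return connection.read_response()
    except KeyboardInterrupt:
        connection.disconnect()
        connection.connect()
        print(
            "KeyboardInterrupt received! User canceled reading response!",
            file=sys.stderr,
        )
        return None
```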
@@ -94,7 +94,6 @@ commands_summary.update(
         "PEEK": {
             "summary": "Get the key's type and value.",
             "arguments": [{"name": "key", "type": "key"}],
-            "since": "1.0",
             "complexity": "O(1).",
             "since": "1.0",
             "group": "iredis",
@@ -135,7 +134,7 @@ def split_command_args(command):
             input_args = command[matcher.end() :]
             break
     else:
-        raise InvalidArguments(f"`{command}` is not a valide Redis Command")
+        raise InvalidArguments(f"`{command}` is not a valid Redis Command")

     args = list(strip_quote_args(input_args))

@@ -191,7 +191,7 @@ class IRedisCompleter(Completer):
         grammar = completer.compiled_grammar
         m = grammar.match(command)
         if not m:
-            # invalide command!
+            # invalid command!
             return
         variables = m.variables()

@@ -123,7 +123,7 @@ server,ACL CAT,command_categorynamex,render_list
 server,ACL DELUSER,command_usernames,render_int
 server,ACL GENPASS,command_countx,render_bulk_string
 server,ACL GETUSER,command_username,render_list
-server,ACL HELP,command,render_list
+server,ACL HELP,command,render_help
 server,ACL LIST,command,render_list
 server,ACL LOAD,command,render_simple_string
 server,ACL LOG,command_count_or_resetx,render_list_or_string
@@ -152,12 +152,12 @@ server,LOLWUT,command_version,render_bytes
 server,LASTSAVE,command,render_unixtime
 server,LATENCY DOCTOR,command,render_bulk_string_decode
 server,LATENCY GRAPH,command_graphevent,render_bulk_string_decode
-server,LATENCY HELP,command,render_list
+server,LATENCY HELP,command,render_help
 server,LATENCY HISTORY,command_graphevent,render_list
 server,LATENCY LATEST,command,render_list
 server,LATENCY RESET,command_graphevents,render_int
 server,MEMORY DOCTOR,command,render_bulk_string_decode
-server,MEMORY HELP,command,render_list
+server,MEMORY HELP,command,render_help
 server,MEMORY MALLOC-STATS,command,render_bulk_string_decode
 server,MEMORY PURGE,command,render_simple_string
 server,MEMORY STATS,command,render_nested_pair
(File diff suppressed because it is too large.)
@@ -8,6 +8,10 @@ rules used to configure the user, it is still functionally identical.

 @array-reply: a list of ACL rule definitions for the user.

+@history
+
+- `>= 6.2`: Added Pub/Sub channel patterns.
+
 @examples

 Here's the default configuration for the default user:
@@ -25,4 +29,6 @@ Here's the default configuration for the default user:
 6) "+@all"
 7) "keys"
 8) 1) "*"
+9) "channels"
+10) 1) "*"
 ```
@@ -12,6 +12,6 @@ An array of strings.

 ```
 > ACL LIST
-1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* +@all -@admin -@dangerous"
-2) "user default on nopass ~* +@all"
+1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* &* +@all -@admin -@dangerous"
+2) "user default on nopass ~* &* +@all"
 ```
@@ -55,10 +55,17 @@ This is a list of all the supported Redis ACL rules:
 deleted user to be disconnected.
 - `~<pattern>`: add the specified key pattern (glob style pattern, like in the
 `KEYS` command), to the list of key patterns accessible by the user. You can
-add as many key patterns you want to the same user. Example: `~objects:*`
+add multiple key patterns to the same user. Example: `~objects:*`
 - `allkeys`: alias for `~*`, it allows the user to access all the keys.
-- `resetkey`: removes all the key patterns from the list of key patterns the
+- `resetkeys`: removes all the key patterns from the list of key patterns the
 user can access.
+- `&<pattern>`: add the specified glob style pattern to the list of Pub/Sub
+channel patterns accessible by the user. You can add multiple channel patterns
+to the same user. Example: `&chatroom:*`
+- `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub
+channels.
+- `resetchannels`: removes all channel patterns from the list of Pub/Sub channel
+patterns the user can access.
 - `+<command>`: add this command to the list of the commands the user can call.
 Example: `+zadd`.
 - `+@<category>`: add all the commands in the specified category to the list of
@@ -87,7 +94,7 @@ This is a list of all the supported Redis ACL rules:
 - `>password`: Add the specified clear text password as an hashed password in
 the list of the users passwords. Every user can have many active passwords, so
 that password rotation will be simpler. The specified password is not stored
-in cleartext inside the server. Example: `>mypassword`.
+as clear text inside the server. Example: `>mypassword`.
 - `#<hashedpassword>`: Add the specified hashed password to the list of user
 passwords. A Redis hashed password is hashed with SHA256 and translated into a
 hexadecimal string. Example:
@@ -104,6 +111,10 @@ This is a list of all the supported Redis ACL rules:

 If the rules contain errors, the error is returned.

+@history
+
+- `>= 6.2`: Added Pub/Sub channel patterns.
+
 @examples

 ```
@@ -29,6 +29,10 @@ defined in the ACL list (see `ACL SETUSER`) and the official
 When ACLs are used, the single argument form of the command, where only the
 password is specified, assumes that the implicit username is "default".

+@history
+
+- `>= 6.0.0`: Added ACL style (username and password).
+
 ## Security notice

 Because of the high performance nature of Redis, it is possible to try a lot of
@@ -20,11 +20,11 @@ offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0:

 Note that:

-1. Addressing with `GET` bits outside the current string length (including the
+1. Addressing with `!GET` bits outside the current string length (including the
 case the key does not exist at all), results in the operation to be performed
 like the missing part all consists of bits set to 0.
-2. Addressing with `SET` or `INCRBY` bits outside the current string length will
-enlarge the string, zero-padding it, as needed, for the minimal length
+2. Addressing with `!SET` or `!INCRBY` bits outside the current string length
+will enlarge the string, zero-padding it, as needed, for the minimal length
 needed, according to the most far bit touched.

 ## Supported subcommands and integer types
@@ -39,7 +39,7 @@ The following is the list of supported commands.
 value.

 There is another subcommand that only changes the behavior of successive
-`INCRBY` subcommand calls by setting the overflow behavior:
+`!INCRBY` and `!SET` subcommands calls by setting the overflow behavior:

 - **OVERFLOW** `[WRAP|SAT|FAIL]`

@@ -91,8 +91,9 @@ following behaviors:
 detected. The corresponding return value is set to NULL to signal the
 condition to the caller.

-Note that each `OVERFLOW` statement only affects the `INCRBY` commands that
-follow it in the list of subcommands, up to the next `OVERFLOW` statement.
+Note that each `OVERFLOW` statement only affects the `!INCRBY` and `!SET`
+commands that follow it in the list of subcommands, up to the next `OVERFLOW`
+statement.

 By default, **WRAP** is used if not otherwise specified.

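Because `OVERFLOW` only applies to the `!INCRBY`/`!SET` calls that follow it, the easiest way to see the difference is a single `BITFIELD` invocation that switches policy midway. A hedged redis-py sketch, using `execute_command` so it works even on client versions without a `bitfield` helper and assuming a local server:

```python
import redis

r = redis.Redis()
r.delete("bf:demo")  # start from a missing key so the arithmetic is predictable

# OVERFLOW affects only the subcommands after it, up to the next OVERFLOW.
reply = r.execute_command(
    "BITFIELD", "bf:demo",
    "OVERFLOW", "SAT", "INCRBY", "u8", 0, 200,   # 0 + 200 = 200, no overflow
    "INCRBY", "u8", 0, 200,                      # 400 saturates at the u8 max, 255
    "OVERFLOW", "WRAP", "INCRBY", "u8", 0, 200,  # 455 wraps around to 199
)
print(reply)  # [200, 255, 199]
```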
iredis/data/commands/blmove.md (new file, 23 lines)
@@ -0,0 +1,23 @@
+`BLMOVE` is the blocking variant of `LMOVE`. When `source` contains elements,
+this command behaves exactly like `LMOVE`. When used inside a `MULTI`/`EXEC`
+block, this command behaves exactly like `LMOVE`. When `source` is empty, Redis
+will block the connection until another client pushes to it or until `timeout`
+is reached. A `timeout` of zero can be used to block indefinitely.
+
+This command comes in place of the now deprecated `BRPOPLPUSH`. Doing
+`BLMOVE RIGHT LEFT` is equivalent.
+
+See `LMOVE` for more information.
+
+@return
+
+@bulk-string-reply: the element being popped from `source` and pushed to
+`destination`. If `timeout` is reached, a @nil-reply is returned.
+
+## Pattern: Reliable queue
+
+Please see the pattern description in the `LMOVE` documentation.
+
+## Pattern: Circular list
+
+Please see the pattern description in the `LMOVE` documentation.
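A hedged sketch of the reliable-queue pattern mentioned above, driven from Python. It issues `BLMOVE` through redis-py's `execute_command` (older client releases have no dedicated helper), and the trivial `process_job` function is only a placeholder for real work:

```python
import redis

r = redis.Redis()  # assumes a local Redis >= 6.2

def process_job(job: bytes) -> None:
    print("handling", job)  # placeholder for real work

def pop_reliably(src="jobs", processing="jobs:processing", timeout=0):
    """Atomically move one job onto a processing list, blocking until a job
    arrives (a timeout of 0 blocks indefinitely)."""
    job = r.execute_command("BLMOVE", src, processing, "RIGHT", "LEFT", timeout)
    if job is not None:
        process_job(job)
        r.lrem(processing, 1, job)  # acknowledge: drop it from the processing list
    return job
```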
@@ -34,7 +34,7 @@ client will unblock returning a `nil` multi-bulk value when the specified
 timeout has expired without a push operation against at least one of the
 specified keys.

-**The timeout argument is interpreted as an integer value specifying the maximum
+**The timeout argument is interpreted as a double value specifying the maximum
 number of seconds to block**. A timeout of zero can be used to block
 indefinitely.

@@ -129,6 +129,10 @@ If you like science fiction, think of time flowing at infinite speed inside a
 where an element was popped and the second element being the value of the
 popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```
@@ -18,6 +18,10 @@ the tail of a list instead of popping from the head.
 where an element was popped and the second element being the value of the
 popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```
@@ -5,6 +5,9 @@ elements, this command behaves exactly like `RPOPLPUSH`. When used inside a
 to it or until `timeout` is reached. A `timeout` of zero can be used to block
 indefinitely.

+As per Redis 6.2.0, BRPOPLPUSH is considered deprecated. Please prefer `BLMOVE`
+in new code.
+
 See `RPOPLPUSH` for more information.

 @return
@@ -12,6 +15,10 @@ See `RPOPLPUSH` for more information.
 @bulk-string-reply: the element being popped from `source` and pushed to
 `destination`. If `timeout` is reached, a @nil-reply is returned.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 ## Pattern: Reliable queue

 Please see the pattern description in the `RPOPLPUSH` documentation.
@@ -5,7 +5,7 @@ members to pop from any of the given sorted sets. A member with the highest
 score is popped from first sorted set that is non-empty, with the given keys
 being checked in the order that they are given.

-The `timeout` argument is interpreted as an integer value specifying the maximum
+The `timeout` argument is interpreted as a double value specifying the maximum
 number of seconds to block. A timeout of zero can be used to block indefinitely.

 See the [BZPOPMIN documentation][cb] for the exact semantics, since `BZPOPMAX`
@@ -23,6 +23,10 @@ with the highest scores instead of popping the ones with the lowest scores.
 where a member was popped, the second element is the popped member itself, and
 the third element is the score of the popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```
@@ -5,7 +5,7 @@ members to pop from any of the given sorted sets. A member with the lowest score
 is popped from first sorted set that is non-empty, with the given keys being
 checked in the order that they are given.

-The `timeout` argument is interpreted as an integer value specifying the maximum
+The `timeout` argument is interpreted as an double value specifying the maximum
 number of seconds to block. A timeout of zero can be used to block indefinitely.

 See the [BLPOP documentation][cl] for the exact semantics, since `BZPOPMIN` is
@@ -23,6 +23,10 @@ popped from.
 where a member was popped, the second element is the popped member itself, and
 the third element is the score of the popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```
@@ -1,7 +1,7 @@
 This command controls the tracking of the keys in the next command executed by
 the connection, when tracking is enabled in `OPTIN` or `OPTOUT` mode. Please
 check the [client side caching documentation](/topics/client-side-caching) for
-background informations.
+background information.

 When tracking is enabled Redis, using the `CLIENT TRACKING` command, it is
 possible to specify the `OPTIN` or `OPTOUT` options, so that keys in read only
iredis/data/commands/client-info.md (new file, 16 lines)
@@ -0,0 +1,16 @@
+The command returns information and statistics about the current client
+connection in a mostly human readable format.
+
+The reply format is identical to that of `CLIENT LIST`, and the content consists
+only of information about the current client.
+
+@examples
+
+```cli
+CLIENT INFO
+```
+
+@return
+
+@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for
+the current client.
@@ -1,14 +1,12 @@
-The `CLIENT KILL` command closes a given client connection. Up to Redis 2.8.11
-it was possible to close a connection only by client address, using the
-following form:
+The `CLIENT KILL` command closes a given client connection. This command support
+two formats, the old format:

 CLIENT KILL addr:port

 The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr`
 field).

-However starting with Redis 2.8.12 or greater, the command accepts the following
-form:
+The new format:

 CLIENT KILL <filter> <value> ... ... <filter> <value>

@@ -17,13 +15,14 @@ of killing just by address. The following filters are available:

 - `CLIENT KILL ADDR ip:port`. This is exactly the same as the old
 three-arguments behavior.
-- `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field,
-which was introduced in the `CLIENT LIST` command starting from Redis 2.8.12.
-- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `slave`
-and `pubsub` (the `master` type is available from v3.2). This closes the
-connections of **all the clients** in the specified class. Note that clients
-blocked into the `MONITOR` command are considered to belong to the `normal`
-class.
+- `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local
+(bind) address.
+- `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field.
+Client `ID`'s are retrieved using the `CLIENT LIST` command.
+- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `replica`
+and `pubsub`. This closes the connections of **all the clients** in the
+specified class. Note that clients blocked into the `MONITOR` command are
+considered to belong to the `normal` class.
 - `CLIENT KILL USER username`. Closes all the connections that are authenticated
 with the specified [ACL](/topics/acl) username, however it returns an error if
 the username does not map to an existing ACL user.
@@ -32,10 +31,6 @@ of killing just by address. The following filters are available:
 option to `no` will have the effect of also killing the client calling the
 command.

-**Note: starting with Redis 5 the project is no longer using the slave word. You
-can use `TYPE replica` instead, however the old form is still supported for
-backward compatibility.**
-
 It is possible to provide multiple filters at the same time. The command will
 handle multiple filters via logical AND. For example:

@@ -71,3 +66,12 @@ When called with the three arguments format:
 When called with the filter / value format:

 @integer-reply: the number of clients killed.
+
+@history
+
+- `>= 2.8.12`: Added new filter format.
+- `>= 2.8.12`: `ID` option.
+- `>= 3.2`: Added `master` type in for `TYPE` option.
+- `>= 5`: Replaced `slave` `TYPE` with `replica`. `slave` still supported for
+backward compatibility.
+- `>= 6.2`: `LADDR` option.
@@ -1,10 +1,13 @@
 The `CLIENT LIST` command returns information and statistics about the client
 connections server in a mostly human readable format.

-As of v5.0, the optional `TYPE type` subcommand can be used to filter the list
-by clients' type, where _type_ is one of `normal`, `master`, `replica` and
-`pubsub`. Note that clients blocked into the `MONITOR` command are considered to
-belong to the `normal` class.
+You can use one of the optional subcommands to filter the list. The `TYPE type`
+subcommand filters the list by clients' type, where _type_ is one of `normal`,
+`master`, `replica`, and `pubsub`. Note that clients blocked by the `MONITOR`
+command belong to the `normal` class.
+
+The `ID` filter only returns entries for clients with IDs matching the
+`client-id` arguments.

 @return

@@ -16,9 +19,10 @@ belong to the `normal` class.

 Here is the meaning of the fields:

-- `id`: an unique 64-bit client ID (introduced in Redis 2.8.12).
+- `id`: an unique 64-bit client ID.
 - `name`: the name set by the client with `CLIENT SETNAME`
 - `addr`: address/port of the client
+- `laddr`: address/port of local address client connected to (bind address)
 - `fd`: file descriptor corresponding to the socket
 - `age`: total duration of the connection in seconds
 - `idle`: idle time of the connection in seconds
@@ -35,6 +39,11 @@ Here is the meaning of the fields:
 - `omem`: output buffer memory usage
 - `events`: file descriptor events (see below)
 - `cmd`: last command played
+- `argv-mem`: incomplete arguments for the next command (already extracted from
+query buffer)
+- `tot-mem`: total memory consumed by this client in its various buffers
+- `redir`: client id of current client tracking redirection
+- `user`: the authenticated username of the client

 The client flags can be a combination of:

@@ -53,6 +62,9 @@ S: the client is a replica node connection to this instance
 u: the client is unblocked
 U: the client is connected via a Unix domain socket
 x: the client is in a MULTI/EXEC context
+t: the client enabled keys tracking in order to perform client side caching
+R: the client tracking target client is invalid
+B: the client enabled broadcast tracking mode
 ```

 The file descriptor events can be:
@@ -68,3 +80,9 @@ New fields are regularly added for debugging purpose. Some could be removed in
 the future. A version safe Redis client using this command should parse the
 output accordingly (i.e. handling gracefully missing fields, skipping unknown
 fields).
+
+@history
+
+- `>= 2.8.12`: Added unique client `id` field.
+- `>= 5.0`: Added optional `TYPE` filter.
+- `>= 6.2`: Added `laddr` field and the optional `ID` filter.
@@ -3,14 +3,28 @@ clients for the specified amount of time (in milliseconds).

 The command performs the following actions:

-- It stops processing all the pending commands from normal and pub/sub clients.
-However interactions with replicas will continue normally.
+- It stops processing all the pending commands from normal and pub/sub clients
+for the given mode. However interactions with replicas will continue normally.
+Note that clients are formally paused when they try to execute a command, so
+no work is taken on the server side for inactive clients.
 - However it returns OK to the caller ASAP, so the `CLIENT PAUSE` command
 execution is not paused by itself.
 - When the specified amount of time has elapsed, all the clients are unblocked:
 this will trigger the processing of all the commands accumulated in the query
 buffer of every client during the pause.

+Client pause currently supports two modes:
+
+- `ALL`: This is the default mode. All client commands are blocked.
+- `WRITE`: Clients are only blocked if they attempt to execute a write command.
+
+For the `WRITE` mode, some commands have special behavior:
+
+- `EVAL`/`EVALSHA`: Will block client for all scripts.
+- `PUBLISH`: Will block client.
+- `PFCOUNT`: Will block client.
+- `WAIT`: Acknowledgements will be delayed, so this command will appear blocked.
+
 This command is useful as it makes able to switch clients from a Redis instance
 to another one in a controlled way. For example during an instance upgrade the
 system administrator could do the following:
@@ -21,11 +35,16 @@ system administrator could do the following:
 - Turn one of the replicas into a master.
 - Reconfigure clients to connect with the new master.

-It is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the
-`INFO replication` command in order to get the current master offset at the time
-the clients are blocked. This way it is possible to wait for a specific offset
-in the replica side in order to make sure all the replication stream was
-processed.
+Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode
+will stop all replication traffic, can be aborted with the `CLIENT UNPAUSE`
+command, and allows reconfiguring the old master without risking accepting
+writes after the failover. This is also the mode used during cluster failover.
+
+For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC
+block together with the `INFO replication` command in order to get the current
+master offset at the time the clients are blocked. This way it is possible to
+wait for a specific offset in the replica side in order to make sure all the
+replication stream was processed.

 Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or
 expired during the time clients are paused. This way the dataset is guaranteed
@@ -36,3 +55,8 @@ but also from the point of view of internal operations.

 @simple-string-reply: The command returns OK or an error if the timeout is
 invalid.
+
+@history
+
+- `>= 3.2.10`: Client pause prevents client pause and key eviction as well.
+- `>= 6.2`: CLIENT PAUSE WRITE mode added along with the `mode` option.
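A hedged sketch of the `WRITE` pause workflow described above, driven from Python with raw commands via redis-py's `execute_command` (assumes a Redis >= 6.2 server):

```python
import redis

r = redis.Redis()

# Block only write commands for up to 10 seconds; reads keep being served.
r.execute_command("CLIENT", "PAUSE", 10000, "WRITE")

# ... promote a replica / reconfigure clients here ...

# Lift the pause early instead of waiting for the timeout to expire.
r.execute_command("CLIENT", "UNPAUSE")
```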
@@ -37,7 +37,9 @@ when enabling tracking:
 notifications will be provided only for keys starting with this string. This
 option can be given multiple times to register multiple prefixes. If
 broadcasting is enabled without this option, Redis will send notifications for
-every key.
+every key. You can't delete a single prefix, but you can delete all prefixes
+by disabling and re-enabling tracking. Using this option adds the additional
+time complexity of O(N^2), where N is the total number of prefixes tracked.
 - `OPTIN`: when broadcasting is NOT active, normally don't track keys in read
 only commands, unless they are called immediately after a `CLIENT CACHING yes`
 command.
iredis/data/commands/client-trackinginfo.md (new file, 25 lines)
@@ -0,0 +1,25 @@
+The command returns information about the current client connection's use of the
+[server assisted client side caching](/topics/client-side-caching) feature.
+
+@return
+
+@array-reply: a list of tracking information sections and their respective
+values, specifically:
+
+- **flags**: A list of tracking flags used by the connection. The flags and
+their meanings are as follows:
+  - `off`: The connection isn't using server assisted client side caching.
+  - `on`: Server assisted client side caching is enabled for the connection.
+  - `bcast`: The client uses broadcasting mode.
+  - `optin`: The client does not cache keys by default.
+  - `optout`: The client caches keys by default.
+  - `caching-yes`: The next command will cache keys (exists only together with
+  `optin`).
+  - `caching-no`: The next command won't cache keys (exists only together with
+  `optout`).
+  - `noloop`: The client isn't notified about keys modified by itself.
+  - `broken_redirect`: The client ID used for redirection isn't valid anymore.
+- **redirect**: The client ID used for notifications redirection, or -1 when
+none.
+- **prefixes**: A list of key prefixes for which notifications are sent to the
+client.
iredis/data/commands/client-unpause.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+`CLIENT UNPAUSE` is used to resume command processing for all clients that were
+paused by `CLIENT PAUSE`.
+
+@return
+
+@simple-string-reply: The command returns `OK`
@@ -46,7 +46,7 @@ bound with another node, or if the configuration epoch of the node advertising
 the new hash slot, is greater than the node currently listed in the table.

 This means that this command should be used with care only by applications
-orchestrating Redis Cluster, like `redis-trib`, and the command if used out of
+orchestrating Redis Cluster, like `redis-cli`, and the command if used out of
 the right context can leave the cluster in a wrong state or cause data loss.

 @return
@@ -38,7 +38,7 @@ node receiving the command:

 This command only works in cluster mode and may be useful for debugging and in
 order to manually orchestrate a cluster configuration when a new cluster is
-created. It is currently not used by `redis-trib`, and mainly exists for API
+created. It is currently not used by `redis-cli`, and mainly exists for API
 completeness.

 @return
@@ -3,6 +3,6 @@ Deletes all slots from a node.
 The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected
 node. It can only be called when the database is empty.

-@reply
+@return

 @simple-string-reply: `OK`
@@ -11,8 +11,8 @@ additional info appended at the end).
 Note that normally clients willing to fetch the map between Cluster hash slots
 and node addresses should use `CLUSTER SLOTS` instead. `CLUSTER NODES`, that
 provides more information, should be used for administrative tasks, debugging,
-and configuration inspections. It is also used by `redis-trib` in order to
-manage a cluster.
+and configuration inspections. It is also used by `redis-cli` in order to manage
+a cluster.

 ## Serialization format

@@ -41,8 +41,8 @@ The meaning of each filed is the following:
 2. `ip:port@cport`: The node address where clients should contact the node to
 run queries.
 3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`,
-`fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in
-detail in the next section.
+`fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are
+explained in detail in the next section.
 4. `master`: If the node is a replica, and the master is known, the master node
 ID, otherwise the "-" character.
 5. `ping-sent`: Milliseconds unix time at which the currently active ping was
@@ -74,6 +74,7 @@ Meaning of the flags (field number 3):
 promoted the `PFAIL` state to `FAIL`.
 - `handshake`: Untrusted node, we are handshaking.
 - `noaddr`: No address known for this node.
+- `nofailover`: Replica will not try to failover.
 - `noflags`: No flags at all.

 ## Notes on published config epochs
@@ -65,9 +65,10 @@ already migrated to the target node are executed in the target node, so that:
 ## CLUSTER SETSLOT `<slot>` STABLE

 This subcommand just clears migrating / importing state from the slot. It is
-mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`.
-Normally the two states are cleared automatically at the end of the migration
-using the `SETSLOT ... NODE ...` subcommand as explained in the next section.
+mainly used to fix a cluster stuck in a wrong state by
+`redis-cli --cluster fix`. Normally the two states are cleared automatically at
+the end of the migration using the `SETSLOT ... NODE ...` subcommand as
+explained in the next section.

 ## CLUSTER SETSLOT `<slot>` NODE `<node-id>`

@ -73,7 +73,7 @@ Command flags is @array-reply containing one or more status replies:
|
|||
|
||||
- _write_ - command may result in modifications
|
||||
- _readonly_ - command will never modify keys
|
||||
- _denyoom_ - reject command if currently OOM
|
||||
- _denyoom_ - reject command if currently out of memory
|
||||
- _admin_ - server admin command
|
||||
- _pubsub_ - pubsub-related command
|
||||
- _noscript_ - deny this command from scripts
|
||||
|
@ -109,8 +109,12 @@ relevant key positions.
|
|||
Complete list of commands currently requiring key location parsing:
|
||||
|
||||
- `SORT` - optional `STORE` key, optional `BY` weights, optional `GET` keys
|
||||
- `ZUNION` - keys stop when `WEIGHT` or `AGGREGATE` starts
|
||||
- `ZUNIONSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
|
||||
- `ZINTER` - keys stop when `WEIGHT` or `AGGREGATE` starts
|
||||
- `ZINTERSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
|
||||
- `ZDIFF` - keys stop after `numkeys` count arguments
|
||||
- `ZDIFFSTORE` - keys stop after `numkeys` count arguments
|
||||
- `EVAL` - keys stop after `numkeys` count arguments
|
||||
- `EVALSHA` - keys stop after `numkeys` count arguments
|
||||
|
||||
|
|
|
@@ -13,7 +13,7 @@ All the supported parameters have the same meaning of the equivalent
 configuration parameter used in the [redis.conf][hgcarr22rc] file, with the
 following important differences:

-[hgcarr22rc]: http://github.com/redis/redis/raw/2.8/redis.conf
+[hgcarr22rc]: http://github.com/redis/redis/raw/6.0/redis.conf

 - In options where bytes or other quantities are specified, it is not possible
 to use the `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth),
iredis/data/commands/copy.md (new file, 24 lines)
@@ -0,0 +1,24 @@
+This command copies the value stored at the `source` key to the `destination`
+key.
+
+By default, the `destination` key is created in the logical database used by the
+connection. The `DB` option allows specifying an alternative logical database
+index for the destination key.
+
+The command returns an error when the `destination` key already exists. The
+`REPLACE` option removes the `destination` key before copying the value to it.
+
+@return
+
+@integer-reply, specifically:
+
+- `1` if `source` was copied.
+- `0` if `source` was not copied.
+
+@examples
+
+```
+SET dolly "sheep"
+COPY dolly clone
+GET clone
+```
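A hedged Python sketch of the `COPY` options described above, sent as raw commands through redis-py's `execute_command` (assumes a local Redis >= 6.2):

```python
import redis

r = redis.Redis()

r.set("dolly", "sheep")
print(r.execute_command("COPY", "dolly", "clone"))             # 1: copied
print(r.execute_command("COPY", "dolly", "clone"))             # 0: clone already exists
print(r.execute_command("COPY", "dolly", "clone", "REPLACE"))  # 1: overwrite allowed
print(r.execute_command("COPY", "dolly", "backup", "DB", 1))   # copy into database 1
print(r.get("clone"))                                          # b'sheep'
```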
@ -214,6 +214,27 @@ format specified above (as a Lua table with an `err` field). The script can pass
|
|||
the exact error to the user by returning the error object returned by
|
||||
`redis.pcall()`.
|
||||
|
||||
## Running Lua under low memory conditions
|
||||
|
||||
When the memory usage in Redis exceeds the `maxmemory` limit, the first write
|
||||
command encountered in the Lua script that uses additional memory will cause the
|
||||
script to abort (unless `redis.pcall` was used). However, one thing to caution
|
||||
here is that if the first write command does not use additional memory such as
|
||||
DEL, LREM, or SREM, etc, Redis will allow it to run and all subsequent commands
|
||||
in the Lua script will execute to completion for atomicity. If the subsequent
|
||||
writes in the script generate additional memory, the Redis memory usage can go
|
||||
over `maxmemory`.
|
||||
|
||||
Another possible way for Lua script to cause Redis memory usage to go above
|
||||
`maxmemory` happens when the script execution starts when Redis is slightly
|
||||
below `maxmemory` so the first write command in the script is allowed. As the
|
||||
script executes, subsequent write commands continue to generate memory and
|
||||
causes the Redis server to go above `maxmemory`.
|
||||
|
||||
In those scenarios, it is recommended to configure the `maxmemory-policy` not to
|
||||
use `noeviction`. Also Lua scripts should be short so that evictions of items
|
||||
can happen in between Lua scripts.
|
||||
|
||||
## Bandwidth and EVALSHA
|
||||
|
||||
The `EVAL` command forces you to send the script body again and again. Redis
|
||||
|
@ -619,13 +640,13 @@ the cause of bugs.
|
|||
|
||||
## Using Lua scripting in RESP3 mode
|
||||
|
||||
Starting with Redis version 6, the server supports two differnent protocols. One
|
||||
Starting with Redis version 6, the server supports two different protocols. One
|
||||
is called RESP2, and is the old protocol: all the new connections to the server
|
||||
start in this mode. However clients are able to negotiate the new protocol using
|
||||
the `HELLO` command: this way the connection is put in RESP3 mode. In this mode
|
||||
certain commands, like for instance `HGETALL`, reply with a new data type (the
|
||||
Map data type in this specific case). The RESP3 protocol is semantically more
|
||||
powerful, however most scripts are ok with using just RESP2.
|
||||
powerful, however most scripts are OK with using just RESP2.
|
||||
|
||||
The Lua engine always assumes to run in RESP2 mode when talking with Redis, so
|
||||
whatever the connection that is invoking the `EVAL` or `EVALSHA` command is in
|
||||
|
@ -669,7 +690,7 @@ At this point the new conversions are available, specifically:
|
|||
- Lua table with a single `map` field set to a field-value Lua table -> Redis
|
||||
map reply.
|
||||
- Lua table with a single `set` field set to a field-value Lua table -> Redis
|
||||
set reply, the values are discared and can be anything.
|
||||
set reply, the values are discarded and can be anything.
|
||||
- Lua table with a single `double` field set to a field-value Lua table -> Redis
|
||||
double reply.
|
||||
- Lua null -> Redis RESP3 new null reply (protocol `"_\r\n"`).
|
||||
|
|
iredis/data/commands/eval_ro.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+This is a read-only variant of the `EVAL` command that isn't allowed to execute
+commands that modify data.
+
+Unlike `EVAL`, scripts executed with this command can always be killed and never
+affect the replication stream. Because it can only read data, this command can
+always be executed on a master or a replica.
+
+@examples
+
+```
+> SET mykey "Hello"
+OK
+
+> EVAL_RO "return redis.call('GET', KEYS[1])" 1 mykey
+"Hello"
+
+> EVAL_RO "return redis.call('DEL', KEYS[1])" 1 mykey
+(error) ERR Error running script (call to f_359f69785f876b7f3f60597d81534f3d6c403284): @user_script:1: @user_script: 1: Write commands are not allowed from read-only scripts
+```
iredis/data/commands/evalsha_ro.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+This is a read-only variant of the `EVALSHA` command that isn't allowed to
+execute commands that modify data.
+
+Unlike `EVALSHA`, scripts executed with this command can always be killed and
+never affect the replication stream. Because it can only read data, this command
+can always be executed on a master or a replica.
iredis/data/commands/expiretime.md (new file, 22 lines)
@@ -0,0 +1,22 @@
+Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which
+the given key will expire.
+
+See also the `PEXPIRETIME` command which returns the same information with
+milliseconds resolution.
+
+@return
+
+@integer-reply: Expiration Unix timestamp in seconds, or a negative value in
+order to signal an error (see the description below).
+
+- The command returns `-1` if the key exists but has no associated expiration
+time.
+- The command returns `-2` if the key does not exist.
+
+@examples
+
+```cli
+SET mykey "Hello"
+EXPIREAT mykey 33177117420
+EXPIRETIME mykey
+```
84
iredis/data/commands/failover.md
Normal file
84
iredis/data/commands/failover.md
Normal file
|
@ -0,0 +1,84 @@
|
|||
This command will start a coordinated failover between the
|
||||
currently-connected-to master and one of its replicas. The failover is not
|
||||
synchronous, instead a background task will handle coordinating the failover. It
|
||||
is designed to limit data loss and unavailability of the cluster during the
|
||||
failover. This command is analogous to the `CLUSTER FAILOVER` command for
|
||||
non-clustered Redis and is similar to the failover support provided by sentinel.
|
||||
|
||||
The specific details of the default failover flow are as follows:
|
||||
|
||||
1. The master will internally start a `CLIENT PAUSE WRITE`, which will pause
   incoming writes and prevent the accumulation of new data in the replication
   stream.
2. The master will monitor its replicas, waiting for a replica to indicate that
   it has fully consumed the replication stream. If the master has multiple
   replicas, it will only wait for the first replica to catch up.
3. The master will then demote itself to a replica. This is done to prevent any
   dual master scenarios. NOTE: The master will not discard its data, so it
   will be able to roll back if the replica rejects the failover request in the
   next step.
4. The previous master will send a special PSYNC request to the target replica,
   `PSYNC FAILOVER`, instructing the target replica to become a master.
5. Once the previous master receives acknowledgement that the `PSYNC FAILOVER`
   was accepted, it will unpause its clients. If the PSYNC request is rejected,
   the master will abort the failover and return to normal.

The field `master_failover_state` in `INFO replication` can be used to track the
current state of the failover, which has the following values:

- `no-failover`: There is no ongoing coordinated failover.
- `waiting-for-sync`: The master is waiting for the replica to catch up to its
  replication offset.
- `failover-in-progress`: The master has demoted itself, and is attempting to
  hand off ownership to a target replica.

If the previous master had additional replicas attached to it, they will
continue replicating from it as chained replicas. You will need to manually
execute a `REPLICAOF` on these replicas to start replicating directly from the
new master.

## Optional arguments

The following optional arguments exist to modify the behavior of the failover
flow:

- `TIMEOUT` _milliseconds_ -- This option allows specifying a maximum time a
  master will wait in the `waiting-for-sync` state before aborting the failover
  attempt and rolling back. This is intended to set an upper bound on the write
  outage the Redis cluster can experience. Failovers typically happen in less
  than a second, but could take longer if there is a large amount of write
  traffic or the replica is already behind in consuming the replication stream.
  If this value is not specified, the timeout can be considered to be
  "infinite".

- `TO` _HOST_ _PORT_ -- This option allows designating a specific replica, by
  its host and port, to failover to. The master will wait specifically for this
  replica to catch up to its replication offset, and then failover to it.

- `FORCE` -- If both the `TIMEOUT` and `TO` options are set, the force flag can
  also be used to designate that once the timeout has elapsed, the master
  should failover to the target replica instead of rolling back. This can be
  used for a best-effort attempt at a failover without data loss, but limiting
  write outage.

NOTE: The master will always roll back if the `PSYNC FAILOVER` request is
rejected by the target replica.

## Failover abort

The failover command is intended to be safe from data loss and corruption, but
can encounter some scenarios it can not automatically remediate from and may get
stuck. For this purpose, the `FAILOVER ABORT` command exists, which will abort
an ongoing failover and return the master to its normal state. The command has
no side effects if issued in the `waiting-for-sync` state but can introduce
multi-master scenarios in the `failover-in-progress` state. If a multi-master
scenario is encountered, you will need to manually identify which master has the
latest data, designate it as the master, and have the other replicas replicate
from it.

NOTE: `REPLICAOF` is disabled while a failover is in progress, this is to
prevent unintended interactions with the failover that might cause data loss.

@return

@simple-string-reply: `OK` if the command was accepted and a coordinated
failover is in progress. An error if the operation cannot be executed.

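For illustration, a hypothetical coordinated failover to a replica at
10.0.0.5:6379 might look like the following (the address, timeout and replies
are placeholder values), with progress tracked through `INFO replication`:

```
> FAILOVER TO 10.0.0.5 6379 TIMEOUT 5000
OK
> INFO replication
# Replication
role:master
master_failover_state:waiting-for-sync
...
```

If the failover gets stuck, `FAILOVER ABORT` returns the master to its normal
state, as described above.
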
@ -1,19 +1,26 @@
|
|||
Delete all the keys of all the existing databases, not just the currently
|
||||
selected one. This command never fails.
|
||||
|
||||
The time-complexity for this operation is O(N), N being the number of keys in
|
||||
all existing databases.
|
||||
By default, `FLUSHALL` will synchronously flush all the databases. Starting with
|
||||
Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to
|
||||
"yes" changes the default flush mode to asynchronous.
|
||||
|
||||
## `FLUSHALL ASYNC` (Redis 4.0.0 or greater)
|
||||
It is possible to use one of the following modifiers to dictate the flushing
|
||||
mode explicitly:
|
||||
|
||||
Redis is now able to delete keys in the background in a different thread without
|
||||
blocking the server. An `ASYNC` option was added to `FLUSHALL` and `FLUSHDB` in
|
||||
order to let the entire dataset or a single database to be freed asynchronously.
|
||||
- `ASYNC`: flushes the databases asynchronously
|
||||
- `!SYNC`: flushes the databases synchronously
|
||||
|
||||
Asynchronous `FLUSHALL` and `FLUSHDB` commands only delete keys that were
|
||||
present at the time the command was invoked. Keys created during an asynchronous
|
||||
flush will be unaffected.
|
||||
Note: an asynchronous `FLUSHALL` command only deletes keys that were present at
|
||||
the time the command was invoked. Keys created during an asynchronous flush will
|
||||
be unaffected.
|
||||
|
||||
@return
|
||||
|
||||
@simple-string-reply
|
||||
|
||||
@history
|
||||
|
||||
- `>= 4.0.0`: Added the `ASYNC` flushing mode modifier.
|
||||
- `>= 6.2.0`: Added the `!SYNC` flushing mode modifier and the
|
||||
**lazyfree-lazy-user-flush** configuration directive.
|
||||
|
|
|
@ -1,11 +1,18 @@
|
|||
Delete all the keys of the currently selected DB. This command never fails.
|
||||
|
||||
The time-complexity for this operation is O(N), N being the number of keys in
|
||||
the database.
|
||||
By default, `FLUSHDB` will synchronously flush all keys from the database.
|
||||
Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration
|
||||
directive to "yes" changes the default flush mode to asynchronous.
|
||||
|
||||
## `FLUSHDB ASYNC` (Redis 4.0.0 or greater)
|
||||
It is possible to use one of the following modifiers to dictate the flushing
|
||||
mode explicitly:
|
||||
|
||||
See `FLUSHALL` for documentation.
|
||||
- `ASYNC`: flushes the database asynchronously
|
||||
- `!SYNC`: flushes the database synchronously
|
||||
|
||||
Note: an asynchronous `FLUSHDB` command only deletes keys that were present at
|
||||
the time the command was invoked. Keys created during an asynchronous flush will
|
||||
be unaffected.
|
||||
|
||||
@return
|
||||
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
Adds the specified geospatial items (latitude, longitude, name) to the specified
|
||||
Adds the specified geospatial items (longitude, latitude, name) to the specified
|
||||
key. Data is stored into the key as a sorted set, in a way that makes it
|
||||
possible to later retrieve items using a query by radius with the `GEORADIUS` or
|
||||
`GEORADIUSBYMEMBER` commands.
|
||||
possible to query the items with the `GEOSEARCH` command.
|
||||
|
||||
The command takes arguments in the standard format x,y so the longitude must be
|
||||
specified before the latitude. There are limits to the coordinates that can be
|
||||
indexed: areas very near to the poles are not indexable. The exact limits, as
|
||||
specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following:
|
||||
indexed: areas very near to the poles are not indexable.
|
||||
|
||||
The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the
|
||||
following:
|
||||
|
||||
- Valid longitudes are from -180 to 180 degrees.
|
||||
- Valid latitudes are from -85.05112878 to 85.05112878 degrees.
|
||||
|
@ -14,37 +15,58 @@ specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following:
|
|||
The command will report an error when the user attempts to index coordinates
|
||||
outside the specified ranges.
|
||||
|
||||
**Note:** there is no **GEODEL** command because you can use `ZREM` in order to
|
||||
remove elements. The Geo index structure is just a sorted set.
|
||||
**Note:** there is no **GEODEL** command because you can use `ZREM` to remove
|
||||
elements. The Geo index structure is just a sorted set.
|
||||
|
||||
## GEOADD options
|
||||
|
||||
`GEOADD` also provides the following options:
|
||||
|
||||
- **XX**: Only update elements that already exist. Never add elements.
|
||||
- **NX**: Don't update already existing elements. Always add new elements.
|
||||
- **CH**: Modify the return value from the number of new elements added, to the
|
||||
total number of elements changed (CH is an abbreviation of _changed_). Changed
|
||||
elements are **new elements added** and elements already existing for which
|
||||
**the coordinates was updated**. So elements specified in the command line
|
||||
having the same score as they had in the past are not counted. Note: normally,
|
||||
the return value of `GEOADD` only counts the number of new elements added.
|
||||
|
||||
Note: The **XX** and **NX** options are mutually exclusive.
|
||||
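As an informal sketch of how these options combine (the key and coordinates are
sample values, not part of the original page), `NX` refuses to touch an
existing member while `XX` with `CH` reports the update:

```
> GEOADD Sicily NX 13.361389 38.115556 "Palermo"
(integer) 1
> GEOADD Sicily NX 13.5 38.0 "Palermo"
(integer) 0
> GEOADD Sicily XX CH 13.5 38.0 "Palermo"
(integer) 1
```
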
|
||||
## How does it work?
|
||||
|
||||
The way the sorted set is populated is using a technique called
|
||||
[Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude bits
|
||||
are interleaved in order to form an unique 52 bit integer. We know that a sorted
|
||||
set double score can represent a 52 bit integer without losing precision.
|
||||
are interleaved to form a unique 52-bit integer. We know that a sorted set
|
||||
double score can represent a 52-bit integer without losing precision.
|
||||
|
||||
This format allows for radius querying by checking the 1+8 areas needed to cover
|
||||
the whole radius, and discarding elements outside the radius. The areas are
|
||||
checked by calculating the range of the box covered removing enough bits from
|
||||
the less significant part of the sorted set score, and computing the score range
|
||||
to query in the sorted set for each area.
|
||||
This format allows for bounding box and radius querying by checking the 1+8
|
||||
areas needed to cover the whole shape and discarding elements outside it. The
|
||||
areas are checked by calculating the range of the box covered, removing enough
|
||||
bits from the less significant part of the sorted set score, and computing the
|
||||
score range to query in the sorted set for each area.
|
||||
|
||||
## What Earth model does it use?
|
||||
|
||||
It just assumes that the Earth is a sphere, since the used distance formula is
|
||||
the Haversine formula. This formula is only an approximation when applied to the
|
||||
The model assumes that the Earth is a sphere since it uses the Haversine formula
|
||||
to calculate distance. This formula is only an approximation when applied to the
|
||||
Earth, which is not a perfect sphere. The introduced errors are not an issue
|
||||
when used in the context of social network sites that need to query by radius
|
||||
and most other applications. However in the worst case the error may be up to
|
||||
0.5%, so you may want to consider other systems for error-critical applications.
|
||||
when used, for example, by social networks and similar applications requiring
|
||||
this type of querying. However, in the worst case, the error may be up to 0.5%,
|
||||
so you may want to consider other systems for error-critical applications.
|
||||
|
||||
@return
|
||||
|
||||
@integer-reply, specifically:
|
||||
|
||||
- The number of elements added to the sorted set, not including elements already
|
||||
existing for which the score was updated.
|
||||
- When used without optional arguments, the number of elements added to the
|
||||
sorted set (excluding score updates).
|
||||
- If the `CH` option is specified, the number of elements that were changed
|
||||
(added or updated).
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: Added the `CH`, `NX` and `XX` options.
|
||||
|
||||
@examples
|
||||
|
||||
|
|
|
@ -2,6 +2,9 @@ Return the members of a sorted set populated with geospatial information using
|
|||
`GEOADD`, which are within the borders of the area specified with the center
|
||||
location and the maximum distance from the center (the radius).
|
||||
|
||||
As per Redis 6.2.0, GEORADIUS command family are considered deprecated. Please
|
||||
prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code.
|
||||
|
||||
This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`
|
||||
variants (see the section below for more information).
|
||||
|
||||
|
@ -38,11 +41,13 @@ can be invoked using the following two options:
|
|||
|
||||
By default all the matching items are returned. It is possible to limit the
|
||||
results to the first N matching items by using the **COUNT `<count>`** option.
|
||||
However note that internally the command needs to perform an effort proportional
|
||||
to the number of items matching the specified area, so to query very large areas
|
||||
with a very small `COUNT` option may be slow even if just a few results are
|
||||
returned. On the other hand `COUNT` can be a very effective way to reduce
|
||||
bandwidth usage if normally just the first results are used.
|
||||
When `ANY` is provided the command will return as soon as enough matches are
|
||||
found, so the results may not be the ones closest to the specified point, but on
|
||||
the other hand, the effort invested by the server is significantly lower. When
|
||||
`ANY` is not provided, the command will perform an effort that is proportional
|
||||
to the number of items matching the specified area and sort them, so to query
|
||||
very large areas with a very small `COUNT` option may be slow even if just a few
|
||||
results are returned.
|
||||
|
||||
By default the command returns the items to the client. It is possible to store
|
||||
the results with one of these options:
|
||||
|
@ -93,6 +98,10 @@ They are exactly like the original commands but refuse the `STORE` and
|
|||
|
||||
Both commands were introduced in Redis 3.2.10 and Redis 4.0.0 respectively.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: Added the `ANY` option for `COUNT`.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
|
|
|
@ -3,6 +3,9 @@ of taking, as the center of the area to query, a longitude and latitude value,
|
|||
it takes the name of a member already existing inside the geospatial index
|
||||
represented by the sorted set.
|
||||
|
||||
As per Redis 6.2.0, GEORADIUS command family are considered deprecated. Please
|
||||
prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code.
|
||||
|
||||
The position of the specified member is used as the center of the query.
|
||||
|
||||
Please check the example below and the `GEORADIUS` documentation for more
|
||||
|
|
77
iredis/data/commands/geosearch.md
Normal file
77
iredis/data/commands/geosearch.md
Normal file
|
@ -0,0 +1,77 @@
|
|||
Return the members of a sorted set populated with geospatial information using
|
||||
`GEOADD`, which are within the borders of the area specified by a given shape.
|
||||
This command extends the `GEORADIUS` command, so in addition to searching within
|
||||
circular areas, it supports searching within rectangular areas.
|
||||
|
||||
This command should be used in place of the deprecated `GEORADIUS` and
|
||||
`GEORADIUSBYMEMBER` commands.
|
||||
|
||||
The query's center point is provided by one of these mandatory options:
|
||||
|
||||
- `FROMMEMBER`: Use the position of the given existing `<member>` in the sorted
|
||||
set.
|
||||
- `FROMLONLAT`: Use the given `<longitude>` and `<latitude>` position.
|
||||
|
||||
The query's shape is provided by one of these mandatory options:
|
||||
|
||||
- `BYRADIUS`: Similar to `GEORADIUS`, search inside circular area according to
|
||||
given `<radius>`.
|
||||
- `BYBOX`: Search inside an axis-aligned rectangle, determined by `<height>` and
|
||||
`<width>`.
|
||||
|
||||
The command optionally returns additional information using the following
|
||||
options:
|
||||
|
||||
- `WITHDIST`: Also return the distance of the returned items from the specified
|
||||
center point. The distance is returned in the same unit as specified for the
|
||||
radius or height and width arguments.
|
||||
- `WITHCOORD`: Also return the longitude and latitude of the matching items.
|
||||
- `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item,
|
||||
in the form of a 52 bit unsigned integer. This is only useful for low level
|
||||
hacks or debugging and is otherwise of little interest for the general user.
|
||||
|
||||
Matching items are returned unsorted by default. To sort them, use one of the
|
||||
following two options:
|
||||
|
||||
- `ASC`: Sort returned items from the nearest to the farthest, relative to the
|
||||
center point.
|
||||
- `DESC`: Sort returned items from the farthest to the nearest, relative to the
|
||||
center point.
|
||||
|
||||
All matching items are returned by default. To limit the results to the first N
|
||||
matching items, use the **COUNT `<count>`** option. When the `ANY` option is
|
||||
used, the command returns as soon as enough matches are found. This means that
|
||||
the results returned may not be the ones closest to the specified point, but the
|
||||
effort invested by the server to generate them is significantly less. When `ANY`
|
||||
is not provided, the command will perform an effort that is proportional to the
|
||||
number of items matching the specified area and sort them, so to query very
|
||||
large areas with a very small `COUNT` option may be slow even if just a few
|
||||
results are returned.
|
||||
|
||||
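For example, reusing the Sicily data set from the example at the bottom of this
page, a query that only needs one nearby match can combine `COUNT` with `ANY`
(the reply shown is illustrative; which member is returned is not guaranteed):

```
> GEOSEARCH Sicily FROMLONLAT 15 37 BYRADIUS 200 km COUNT 1 ANY
1) "Catania"
```
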
@return

@array-reply, specifically:

- Without any `WITH` option specified, the command just returns a linear array
  like ["New York","Milan","Paris"].
- If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command
  returns an array of arrays, where each sub-array represents a single item.

When additional information is returned as an array of arrays for each item, the
first item in the sub-array is always the name of the returned item. The other
information is returned in the following order as successive elements of the
sub-array.

1. The distance from the center as a floating point number, in the same unit
   specified in the shape.
2. The geohash integer.
3. The coordinates as a two items x,y array (longitude,latitude).

@examples

```cli
GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2"
GEOSEARCH Sicily FROMLONLAT 15 37 BYRADIUS 200 km ASC
GEOSEARCH Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC
```

11  iredis/data/commands/geosearchstore.md  Normal file

@@ -0,0 +1,11 @@

This command is like `GEOSEARCH`, but stores the result in destination key.

This command comes in place of the now deprecated `GEORADIUS` and
`GEORADIUSBYMEMBER`.

By default, it stores the results in the `destination` sorted set with their
geospatial information.

When using the `STOREDIST` option, the command stores the items in a sorted set
populated with their distance from the center of the circle or box, as a
floating-point number, in the same unit specified for that shape.
16  iredis/data/commands/getdel.md  Normal file

@@ -0,0 +1,16 @@

Get the value of `key` and delete the key. This command is similar to `GET`,
except for the fact that it also deletes the key on success (if and only if the
key's value type is a string).

@return

@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an
error if the key's value type isn't a string.

@examples

```cli
SET mykey "Hello"
GETDEL mykey
GET mykey
```

28  iredis/data/commands/getex.md  Normal file

@@ -0,0 +1,28 @@

Get the value of `key` and optionally set its expiration. `GETEX` is similar to
`GET`, but is a write command with additional options.

## Options

The `GETEX` command supports a set of options that modify its behavior:

- `EX` _seconds_ -- Set the specified expire time, in seconds.
- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds.
- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key
  will expire, in seconds.
- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the
  key will expire, in milliseconds.
- `PERSIST` -- Remove the time to live associated with the key.

@return

@bulk-string-reply: the value of `key`, or `nil` when `key` does not exist.

@examples

```cli
SET mykey "Hello"
GETEX mykey
TTL mykey
GETEX mykey EX 60
TTL mykey
```

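As a further sketch, `PERSIST` can then be used to drop the TTL set in the
example above (the replies shown are the expected ones, not captured output):

```
> GETEX mykey PERSIST
"Hello"
> TTL mykey
(integer) -1
```
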
@ -1,5 +1,7 @@
|
|||
Atomically sets `key` to `value` and returns the old value stored at `key`.
|
||||
Returns an error when `key` exists but does not hold a string value.
|
||||
Returns an error when `key` exists but does not hold a string value. Any
|
||||
previous time to live associated with the key is discarded on successful `SET`
|
||||
operation.
|
||||
|
||||
## Design pattern
|
||||
|
||||
|
@ -14,6 +16,9 @@ GETSET mycounter "0"
|
|||
GET mycounter
|
||||
```
|
||||
|
||||
As per Redis 6.2, GETSET is considered deprecated. Please prefer `SET` with
|
||||
`GET` parameter in new code.
|
||||
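For instance, the counter-reset example above could be written with the `GET`
option of `SET` instead (a sketch, not part of the original example):

```
SET mycounter "0" GET
```

This atomically sets the counter back to "0" and returns its previous value,
just like `GETSET mycounter "0"`.
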
|
||||
@return
|
||||
|
||||
@bulk-string-reply: the old value stored at `key`, or `nil` when `key` did not
|
||||
|
|
|
@ -1,16 +1,41 @@
|
|||
Switch the connection to a different protocol. Redis version 6 or greater are
|
||||
able to support two protocols, the old protocol, RESP2, and a new one introduced
|
||||
with Redis 6, RESP3. RESP3 has certain advantages since when the connection is
|
||||
in this mode, Redis is able to reply with more semantical replies: for instance
|
||||
`HGETALL` will return a _map type_, so a client library implementation no longer
|
||||
requires to know in advance to translate the array into a hash before returning
|
||||
it to the caller. For a full coverage of RESP3 please
|
||||
Switch to a different protocol, optionally authenticating and setting the
|
||||
connection's name, or provide a contextual client report.
|
||||
|
||||
Redis version 6 and above supports two protocols: the old protocol, RESP2, and a
|
||||
new one introduced with Redis 6, RESP3. RESP3 has certain advantages since when
|
||||
the connection is in this mode, Redis is able to reply with more semantical
|
||||
replies: for instance, `HGETALL` will return a _map type_, so a client library
|
||||
implementation no longer requires to know in advance to translate the array into
|
||||
a hash before returning it to the caller. For a full coverage of RESP3, please
|
||||
[check this repository](https://github.com/antirez/resp3).
|
||||
|
||||
Redis 6 connections starts in RESP2 mode, so clients implementing RESP2 do not
|
||||
need to change (nor there are short term plans to drop support for RESP2).
|
||||
Clients that want to handshake the RESP3 mode need to call the `HELLO` command,
|
||||
using "3" as first argument.
|
||||
In Redis 6 connections start in RESP2 mode, so clients implementing RESP2 do not
|
||||
need to be updated or changed. There are no short-term plans to drop support for
RESP2, although future versions may default to RESP3.
|
||||
|
||||
`HELLO` always replies with a list of current server and connection properties,
|
||||
such as: versions, modules loaded, client ID, replication role and so forth.
|
||||
When called without any arguments in Redis 6.2 and its default use of RESP2
|
||||
protocol, the reply looks like this:
|
||||
|
||||
> HELLO
|
||||
1) "server"
|
||||
2) "redis"
|
||||
3) "version"
|
||||
4) "255.255.255"
|
||||
5) "proto"
|
||||
6) (integer) 2
|
||||
7) "id"
|
||||
8) (integer) 5
|
||||
9) "mode"
|
||||
10) "standalone"
|
||||
11) "role"
|
||||
12) "master"
|
||||
13) "modules"
|
||||
14) (empty array)
|
||||
|
||||
Clients that want to handshake using the RESP3 mode need to call the `HELLO`
|
||||
command and specify the value "3" as the `protover` argument, like so:
|
||||
|
||||
> HELLO 3
|
||||
1# "server" => "redis"
|
||||
|
@ -21,26 +46,28 @@ using "3" as first argument.
|
|||
6# "role" => "master"
|
||||
7# "modules" => (empty array)
|
||||
|
||||
The `HELLO` command has a useful reply that will state a number of facts about
|
||||
the server: the exact version, the set of modules loaded, the client ID, the
|
||||
replication role and so forth. Because of that, and given that the `HELLO`
|
||||
command also works with "2" as argument, both in order to downgrade the protocol
|
||||
back to version 2, or just to get the reply from the server without switching
|
||||
the protocol, client library authors may consider using this command instead of
|
||||
the canonical `PING` when setting up the connection.
|
||||
Because `HELLO` replies with useful information, and given that `protover` is
|
||||
optional or can be set to "2", client library authors may consider using this
|
||||
command instead of the canonical `PING` when setting up the connection.
|
||||
|
||||
This command accepts two non mandatory options:
|
||||
When called with the optional `protover` argument, this command switches the
|
||||
protocol to the specified version and also accepts the following options:
|
||||
|
||||
- `AUTH <username> <password>`: directly authenticate the connection other than
|
||||
switching to the specified protocol. In this way there is no need to call
|
||||
`AUTH` before `HELLO` when setting up new connections. Note that the username
|
||||
can be set to "default" in order to authenticate against a server that does
|
||||
not use ACLs, but the simpler `requirepass` mechanism of Redis before
|
||||
- `AUTH <username> <password>`: directly authenticate the connection in addition
|
||||
to switching to the specified protocol version. This makes calling `AUTH`
|
||||
before `HELLO` unnecessary when setting up a new connection. Note that the
|
||||
`username` can be set to "default" to authenticate against a server that does
|
||||
not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to
|
||||
version 6.
|
||||
- `SETNAME <clientname>`: this is equivalent to also call `CLIENT SETNAME`.
|
||||
- `SETNAME <clientname>`: this is the equivalent of calling `CLIENT SETNAME`.
|
||||
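Putting the options together, a hypothetical handshake that switches to RESP3,
authenticates and names the connection could look like this (the credentials,
client name and version shown are placeholders):

```
> HELLO 3 AUTH default mypassword SETNAME myclient
1# "server" => "redis"
2# "version" => "6.2.0"
3# "proto" => (integer) 3
...
```
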
|
||||
@return
|
||||
|
||||
@array-reply: a list of server properties. The reply is a map instead of an
|
||||
array when RESP3 is selected. The command returns an error if the protocol
|
||||
array when RESP3 is selected. The command returns an error if the `protover`
|
||||
requested does not exist.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: `protover` made optional; when called without arguments the command
|
||||
reports the current connection's context.
|
||||
|
|
|
@ -2,7 +2,7 @@ Sets the specified fields to their respective values in the hash stored at
|
|||
`key`. This command overwrites any specified fields already existing in the
|
||||
hash. If `key` does not exist, a new key holding a hash is created.
|
||||
|
||||
As per Redis 4.0.0, HMSET is considered deprecated. Please use `HSET` in new
|
||||
As per Redis 4.0.0, HMSET is considered deprecated. Please prefer `HSET` in new
|
||||
code.
|
||||
|
||||
@return
|
||||
|
|
50  iredis/data/commands/hrandfield.md  Normal file

@@ -0,0 +1,50 @@

When called with just the `key` argument, return a random field from the hash
value stored at `key`.

If the provided `count` argument is positive, return an array of **distinct
fields**. The array's length is either `count` or the hash's number of fields
(`HLEN`), whichever is lower.

If called with a negative `count`, the behavior changes and the command is
allowed to return the **same field multiple times**. In this case, the number of
returned fields is the absolute value of the specified `count`.

The optional `WITHVALUES` modifier changes the reply so it includes the
respective values of the randomly selected hash fields.

@return

@bulk-string-reply: without the additional `count` argument, the command returns
a Bulk Reply with the randomly selected field, or `nil` when `key` does not
exist.

@array-reply: when the additional `count` argument is passed, the command
returns an array of fields, or an empty array when `key` does not exist. If the
`WITHVALUES` modifier is used, the reply is a list of fields and their values
from the hash.

@examples

```cli
HMSET coin heads obverse tails reverse edge null
HRANDFIELD coin
HRANDFIELD coin
HRANDFIELD coin -5 WITHVALUES
```

## Specification of the behavior when count is passed

When the `count` argument is a positive value this command behaves as follows:

- No repeated fields are returned.
- If `count` is bigger than the number of fields in the hash, the command will
  only return the whole hash without additional fields.
- The order of fields in the reply is not truly random, so it is up to the
  client to shuffle them if needed.

When the `count` is a negative value, the behavior changes as follows:

- Repeating fields are possible.
- Exactly `count` fields, or an empty array if the hash is empty (non-existing
  key), are always returned.
- The order of fields in the reply is truly random.

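To complement the example above, a positive `count` larger than `HLEN` simply
returns the whole hash with no repetitions (illustrative reply; the order may
differ since the selection is random):

```
> HRANDFIELD coin 5 WITHVALUES
1) "heads"
2) "obverse"
3) "tails"
4) "reverse"
5) "edge"
6) "null"
```
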
@ -65,14 +65,14 @@ The more simple and direct implementation of this pattern is the following:
|
|||
FUNCTION LIMIT_API_CALL(ip)
|
||||
ts = CURRENT_UNIX_TIME()
|
||||
keyname = ip+":"+ts
|
||||
current = GET(keyname)
|
||||
IF current != NULL AND current > 10 THEN
|
||||
MULTI
|
||||
INCR(keyname)
|
||||
EXPIRE(keyname,10)
|
||||
EXEC
|
||||
current = RESPONSE_OF_INCR_WITHIN_MULTI
|
||||
IF current > 10 THEN
|
||||
ERROR "too many requests per second"
|
||||
ELSE
|
||||
MULTI
|
||||
INCR(keyname,1)
|
||||
EXPIRE(keyname,10)
|
||||
EXEC
|
||||
PERFORM_API_CALL()
|
||||
END
|
||||
```
|
||||
|
@ -119,7 +119,7 @@ script that is send using the `EVAL` command (only available since Redis version
|
|||
```
|
||||
local current
|
||||
current = redis.call("incr",KEYS[1])
|
||||
if tonumber(current) == 1 then
|
||||
if current == 1 then
|
||||
redis.call("expire",KEYS[1],1)
|
||||
end
|
||||
```
|
||||
|
|
|
@ -15,6 +15,7 @@ The optional parameter can be used to select a specific section of information:
|
|||
- `modules`: Modules section
|
||||
- `keyspace`: Database related statistics
|
||||
- `modules`: Module related sections
|
||||
- `errorstats`: Redis error statistics
|
||||
|
||||
It can also take the following values:
|
||||
|
||||
|
@ -60,6 +61,7 @@ Here is the meaning of all fields in the **server** section:
|
|||
- `run_id`: Random value identifying the Redis server (to be used by Sentinel
|
||||
and Cluster)
|
||||
- `tcp_port`: TCP/IP listen port
|
||||
- `server_time_in_usec`: Epoch-based system time with microsecond precision
|
||||
- `uptime_in_seconds`: Number of seconds since Redis server start
|
||||
- `uptime_in_days`: Same value expressed in days
|
||||
- `hz`: The server's current frequency setting
|
||||
|
@ -72,14 +74,20 @@ Here is the meaning of all fields in the **clients** section:
|
|||
|
||||
- `connected_clients`: Number of client connections (excluding connections from
|
||||
replicas)
|
||||
- `cluster_connections`: An approximation of the number of sockets used by the
|
||||
cluster's bus
|
||||
- `maxclients`: The value of the `maxclients` configuration directive. This is
|
||||
the upper limit for the sum of `connected_clients`, `connected_slaves` and
|
||||
`cluster_connections`.
|
||||
- `client_longest_output_list`: Longest output list among current client
|
||||
connections
|
||||
- `client_biggest_input_buf`: Biggest input buffer among current client
|
||||
connections
|
||||
- `blocked_clients`: Number of clients pending on a blocking call (`BLPOP`,
|
||||
`BRPOP`, `BRPOPLPUSH`, `BZPOPMIN`, `BZPOPMAX`)
|
||||
`BRPOP`, `BRPOPLPUSH`, `BLMOVE`, `BZPOPMIN`, `BZPOPMAX`)
|
||||
- `tracking_clients`: Number of clients being tracked (`CLIENT TRACKING`)
|
||||
- `clients_in_timeout_table`: Number of clients in the clients timeout table
|
||||
- `io_threads_active`: Flag indicating if I/O threads are active
|
||||
|
||||
Here is the meaning of all fields in the **memory** section:
|
||||
|
||||
|
@ -143,6 +151,15 @@ by referring to the `MEMORY STATS` command and the `MEMORY DOCTOR`.
|
|||
Here is the meaning of all fields in the **persistence** section:
|
||||
|
||||
- `loading`: Flag indicating if the load of a dump file is on-going
|
||||
- `current_cow_size`: The size in bytes of copy-on-write memory while a child
|
||||
fork is running
|
||||
- `current_fork_perc`: The percentage of progress of the current fork process.
|
||||
For AOF and RDB forks it is the percentage of `current_save_keys_processed`
|
||||
out of `current_save_keys_total`.
|
||||
- `current_save_keys_processed`: Number of keys processed by the current save
|
||||
operation
|
||||
- `current_save_keys_total`: Number of keys at the beginning of the current save
|
||||
operation
|
||||
- `rdb_changes_since_last_save`: Number of changes since the last dump
|
||||
- `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going
|
||||
- `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save
|
||||
|
@ -150,8 +167,8 @@ Here is the meaning of all fields in the **persistence** section:
|
|||
- `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in seconds
|
||||
- `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if
|
||||
any
|
||||
- `rdb_last_cow_size`: The size in bytes of copy-on-write allocations during the
|
||||
last RDB save operation
|
||||
- `rdb_last_cow_size`: The size in bytes of copy-on-write memory during the last
|
||||
RDB save operation
|
||||
- `aof_enabled`: Flag indicating AOF logging is activated
|
||||
- `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is on-going
|
||||
- `aof_rewrite_scheduled`: Flag indicating an AOF rewrite operation will be
|
||||
|
@ -162,11 +179,11 @@ Here is the meaning of all fields in the **persistence** section:
|
|||
if any
|
||||
- `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation
|
||||
- `aof_last_write_status`: Status of the last write operation to the AOF
|
||||
- `aof_last_cow_size`: The size in bytes of copy-on-write allocations during the
|
||||
last AOF rewrite operation
|
||||
- `aof_last_cow_size`: The size in bytes of copy-on-write memory during the last
|
||||
AOF rewrite operation
|
||||
- `module_fork_in_progress`: Flag indicating a module fork is on-going
|
||||
- `module_fork_last_cow_size`: The size in bytes of copy-on-write allocations
|
||||
during the last module fork operation
|
||||
- `module_fork_last_cow_size`: The size in bytes of copy-on-write memory during
|
||||
the last module fork operation
|
||||
|
||||
`rdb_changes_since_last_save` refers to the number of operations that produced
|
||||
some kind of changes in the dataset since the last time either `SAVE` or
|
||||
|
@ -187,6 +204,8 @@ If a load operation is on-going, these additional fields will be added:
|
|||
|
||||
- `loading_start_time`: Epoch-based timestamp of the start of the load operation
|
||||
- `loading_total_bytes`: Total file size
|
||||
- `loading_rdb_used_mem`: The memory usage of the server that had generated the
|
||||
RDB file at the time of the file's creation
|
||||
- `loading_loaded_bytes`: Number of bytes already loaded
|
||||
- `loading_loaded_perc`: Same value expressed as a percentage
|
||||
- `loading_eta_seconds`: ETA in seconds for the load to be complete
|
||||
|
@ -218,6 +237,7 @@ Here is the meaning of all fields in the **stats** section:
|
|||
- `pubsub_channels`: Global number of pub/sub channels with client subscriptions
|
||||
- `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions
|
||||
- `latest_fork_usec`: Duration of the latest fork operation in microseconds
|
||||
- `total_forks`: Total number of fork operations since the server start
|
||||
- `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes
|
||||
- `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes
|
||||
(applicable only to writable replicas)
|
||||
|
@ -235,12 +255,22 @@ Here is the meaning of all fields in the **stats** section:
|
|||
(only applicable for broadcast mode)
|
||||
- `unexpected_error_replies`: Number of unexpected error replies, that are types
|
||||
of errors from an AOF load or replication
|
||||
- `total_error_replies`: Total number of issued error replies, that is the sum
|
||||
of rejected commands (errors prior command execution) and failed commands
|
||||
(errors within the command execution)
|
||||
- `total_reads_processed`: Total number of read events processed
|
||||
- `total_writes_processed`: Total number of write events processed
|
||||
- `io_threaded_reads_processed`: Number of read events processed by the main and
|
||||
I/O threads
|
||||
- `io_threaded_writes_processed`: Number of write events processed by the main
|
||||
and I/O threads
|
||||
|
||||
Here is the meaning of all fields in the **replication** section:
|
||||
|
||||
- `role`: Value is "master" if the instance is replica of no one, or "slave" if
|
||||
the instance is a replica of some master instance. Note that a replica can be
|
||||
master of another replica (chained replication).
|
||||
- `master_failover_state`: The state of an ongoing failover, if any.
|
||||
- `master_replid`: The replication ID of the Redis server.
|
||||
- `master_replid2`: The secondary replication ID, used for PSYNC after a
|
||||
failover.
|
||||
|
@ -267,7 +297,15 @@ If the instance is a replica, these additional fields are provided:
|
|||
|
||||
If a SYNC operation is on-going, these additional fields are provided:
|
||||
|
||||
- `master_sync_left_bytes`: Number of bytes left before syncing is complete
|
||||
- `master_sync_total_bytes`: Total number of bytes that need to be transferred.
|
||||
this may be 0 when the size is unknown (for example, when the
|
||||
`repl-diskless-sync` configuration directive is used)
|
||||
- `master_sync_read_bytes`: Number of bytes already transferred
|
||||
- `master_sync_left_bytes`: Number of bytes left before syncing is complete (may
|
||||
be negative when `master_sync_total_bytes` is 0)
|
||||
- `master_sync_perc`: The percentage `master_sync_read_bytes` from
|
||||
`master_sync_total_bytes`, or an approximation that uses
|
||||
`loading_rdb_used_mem` when `master_sync_total_bytes` is 0
|
||||
- `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O
|
||||
during a SYNC operation
|
||||
|
||||
|
@ -291,18 +329,36 @@ For each replica, the following line is added:
|
|||
|
||||
Here is the meaning of all fields in the **cpu** section:
|
||||
|
||||
- `used_cpu_sys`: System CPU consumed by the Redis server
|
||||
- `used_cpu_user`:User CPU consumed by the Redis server
|
||||
- `used_cpu_sys`: System CPU consumed by the Redis server, which is the sum of
|
||||
system CPU consumed by all threads of the server process (main thread and
|
||||
background threads)
|
||||
- `used_cpu_user`: User CPU consumed by the Redis server, which is the sum of
|
||||
user CPU consumed by all threads of the server process (main thread and
|
||||
background threads)
|
||||
- `used_cpu_sys_children`: System CPU consumed by the background processes
|
||||
- `used_cpu_user_children`: User CPU consumed by the background processes
|
||||
- `used_cpu_sys_main_thread`: System CPU consumed by the Redis server main
|
||||
thread
|
||||
- `used_cpu_user_main_thread`: User CPU consumed by the Redis server main thread
|
||||
|
||||
The **commandstats** section provides statistics based on the command type,
|
||||
including the number of calls, the total CPU time consumed by these commands,
|
||||
and the average CPU consumed per command execution.
|
||||
including the number of calls that reached command execution (not rejected), the
|
||||
total CPU time consumed by these commands, the average CPU consumed per command
|
||||
execution, the number of rejected calls (errors prior command execution), and
|
||||
the number of failed calls (errors within the command execution).
|
||||
|
||||
For each command type, the following line is added:
|
||||
|
||||
- `cmdstat_XXX`: `calls=XXX,usec=XXX,usec_per_call=XXX`
|
||||
- `cmdstat_XXX`:
|
||||
`calls=XXX,usec=XXX,usec_per_call=XXX,rejected_calls=XXX,failed_calls=XXX`
|
||||
|
||||
The **errorstats** section enables keeping track of the different errors that
|
||||
occurred within Redis, based upon the reply error prefix ( The first word after
|
||||
the "-", up to the first space. Example: `ERR` ).
|
||||
|
||||
For each error type, the following line is added:
|
||||
|
||||
- `errorstat_XXX`: `count=XXX`
|
||||
|
||||
The **cluster** section currently only contains a unique field:
|
||||
|
||||
|
|
|
@ -31,6 +31,6 @@ For more information refer to the [Latency Monitoring Framework page][lm].
|
|||
|
||||
[lm]: /topics/latency-monitor
|
||||
|
||||
@reply
|
||||
@return
|
||||
|
||||
@integer-reply: the number of event time series that were reset.
|
||||
|
|
77  iredis/data/commands/lmove.md  Normal file

@@ -0,0 +1,77 @@

Atomically returns and removes the first/last element (head/tail depending on
the `wherefrom` argument) of the list stored at `source`, and pushes the element
at the first/last element (head/tail depending on the `whereto` argument) of the
list stored at `destination`.

For example: consider `source` holding the list `a,b,c`, and `destination`
holding the list `x,y,z`. Executing `LMOVE source destination RIGHT LEFT`
results in `source` holding `a,b` and `destination` holding `c,x,y,z`.

If `source` does not exist, the value `nil` is returned and no operation is
performed. If `source` and `destination` are the same, the operation is
equivalent to removing the first/last element from the list and pushing it as
first/last element of the list, so it can be considered as a list rotation
command (or a no-op if `wherefrom` is the same as `whereto`).

This command comes in place of the now deprecated `RPOPLPUSH`. Doing
`LMOVE RIGHT LEFT` is equivalent.

@return

@bulk-string-reply: the element being popped and pushed.

@examples

```cli
RPUSH mylist "one"
RPUSH mylist "two"
RPUSH mylist "three"
LMOVE mylist myotherlist RIGHT LEFT
LMOVE mylist myotherlist LEFT RIGHT
LRANGE mylist 0 -1
LRANGE myotherlist 0 -1
```

## Pattern: Reliable queue

Redis is often used as a messaging server to implement processing of background
jobs or other kinds of messaging tasks. A simple form of queue is often obtained
by pushing values into a list on the producer side, and waiting for these values
on the consumer side using `RPOP` (using polling), or `BRPOP` if the client is
better served by a blocking operation.

However, in this context the obtained queue is not _reliable_ as messages can be
lost, for example when there is a network problem or if the consumer crashes
just after the message is received but before it has been processed.

`LMOVE` (or `BLMOVE` for the blocking variant) offers a way to avoid this
problem: the consumer fetches the message and at the same time pushes it into a
_processing_ list. It will use the `LREM` command in order to remove the message
from the _processing_ list once the message has been processed.

An additional client may monitor the _processing_ list for items that remain
there for too long, and will push those timed out items into the queue again if
needed.

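A minimal sketch of such a consumer, written as the sequence of commands it
would issue (the `jobs` and `jobs:processing` key names and the element value
are hypothetical):

```
# Atomically take the oldest job and park it on the processing list,
# blocking until one is available.
BLMOVE jobs jobs:processing RIGHT LEFT 0

# ... the worker handles the returned element here ...

# Acknowledge: remove the processed element from the processing list.
LREM jobs:processing 1 "the-returned-element"
```

A separate monitor can periodically inspect `jobs:processing` (for example with
`LRANGE jobs:processing 0 -1`) and requeue items that have been parked for too
long, as described above.
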
## Pattern: Circular list

Using `LMOVE` with the same source and destination key, a client can visit all
the elements of an N-elements list, one after the other, in O(N) without
transferring the full list from the server to the client using a single `LRANGE`
operation.

The above pattern works even under the following two conditions:

- There are multiple clients rotating the list: they'll fetch different
  elements, until all the elements of the list are visited, and the process
  restarts.
- Other clients are actively pushing new items at the end of the list.

The above makes it very simple to implement a system where a set of items must
be processed by N workers continuously as fast as possible. An example is a
monitoring system that must check that a set of web sites are reachable, with
the smallest delay possible, using a number of parallel workers.

Note that this implementation of workers is trivially scalable and reliable,
because even if a message is lost the item is still in the queue and will be
processed at the next iteration.

@ -1,16 +1,29 @@
|
|||
Removes and returns the first element of the list stored at `key`.
|
||||
Removes and returns the first elements of the list stored at `key`.
|
||||
|
||||
By default, the command pops a single element from the beginning of the list.
|
||||
When provided with the optional `count` argument, the reply will consist of up
|
||||
to `count` elements, depending on the list's length.
|
||||
|
||||
@return
|
||||
|
||||
When called without the `count` argument:
|
||||
|
||||
@bulk-string-reply: the value of the first element, or `nil` when `key` does not
|
||||
exist.
|
||||
|
||||
When called with the `count` argument:
|
||||
|
||||
@array-reply: list of popped elements, or `nil` when `key` does not exist.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: Added the `count` argument.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
RPUSH mylist "one"
|
||||
RPUSH mylist "two"
|
||||
RPUSH mylist "three"
|
||||
RPUSH mylist "one" "two" "three" "four" "five"
|
||||
LPOP mylist
|
||||
LPOP mylist 2
|
||||
LRANGE mylist 0 -1
|
||||
```
|
||||
|
|
|
@ -2,7 +2,7 @@ The command returns the index of matching elements inside a Redis list. By
|
|||
default, when no options are given, it will scan the list from head to tail,
|
||||
looking for the first match of "element". If the element is found, its index
|
||||
(the zero-based position in the list) is returned. Otherwise, if no match is
|
||||
found, NULL is returned.
|
||||
found, `nil` is returned.
|
||||
|
||||
```
|
||||
> RPUSH mylist a b c 1 2 3 c c
|
||||
|
@ -64,12 +64,12 @@ indexes. This is better than giving a very large `COUNT` option because it is
|
|||
more general.
|
||||
|
||||
```
|
||||
> LPOS mylist COUNT 0
|
||||
> LPOS mylist c COUNT 0
|
||||
[2,6,7]
|
||||
```
|
||||
|
||||
When `COUNT` is used and no match is found, an empty array is returned. However
|
||||
when `COUNT` is not used and there are no matches, the command returns NULL.
|
||||
when `COUNT` is not used and there are no matches, the command returns `nil`.
|
||||
|
||||
Finally, the `MAXLEN` option tells the command to compare the provided element
|
||||
only with a given maximum number of list items. So for instance specifying
|
||||
|
@ -80,9 +80,13 @@ useful to limit the maximum complexity of the command. It is also useful when we
|
|||
expect the match to be found very early, but want to be sure that in case this
|
||||
is not true, the command does not take too much time to run.
|
||||
|
||||
When `MAXLEN` is used, it is possible to specify 0 as the maximum number of
|
||||
comparisons, as a way to tell the command we want unlimited comparisons. This is
|
||||
better than giving a very large `MAXLEN` option because it is more general.
|
||||
|
||||
@return
|
||||
|
||||
The command returns the integer representing the matching element, or null if
|
||||
The command returns the integer representing the matching element, or `nil` if
|
||||
there is no match. However, if the `COUNT` option is given the command returns
|
||||
an array (empty if there are no matches).
|
||||
|
||||
|
|
|
@ -37,4 +37,4 @@ OK
|
|||
|
||||
@return
|
||||
|
||||
@integer-reply: the memory usage in bytes
|
||||
@integer-reply: the memory usage in bytes, or `nil` when the key does not exist.
|
||||
|
|
|
@ -68,9 +68,12 @@ a single key exists.
|
|||
- `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or
|
||||
greater ACL auth style).
|
||||
|
||||
`COPY` and `REPLACE` are available only in 3.0 and above. `KEYS` is available
|
||||
starting with Redis 3.0.6. `AUTH` is available starting with Redis 4.0.7.
|
||||
`AUTH2` is available starting with Redis 6.0.0.
|
||||
@history
|
||||
|
||||
- `>= 3.0.0`: Added the `COPY` and `REPLACE` options.
|
||||
- `>= 3.0.6`: Added the `KEYS` option.
|
||||
- `>= 4.0.7`: Added the `AUTH` option.
|
||||
- `>= 6.0.0`: Added the `AUTH2` option.
|
||||
|
||||
@return
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ specified by the `path` argument. The `path` should be the absolute path of the
|
|||
library, including the full filename. Any additional arguments are passed
|
||||
unmodified to the module.
|
||||
|
||||
**Note**: modules can also be loaded at server startup with 'loadmodule'
|
||||
**Note**: modules can also be loaded at server startup with `loadmodule`
|
||||
configuration directive in `redis.conf`.
|
||||
|
||||
@return
|
||||
|
|
|
@ -36,8 +36,8 @@ QUIT
|
|||
Connection closed by foreign host.
|
||||
```
|
||||
|
||||
Manually issue the `QUIT` command to stop a `MONITOR` stream running via
|
||||
`telnet`.
|
||||
Manually issue the `QUIT` or `RESET` commands to stop a `MONITOR` stream running
|
||||
via `telnet`.
|
||||
|
||||
## Commands not logged by MONITOR
|
||||
|
||||
|
@ -90,4 +90,5 @@ flow.
|
|||
|
||||
@history
|
||||
|
||||
- `>=6.0`: `AUTH` excluded from the command's output.
|
||||
- `>= 6.2`: `RESET` can be called to exit monitor mode.
|
||||
- `>= 6.0`: `AUTH` excluded from the command's output.
|
||||
|
|
19  iredis/data/commands/pexpiretime.md  Normal file

@@ -0,0 +1,19 @@

`PEXPIRETIME` has the same semantic as `EXPIRETIME`, but returns the absolute
Unix expiration timestamp in milliseconds instead of seconds.

@return

@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value
in order to signal an error (see the description below).

- The command returns `-1` if the key exists but has no associated expiration
  time.
- The command returns `-2` if the key does not exist.

@examples

```cli
SET mykey "Hello"
PEXPIREAT mykey 33177117420000
PEXPIRETIME mykey
```

@ -1,5 +1,11 @@
|
|||
Posts a message to the given channel.
|
||||
|
||||
In a Redis Cluster clients can publish to every node. The cluster makes sure
|
||||
that published messages are forwarded as needed, so clients can subscribe to any
|
||||
channel by connecting to any one of the nodes.
|
||||
|
||||
@return
|
||||
|
||||
@integer-reply: the number of clients that received the message.
|
||||
@integer-reply: the number of clients that received the message. Note that in a
|
||||
Redis Cluster, only clients that are connected to the same node as the
|
||||
publishing client are included in the count.
|
||||
|
|
|
@ -4,6 +4,12 @@ separately. The general form is:
|
|||
|
||||
PUBSUB <subcommand> ... args ...
|
||||
|
||||
Cluster note: in a Redis Cluster clients can subscribe to every node, and can
|
||||
also publish to every other node. The cluster will make sure that published
|
||||
messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster
|
||||
only report information from the node's Pub/Sub context, rather than the entire
|
||||
cluster.
|
||||
|
||||
# PUBSUB CHANNELS [pattern]
|
||||
|
||||
Lists the currently _active channels_. An active channel is a Pub/Sub channel
|
||||
|
|
23  iredis/data/commands/reset.md  Normal file

@@ -0,0 +1,23 @@

This command performs a full reset of the connection's server-side context,
mimicking the effect of disconnecting and reconnecting again.

When the command is called from a regular client connection, it does the
following:

- Discards the current `MULTI` transaction block, if one exists.
- Unwatches all keys `WATCH`ed by the connection.
- Disables `CLIENT TRACKING`, if in use.
- Sets the connection to `READWRITE` mode.
- Cancels the connection's `ASKING` mode, if previously set.
- Sets `CLIENT REPLY` to `ON`.
- Sets the protocol version to RESP2.
- `SELECT`s database 0.
- Exits `MONITOR` mode, when applicable.
- Aborts Pub/Sub's subscription state (`SUBSCRIBE` and `PSUBSCRIBE`), when
  appropriate.
- Deauthenticates the connection, requiring a call to `AUTH` to reauthenticate
  when authentication is enabled.

@return

@simple-string-reply: always 'RESET'.

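For instance, `RESET` can be used to back out of a transaction block opened by
mistake (a sketch of the expected replies):

```
> MULTI
OK
> SET key1 "value1"
QUEUED
> RESET
RESET
> EXEC
(error) ERR EXEC without MULTI
```
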
@ -1,16 +1,29 @@
|
|||
Removes and returns the last element of the list stored at `key`.
|
||||
Removes and returns the last elements of the list stored at `key`.
|
||||
|
||||
By default, the command pops a single element from the end of the list. When
|
||||
provided with the optional `count` argument, the reply will consist of up to
|
||||
`count` elements, depending on the list's length.
|
||||
|
||||
@return
|
||||
|
||||
When called without the `count` argument:
|
||||
|
||||
@bulk-string-reply: the value of the last element, or `nil` when `key` does not
|
||||
exist.
|
||||
|
||||
When called with the `count` argument:
|
||||
|
||||
@array-reply: list of popped elements, or `nil` when `key` does not exist.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: Added the `count` argument.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
RPUSH mylist "one"
|
||||
RPUSH mylist "two"
|
||||
RPUSH mylist "three"
|
||||
RPUSH mylist "one" "two" "three" "four" "five"
|
||||
RPOP mylist
|
||||
RPOP mylist 2
|
||||
LRANGE mylist 0 -1
|
||||
```
|
||||
|
|
|
@ -11,6 +11,9 @@ performed. If `source` and `destination` are the same, the operation is
|
|||
equivalent to removing the last element from the list and pushing it as first
|
||||
element of the list, so it can be considered as a list rotation command.
|
||||
|
||||
As per Redis 6.2.0, RPOPLPUSH is considered deprecated. Please prefer `LMOVE` in
|
||||
new code.
|
||||
|
||||
@return
|
||||
|
||||
@bulk-string-reply: the element being popped and pushed.
|
||||
|
|
|
@ -7,7 +7,7 @@ An error is returned when the value stored at `key` is not a set.
|
|||
@return
|
||||
|
||||
@integer-reply: the number of elements that were added to the set, not including
|
||||
all the elements already present into the set.
|
||||
all the elements already present in the set.
|
||||
|
||||
@history
|
||||
|
||||
|
|
|
@ -246,7 +246,7 @@ may receive no elements in many iterations.
|
|||
|
||||
It is possible for an infinite number of clients to iterate the same collection
|
||||
at the same time, as the full state of the iterator is in the cursor, that is
|
||||
obtained and returned to the client at every call. Server side no state is taken
|
||||
obtained and returned to the client at every call. No server side state is taken
|
||||
at all.
|
||||
|
||||
## Terminating iterations in the middle
|
||||
|
|
|
@ -17,8 +17,8 @@ is active and retains all changes to the data set once it ends.
|
|||
|
||||
- `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are
|
||||
discarded).
|
||||
- `SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to
|
||||
data).
|
||||
- `!SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes
|
||||
to data).
|
||||
- `NO`. Disables scripts debug mode.
|
||||
|
||||
@return
|
||||
|
|
|
@ -3,6 +3,21 @@ Flush the Lua scripts cache.
|
|||
Please refer to the `EVAL` documentation for detailed information about Redis
|
||||
Lua scripting.
|
||||
|
||||
By default, `SCRIPT FLUSH` will synchronously flush the cache. Starting with
|
||||
Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to
|
||||
"yes" changes the default flush mode to asynchronous.
|
||||
|
||||
It is possible to use one of the following modifiers to dictate the flushing
|
||||
mode explicitly:
|
||||
|
||||
- `ASYNC`: flushes the cache asynchronously
|
||||
- `!SYNC`: flushes the cache synchronously
|
||||
|
||||
@return
|
||||
|
||||
@simple-string-reply
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2.0`: Added the `ASYNC` and `!SYNC` flushing mode modifiers, as well as
|
||||
the **lazyfree-lazy-user-flush** configuration directive.
|
||||
|
|
|
@ -8,24 +8,41 @@ The `SET` command supports a set of options that modify its behavior:
|
|||
|
||||
- `EX` _seconds_ -- Set the specified expire time, in seconds.
|
||||
- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds.
|
||||
- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key
|
||||
will expire, in seconds.
|
||||
- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the
|
||||
key will expire, in milliseconds.
|
||||
- `NX` -- Only set the key if it does not already exist.
|
||||
- `XX` -- Only set the key if it already exist.
|
||||
- `KEEPTTL` -- Retain the time to live associated with the key.
|
||||
- `GET` -- Return the old string stored at key, or nil if key did not exist. An
|
||||
error is returned and `SET` aborted if the value stored at key is not a
|
||||
string.
|
||||
|
||||
Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`, it
|
||||
is possible that in future versions of Redis these three commands will be
|
||||
Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`,
|
||||
`GETSET`, it is possible that in future versions of Redis these commands will be
|
||||
deprecated and finally removed.
|
||||
|
||||
@return
|
||||
|
||||
@simple-string-reply: `OK` if `SET` was executed correctly. @nil-reply: a Null
|
||||
Bulk Reply is returned if the `SET` operation was not performed because the user
|
||||
@simple-string-reply: `OK` if `SET` was executed correctly.
|
||||
|
||||
@nil-reply: `(nil)` if the `SET` operation was not performed because the user
|
||||
specified the `NX` or `XX` option but the condition was not met.
|
||||
|
||||
If the command is issued with the `GET` option, the above does not apply. It
|
||||
will instead reply as follows, regardless if the `SET` was actually performed:
|
||||
|
||||
@bulk-string-reply: the old string value stored at key.
|
||||
|
||||
@nil-reply: `(nil)` if the key did not exist.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 2.6.12`: Added the `EX`, `PX`, `NX` and `XX` options.
|
||||
- `>= 6.0`: Added the `KEEPTTL` option.
|
||||
- `>= 6.2`: Added the `GET`, `EXAT` and `PXAT` option.
|
||||
- `>= 7.0`: Allowed the `NX` and `GET` options to be used together.
|
||||
|
||||
@examples
|
||||
|
||||
|
@ -39,7 +56,7 @@ SET anotherkey "will expire in a minute" EX 60
|
|||
## Patterns
|
||||
|
||||
**Note:** The following pattern is discouraged in favor of
|
||||
[the Redlock algorithm](http://redis.io/topics/distlock) which is only a bit
|
||||
[the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit
|
||||
more complex to implement, but offers better guarantees and is fault tolerant.
|
||||
|
||||
The command `SET resource-name anystring NX EX max-lock-time` is a simple way to
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
Sets or clears the bit at _offset_ in the string value stored at _key_.
|
||||
|
||||
The bit is either set or cleared depending on _value_, which can be either 0
|
||||
or 1.
|
||||
The bit is either set or cleared depending on _value_, which can be either 0 or
|
||||
|
||||
1.
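For example, a brief illustrative session against a key that does not exist yet (`SETBIT` replies with the bit's previous value, `0` here):

```
> SETBIT mykey 7 1
(integer) 0
> GETBIT mykey 7
(integer) 1
```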
|
||||
|
||||
When _key_ does not exist, a new string value is created. The string is grown to
|
||||
make sure it can hold a bit at _offset_. The _offset_ argument is required to be
|
||||
|
|
|
@ -22,7 +22,7 @@ GET mykey
|
|||
**Please note that:**
|
||||
|
||||
1. The following pattern is discouraged in favor of
|
||||
[the Redlock algorithm](http://redis.io/topics/distlock) which is only a bit
|
||||
[the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit
|
||||
more complex to implement, but offers better guarantees and is fault
|
||||
tolerant.
|
||||
2. We document the old pattern anyway because certain existing implementations
|
||||
|
|
|
@ -29,6 +29,8 @@ _slowlog-log-slower-than_ config parameter to zero) with minor performance hit.
|
|||
To read the slow log the **SLOWLOG GET** command is used, that returns every
|
||||
entry in the slow log. It is possible to return only the N most recent entries
|
||||
by passing an additional argument to the command (for instance **SLOWLOG GET 10**).
|
||||
The default requested length is 10 (when the argument is omitted). It's possible
|
||||
to pass -1 to get the entire slowlog.
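A minimal illustrative session on a freshly reset slow log (the empty-array formatting may differ slightly across redis-cli versions):

```
> SLOWLOG RESET
OK
> SLOWLOG GET 10
(empty array)
> SLOWLOG GET -1
(empty array)
```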
|
||||
|
||||
Note that you need a recent version of redis-cli in order to read the slow log
|
||||
output, since it uses some features of the protocol that were not formerly
|
||||
|
|
17
iredis/data/commands/smismember.md
Normal file
|
@ -0,0 +1,17 @@
|
|||
Returns whether each `member` is a member of the set stored at `key`.
|
||||
|
||||
For every `member`, `1` is returned if the value is a member of the set, or `0`
|
||||
if the element is not a member of the set or if `key` does not exist.
|
||||
|
||||
@return
|
||||
|
||||
@array-reply: list representing the membership of the given elements, in the
|
||||
same order as they are requested.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
SADD myset "one"
|
||||
SADD myset "one"
|
||||
SMISMEMBER myset "one" "notamember"
|
||||
```
|
|
@ -1,14 +1,26 @@
|
|||
Removes and returns one or more random elements from the set value store at
|
||||
Removes and returns one or more random members from the set value store at
|
||||
`key`.
|
||||
|
||||
This operation is similar to `SRANDMEMBER`, that returns one or more random
|
||||
elements from a set but does not remove it.
|
||||
|
||||
The `count` argument is available since version 3.2.
|
||||
By default, the command pops a single member from the set. When provided with
|
||||
the optional `count` argument, the reply will consist of up to `count` members,
|
||||
depending on the set's cardinality.
|
||||
|
||||
@return
|
||||
|
||||
@bulk-string-reply: the removed element, or `nil` when `key` does not exist.
|
||||
When called without the `count` argument:
|
||||
|
||||
@bulk-string-reply: the removed member, or `nil` when `key` does not exist.
|
||||
|
||||
When called with the `count` argument:
|
||||
|
||||
@array-reply: the removed members, or an empty array when `key` does not exist.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 3.2`: Added the `count` argument.
|
||||
|
||||
@examples
|
||||
|
||||
|
@ -24,18 +36,8 @@ SPOP myset 3
|
|||
SMEMBERS myset
|
||||
```
|
||||
|
||||
## Specification of the behavior when count is passed
|
||||
|
||||
If count is bigger than the number of elements inside the Set, the command will
|
||||
only return the whole set without additional elements.
|
||||
|
||||
## Distribution of returned elements
|
||||
|
||||
Note that this command is not suitable when you need a guaranteed uniform
|
||||
distribution of the returned elements. For more information about the algorithms
|
||||
used for SPOP, look up both the Knuth sampling and Floyd sampling algorithms.
|
||||
|
||||
## Count argument extension
|
||||
|
||||
Redis 3.2 introduced an optional `count` argument that can be passed to `SPOP`
|
||||
in order to retrieve multiple elements in a single call.
|
||||
used for `SPOP`, look up both the Knuth sampling and Floyd sampling algorithms.
|
||||
|
|
|
@ -1,22 +1,21 @@
|
|||
When called with just the `key` argument, return a random element from the set
|
||||
value stored at `key`.
|
||||
|
||||
Starting from Redis version 2.6, when called with the additional `count`
|
||||
argument, return an array of `count` **distinct elements** if `count` is
|
||||
positive. If called with a negative `count` the behavior changes and the command
|
||||
is allowed to return the **same element multiple times**. In this case the
|
||||
number of returned elements is the absolute value of the specified `count`.
|
||||
If the provided `count` argument is positive, return an array of **distinct
|
||||
elements**. The array's length is either `count` or the set's cardinality
|
||||
(`SCARD`), whichever is lower.
|
||||
|
||||
When called with just the key argument, the operation is similar to `SPOP`,
|
||||
however while `SPOP` also removes the randomly selected element from the set,
|
||||
`SRANDMEMBER` will just return a random element without altering the original
|
||||
set in any way.
|
||||
If called with a negative `count`, the behavior changes and the command is
|
||||
allowed to return the **same element multiple times**. In this case, the number
|
||||
of returned elements is the absolute value of the specified `count`.
|
||||
|
||||
@return
|
||||
|
||||
@bulk-string-reply: without the additional `count` argument the command returns
|
||||
@bulk-string-reply: without the additional `count` argument, the command returns
|
||||
a Bulk Reply with the randomly selected element, or `nil` when `key` does not
|
||||
exist. @array-reply: when the additional `count` argument is passed the command
|
||||
exist.
|
||||
|
||||
@array-reply: when the additional `count` argument is passed, the command
|
||||
returns an array of elements, or an empty array when `key` does not exist.
|
||||
|
||||
@examples
|
||||
|
@ -28,26 +27,32 @@ SRANDMEMBER myset 2
|
|||
SRANDMEMBER myset -5
|
||||
```
|
||||
|
||||
@history
|
||||
|
||||
- `>= 2.6.0`: Added the optional `count` argument.
|
||||
|
||||
## Specification of the behavior when count is passed
|
||||
|
||||
When a count argument is passed and is positive, the elements are returned as if
|
||||
every selected element is removed from the set (like the extraction of numbers
|
||||
in the game of Bingo). However elements are **not removed** from the Set. So
|
||||
basically:
|
||||
When the `count` argument is a positive value this command behaves as follows:
|
||||
|
||||
- No repeated elements are returned.
|
||||
- If count is bigger than the number of elements inside the Set, the command
|
||||
will only return the whole set without additional elements.
|
||||
- If `count` is bigger than the set's cardinality, the command will only return
|
||||
the whole set without additional elements.
|
||||
- The order of elements in the reply is not truly random, so it is up to the
|
||||
client to shuffle them if needed.
|
||||
|
||||
When instead the count is negative, the behavior changes and the extraction
|
||||
happens as if you put the extracted element inside the bag again after every
|
||||
extraction, so repeated elements are possible, and the number of elements
|
||||
requested is always returned as we can repeat the same elements again and again,
|
||||
with the exception of an empty Set (non existing key) that will always produce
|
||||
an empty array as a result.
|
||||
When the `count` is a negative value, the behavior changes as follows:
|
||||
|
||||
- Repeating elements are possible.
|
||||
- Exactly `count` elements, or an empty array if the set is empty (non-existing
|
||||
key), are always returned.
|
||||
- The order of elements in the reply is truly random.
|
||||
|
||||
## Distribution of returned elements
|
||||
|
||||
Note: this section is relevant only for Redis 5 or below, as Redis 6 implements
|
||||
a fairer algorithm.
|
||||
|
||||
The distribution of the returned elements is far from perfect when the number of
|
||||
elements in the set is small: this is due to the fact that we used an
|
||||
approximated random element function that does not really guarantee good
|
||||
|
|
|
@ -11,7 +11,7 @@ argument must be "LCS", since this is the only implemented one.
|
|||
## LCS algorithm
|
||||
|
||||
```
|
||||
STRALGO LCS [KEYS ...] [STRINGS ...] [LEN] [IDX] [MINMATCHLEN <len>] [WITHMATCHLEN]
|
||||
STRALGO LCS STRINGS <string_a> <string_b> | KEYS <key_a> <key_b> [LEN] [IDX] [MINMATCHLEN <len>] [WITHMATCHLEN]
|
||||
```
|
||||
|
||||
The LCS subcommand implements the longest common subsequence algorithm. Note
|
||||
|
@ -113,9 +113,9 @@ For the LCS algorithm:
|
|||
|
||||
- Without modifiers the string representing the longest common substring is
|
||||
returned.
|
||||
- When LEN is given the command returns the length of the longest common
|
||||
- When `LEN` is given the command returns the length of the longest common
|
||||
substring.
|
||||
- When IDX is given the command returns an array with the LCS length and all the
|
||||
ranges in both the strings, start and end offset for each string, where there
|
||||
are matches. When WITHMATCHLEN is given each array representing a match will
|
||||
also have the length of the match (see examples).
|
||||
- When `IDX` is given the command returns an array with the LCS length and all
|
||||
the ranges in both the strings, start and end offset for each string, where
|
||||
there are matches. When `WITHMATCHLEN` is given each array representing a
|
||||
match will also have the length of the match (see examples).
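A brief illustration of the plain, `LEN` and key-based forms (key names and strings are arbitrary examples):

```
> MSET key1 "ohmytext" key2 "mynewtext"
OK
> STRALGO LCS KEYS key1 key2
"mytext"
> STRALGO LCS KEYS key1 key2 LEN
(integer) 6
```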
|
||||
|
|
|
@ -2,4 +2,8 @@ Subscribes the client to the specified channels.
|
|||
|
||||
Once the client enters the subscribed state it is not supposed to issue any
|
||||
other commands, except for additional `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`,
|
||||
`PUNSUBSCRIBE`, `PING` and `QUIT` commands.
|
||||
`PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: `RESET` can be called to exit subscribed state.
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
The `XACK` command removes one or multiple messages from the _pending entries
|
||||
list_ (PEL) of a stream consumer group. A message is pending, and as such stored
|
||||
The `XACK` command removes one or multiple messages from the _Pending Entries
|
||||
List_ (PEL) of a stream consumer group. A message is pending, and as such stored
|
||||
inside the PEL, when it was delivered to some consumer, normally as a side
|
||||
effect of calling `XREADGROUP`, or when a consumer took ownership of a message
|
||||
calling `XCLAIM`. The pending message was delivered to some consumer but the
|
||||
|
@ -17,9 +17,13 @@ entry about this message is also purged, releasing memory from the Redis server.
|
|||
@integer-reply, specifically:
|
||||
|
||||
The command returns the number of messages successfully acknowledged. Certain
|
||||
message IDs may no longer be part of the PEL (for example because they have been
|
||||
already acknowledge), and XACK will not count them as successfully acknowledged.
|
||||
message IDs may no longer be part of the PEL (for example because they have
|
||||
already been acknowledged), and XACK will not count them as successfully
|
||||
acknowledged.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
XACK mystream mygroup 1526569495631-0
|
||||
```
|
||||
redis> XACK mystream mygroup 1526569495631-0
|
||||
(integer) 1
|
||||
```
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
Appends the specified stream entry to the stream at the specified key. If the
|
||||
key does not exist, as a side effect of running this command the key is created
|
||||
with a stream value.
|
||||
with a stream value. The creation of stream's key can be disabled with the
|
||||
`NOMKSTREAM` option.
|
||||
|
||||
An entry is composed of a set of field-value pairs, it is basically a small
|
||||
dictionary. The field-value pairs are stored in the same order they are given by
|
||||
|
@ -14,11 +15,12 @@ stream.
|
|||
|
||||
## Specifying a Stream ID as an argument
|
||||
|
||||
A stream entry ID identifies a given entry inside a stream. The `XADD` command
|
||||
will auto-generate a unique ID for you if the ID argument specified is the `*`
|
||||
character (asterisk ASCII character). However, while useful only in very rare
|
||||
cases, it is possible to specify a well-formed ID, so that the new entry will be
|
||||
added exactly with the specified ID.
|
||||
A stream entry ID identifies a given entry inside a stream.
|
||||
|
||||
The `XADD` command will auto-generate a unique ID for you if the ID argument
|
||||
specified is the `*` character (asterisk ASCII character). However, while useful
|
||||
only in very rare cases, it is possible to specify a well-formed ID, so that the
|
||||
new entry will be added exactly with the specified ID.
|
||||
|
||||
IDs are specified by two numbers separated by a `-` character:
|
||||
|
||||
|
@ -39,30 +41,27 @@ or if after a failover the new master has a different absolute time.
|
|||
|
||||
When a user specified an explicit ID to `XADD`, the minimum valid ID is `0-1`,
|
||||
and the user _must_ specify an ID which is greater than any other ID currently
|
||||
inside the stream, otherwise the command will fail. Usually resorting to
|
||||
specific IDs is useful only if you have another system generating unique IDs
|
||||
(for instance an SQL table) and you really want the Redis stream IDs to match
|
||||
the one of this other system.
|
||||
inside the stream, otherwise the command will fail and return an error. Usually
|
||||
resorting to specific IDs is useful only if you have another system generating
|
||||
unique IDs (for instance an SQL table) and you really want the Redis stream IDs
|
||||
to match the one of this other system.
|
||||
|
||||
## Capped streams
|
||||
|
||||
It is possible to limit the size of the stream to a maximum number of elements
|
||||
using the **MAXLEN** option.
|
||||
`XADD` incorporates the same semantics as the `XTRIM` command - refer to its
|
||||
documentation page for more information. This allows adding new entries and
|
||||
keeping the stream's size in check with a single call to `XADD`, effectively
|
||||
capping the stream with an arbitrary threshold. Although exact trimming is
|
||||
possible and is the default, due to the internal representation of streams it is
|
||||
more efficient to add an entry and trim stream with `XADD` using **almost
|
||||
exact** trimming (the `~` argument).
|
||||
|
||||
Trimming with **MAXLEN** can be expensive compared to just adding entries with
|
||||
`XADD`: streams are represented by macro nodes into a radix tree, in order to be
|
||||
very memory efficient. Altering the single macro node, consisting of a few tens
|
||||
of elements, is not optimal. So it is possible to give the command in the
|
||||
following special form:
|
||||
For example, calling `XADD` in the following form:
|
||||
|
||||
XADD mystream MAXLEN ~ 1000 * ... entry fields here ...
|
||||
|
||||
The `~` argument between the **MAXLEN** option and the actual count means that
|
||||
the user is not really requesting that the stream length is exactly 1000 items,
|
||||
but instead it could be a few tens of entries more, but never less than 1000
|
||||
items. When this option modifier is used, the trimming is performed only when
|
||||
Redis is able to remove a whole macro node. This makes it much more efficient,
|
||||
and it is usually what you want.
|
||||
Will add a new entry but will also evict old entries so that the stream will
|
||||
contain only 1000 entries, or at most a few tens more.
|
||||
|
||||
## Additional information about streams
|
||||
|
||||
|
@ -77,6 +76,14 @@ The command returns the ID of the added entry. The ID is the one auto-generated
|
|||
if `*` is passed as ID argument, otherwise the command just returns the same ID
|
||||
specified by the user during insertion.
|
||||
|
||||
The command returns a @nil-reply when used with the `NOMKSTREAM` option and the
|
||||
key doesn't exist.
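For example, a hedged sketch of that case (the stream name is arbitrary and assumed not to exist):

```
> XADD nosuchstream NOMKSTREAM * field value
(nil)
> EXISTS nosuchstream
(integer) 0
```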
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2`: Added the `NOMKSTREAM` option, `MINID` trimming strategy and the
|
||||
`LIMIT` option.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
|
|
70
iredis/data/commands/xautoclaim.md
Normal file
|
@ -0,0 +1,70 @@
|
|||
This command transfers ownership of pending stream entries that match the
|
||||
specified criteria. Conceptually, `XAUTOCLAIM` is equivalent to calling
|
||||
`XPENDING` and then `XCLAIM`, but provides a more straightforward way to deal
|
||||
with message delivery failures via `SCAN`-like semantics.
|
||||
|
||||
Like `XCLAIM`, the command operates on the stream entries at `<key>` and in the
|
||||
context of the provided `<group>`. It transfers ownership to `<consumer>` of
|
||||
messages pending for more than `<min-idle-time>` milliseconds and having an
|
||||
equal or greater ID than `<start>`.
|
||||
|
||||
The optional `<count>` argument, which defaults to 100, is the upper limit of
|
||||
the number of entries that the command attempts to claim. Internally, the
|
||||
command begins scanning the consumer group's Pending Entries List (PEL) from
|
||||
`<start>` and filters out entries having an idle time less than or equal to
|
||||
`<min-idle-time>`. The maximum number of pending entries that the command scans
|
||||
is the product of multiplying `<count>`'s value by 10 (hard-coded). It is
|
||||
possible, therefore, that the number of entries claimed will be less than the
|
||||
specified value.
|
||||
|
||||
The optional `JUSTID` argument changes the reply to return just an array of IDs
|
||||
of messages successfully claimed, without returning the actual message. Using
|
||||
this option means the retry counter is not incremented.
|
||||
|
||||
The command returns the claimed entries as an array. It also returns a stream ID
|
||||
intended for cursor-like use as the `<start>` argument for its subsequent call.
|
||||
When there are no remaining PEL entries, the command returns the special `0-0`
|
||||
ID to signal completion. However, note that you may want to continue calling
|
||||
`XAUTOCLAIM` even after the scan is complete with the `0-0` as `<start>` ID,
|
||||
because enough time passed, so older pending entries may now be eligible for
|
||||
claiming.
|
||||
|
||||
Note that only messages that are idle longer than `<min-idle-time>` are claimed,
|
||||
and claiming a message resets its idle time. This ensures that only a single
|
||||
consumer can successfully claim a given pending message at a specific instant of
|
||||
time and trivially reduces the probability of processing the same message
|
||||
multiple times.
|
||||
|
||||
Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted
|
||||
deliveries count for that message, unless the `JUSTID` option has been specified
|
||||
(which only delivers the message ID, not the message itself). Messages that
|
||||
cannot be processed for some reason - for example, because consumers
|
||||
systematically crash when processing them - will exhibit high attempted delivery
|
||||
counts that can be detected by monitoring.
|
||||
|
||||
@return
|
||||
|
||||
@array-reply, specifically:
|
||||
|
||||
An array with two elements:
|
||||
|
||||
1. The first element is a stream ID to be used as the `<start>` argument for the
|
||||
next call to `XAUTOCLAIM`
|
||||
2. The second element is an array containing all the successfully claimed
|
||||
messages in the same format as `XRANGE`.
|
||||
|
||||
@examples
|
||||
|
||||
```
|
||||
> XAUTOCLAIM mystream mygroup Alice 3600000 0-0 COUNT 25
|
||||
1) "0-0"
|
||||
2) 1) 1) "1609338752495-0"
|
||||
2) 1) "field"
|
||||
2) "value"
|
||||
```
|
||||
|
||||
In the above example, we attempt to claim up to 25 entries that are pending and
|
||||
idle (not having been acknowledged or claimed) for at least an hour, starting at
|
||||
the stream's beginning. The consumer "Alice" from the "mygroup" group acquires
|
||||
ownership of these messages. Note that the stream ID returned in the example is
|
||||
`0-0`, indicating that the entire stream was scanned.
|
|
@ -5,14 +5,15 @@ command argument. Normally this is what happens:
|
|||
1. There is a stream with an associated consumer group.
|
||||
2. Some consumer A reads a message via `XREADGROUP` from a stream, in the
|
||||
context of that consumer group.
|
||||
3. As a side effect a pending message entry is created in the pending entries
|
||||
list (PEL) of the consumer group: it means the message was delivered to a
|
||||
3. As a side effect a pending message entry is created in the Pending Entries
|
||||
List (PEL) of the consumer group: it means the message was delivered to a
|
||||
given consumer, but it was not yet acknowledged via `XACK`.
|
||||
4. Then suddenly that consumer fails forever.
|
||||
5. Other consumers may inspect the list of pending messages, that are stale for
|
||||
quite some time, using the `XPENDING` command. In order to continue
|
||||
processing such messages, they use `XCLAIM` to acquire the ownership of the
|
||||
message and continue.
|
||||
message and continue. As of Redis 6.2, consumers can use the `XAUTOCLAIM`
|
||||
command to automatically scan and claim stale pending messages.
|
||||
|
||||
This dynamic is clearly explained in the
|
||||
[Stream intro documentation](/topics/streams-intro).
|
||||
|
@ -68,7 +69,7 @@ The command returns all the messages successfully claimed, in the same format as
|
|||
`XRANGE`. However if the `JUSTID` option was specified, only the message IDs are
|
||||
reported, without including the actual message.
|
||||
|
||||
Example:
|
||||
@examples
|
||||
|
||||
```
|
||||
> XCLAIM mystream mygroup Alice 3600000 1526569498055-0
|
||||
|
|
|
@ -11,7 +11,7 @@ To create a new consumer group, use the following form:
|
|||
XGROUP CREATE mystream consumer-group-name $
|
||||
|
||||
The last argument is the ID of the last item in the stream to consider already
|
||||
delivered. In the above case we used the special ID '\$' (that means: the ID of
|
||||
delivered. In the above case we used the special ID '$' (that means: the ID of
|
||||
the last item in the stream). In this case the consumers fetching data from that
|
||||
consumer group will only see new elements arriving in the stream.
|
||||
|
||||
|
@ -22,8 +22,9 @@ starting ID for the consumer group:
|
|||
|
||||
Of course it is also possible to use any other valid ID. If the specified
|
||||
consumer group already exists, the command returns a `-BUSYGROUP` error.
|
||||
Otherwise the operation is performed and OK is returned. There are no hard
|
||||
limits to the number of consumer groups you can associate to a given stream.
|
||||
Otherwise, the operation is performed and a @simple-string-reply `OK` is
|
||||
returned. There are no hard limits to the number of consumer groups you can
|
||||
associate with a given stream.
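As an illustration, assuming `mystream` already exists (stream and group names are arbitrary):

```
> XGROUP CREATE mystream mygroup $
OK
> XGROUP CREATE mystream mygroup $
(error) BUSYGROUP Consumer Group name already exists
```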
|
||||
|
||||
If the specified stream doesn't exist when creating a group, an error will be
|
||||
returned. You can use the optional `MKSTREAM` subcommand as the last argument
|
||||
|
@ -38,16 +39,26 @@ A consumer group can be destroyed completely by using the following form:
|
|||
|
||||
The consumer group will be destroyed even if there are active consumers and
|
||||
pending messages, so make sure to call this command only when really needed.
|
||||
This form returns an @integer-reply with the number of destroyed consumer groups
|
||||
(0 or 1).
|
||||
|
||||
Consumers in a consumer group are auto-created every time a new consumer name is
|
||||
mentioned by some command. They can also be explicitly created by using the
|
||||
following form:
|
||||
|
||||
XGROUP CREATECONSUMER mystream consumer-group-name myconsumer123
|
||||
|
||||
This form returns an @integer-reply with the number of created consumers (0 or
|
||||
1).
|
||||
|
||||
To just remove a given consumer from a consumer group, the following form is
|
||||
used:
|
||||
|
||||
XGROUP DELCONSUMER mystream consumer-group-name myconsumer123
|
||||
|
||||
Consumers in a consumer group are auto-created every time a new consumer name is
|
||||
mentioned by some command. However sometimes it may be useful to remove old
|
||||
consumers since they are no longer used. This form returns the number of pending
|
||||
messages that the consumer had before it was deleted.
|
||||
Sometimes it may be useful to remove old consumers since they are no longer
|
||||
used. This form returns an @integer-reply with the number of pending messages
|
||||
that the consumer had before it was deleted.
|
||||
|
||||
Finally it is possible to set the next message to deliver using the `SETID`
|
||||
subcommand. Normally the next ID is set when the consumer is created, as the
|
||||
|
@ -58,7 +69,13 @@ messages in a stream, you may want to set its next ID to 0:
|
|||
|
||||
XGROUP SETID mystream consumer-group-name 0
|
||||
|
||||
This form returns a @simple-string-reply `OK` or an error.
|
||||
|
||||
Finally to get some help if you don't remember the syntax, use the HELP
|
||||
subcommand:
|
||||
|
||||
XGROUP HELP
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2.0`: Supports the `CREATECONSUMER` subcommand.
|
||||
|
|
|
@ -39,7 +39,8 @@ is the stream content.
|
|||
- `XINFO STREAM <key> FULL [COUNT <count>]`
|
||||
|
||||
In this form the command returns the entire state of the stream, including
|
||||
entries, groups, consumers and PELs. This form is available since Redis 6.0.
|
||||
entries, groups, consumers and Pending Entries Lists (PELs). This form is
|
||||
available since Redis 6.0.
|
||||
|
||||
```
|
||||
> XADD mystream * foo bar
|
||||
|
|
|
@ -2,7 +2,7 @@ Fetching data from a stream via a consumer group, and not acknowledging such
|
|||
data, has the effect of creating _pending entries_. This is well explained in
|
||||
the `XREADGROUP` command, and even better in our
|
||||
[introduction to Redis Streams](/topics/streams-intro). The `XACK` command will
|
||||
immediately remove the pending entry from the Pending Entry List (PEL) since
|
||||
immediately remove the pending entry from the Pending Entries List (PEL) since
|
||||
once a message is successfully processed, there is no longer need for the
|
||||
consumer group to track it and to remember the current owner of the message.
|
||||
|
||||
|
@ -58,10 +58,13 @@ consumer group, which is one, followed by the smallest and greatest ID among the
|
|||
pending messages, and then list every consumer in the consumer group with at
|
||||
least one pending message, and the number of pending messages it has.
|
||||
|
||||
This is a good overview, but sometimes we are interested in the details. In
|
||||
order to see all the pending messages with more associated information we need
|
||||
to also pass a range of IDs, in a similar way we do it with `XRANGE`, and a non
|
||||
optional _count_ argument, to limit the number of messages returned per call:
|
||||
## Extended form of XPENDING
|
||||
|
||||
The summary provides a good overview, but sometimes we are interested in the
|
||||
details. In order to see all the pending messages with more associated
|
||||
information we need to also pass a range of IDs, in a similar way we do it with
|
||||
`XRANGE`, and a non optional _count_ argument, to limit the number of messages
|
||||
returned per call:
|
||||
|
||||
```
|
||||
> XPENDING mystream group55 - + 10
|
||||
|
@ -71,7 +74,7 @@ optional _count_ argument, to limit the number of messages returned per call:
|
|||
4) (integer) 1
|
||||
```
|
||||
|
||||
In the extended form we no longer see the summary information, instead there are
|
||||
In the extended form we no longer see the summary information, instead there is
|
||||
detailed information for each message in the pending entries list. For each
|
||||
message four attributes are returned:
|
||||
|
||||
|
@ -87,8 +90,8 @@ when some other consumer _claims_ the message with `XCLAIM`, or when the message
|
|||
is delivered again via `XREADGROUP`, when accessing the history of a consumer in
|
||||
a consumer group (see the `XREADGROUP` page for more info).
|
||||
|
||||
Finally it is possible to pass an additional argument to the command, in order
|
||||
to see the messages having a specific owner:
|
||||
It is possible to pass an additional argument to the command, in order to see
|
||||
the messages having a specific owner:
|
||||
|
||||
```
|
||||
> XPENDING mystream group55 - + 10 consumer-123
|
||||
|
@ -101,6 +104,29 @@ even when there are many pending messages from many consumers: we have a pending
|
|||
entries list data structure both globally, and for every consumer, so we can
|
||||
very efficiently show just messages pending for a single consumer.
|
||||
|
||||
## Idle time filter
|
||||
|
||||
Since version 6.2 it is possible to filter entries by their idle-time, given in
|
||||
milliseconds (useful for `XCLAIM`ing entries that have not been processed for
|
||||
some time):
|
||||
|
||||
```
|
||||
> XPENDING mystream group55 IDLE 9000 - + 10
|
||||
> XPENDING mystream group55 IDLE 9000 - + 10 consumer-123
|
||||
```
|
||||
|
||||
The first case will return the first 10 (or less) PEL entries of the entire
|
||||
group that are idle for over 9 seconds, whereas in the second case only those of
|
||||
`consumer-123`.
|
||||
|
||||
## Exclusive ranges and iterating the PEL
|
||||
|
||||
The `XPENDING` command allows iterating over the pending entries just like
|
||||
`XRANGE` and `XREVRANGE` allow for the stream's entries. You can do this by
|
||||
prefixing the ID of the last-read pending entry with the `(` character that
|
||||
denotes an open (exclusive) range, and providing it to the subsequent call to the
|
||||
command.
|
||||
|
||||
@return
|
||||
|
||||
@array-reply, specifically:
|
||||
|
@ -108,3 +134,7 @@ very efficiently show just messages pending for a single consumer.
|
|||
The command returns data in different format depending on the way it is called,
|
||||
as previously explained in this page. However the reply is always an array of
|
||||
items.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2.0`: Added the `IDLE` option and exclusive range intervals.
|
||||
|
|
|
@ -67,6 +67,13 @@ Used in this way `XRANGE` works as a range query command to obtain entries in a
|
|||
specified time. This is very handy in order to access the history of past events
|
||||
in a stream.
|
||||
|
||||
## Exclusive ranges
|
||||
|
||||
The range is closed (inclusive) by default, meaning that the reply can include
|
||||
entries with IDs matching the query's start and end intervals. It is possible to
|
||||
specify an open interval (exclusive) by prefixing the ID with the character `(`.
|
||||
This is useful for iterating the stream, as explained below.
|
||||
|
||||
## Returning a maximum number of entries
|
||||
|
||||
Using the **COUNT** option it is possible to reduce the number of entries
|
||||
|
@ -110,14 +117,14 @@ is trivial:
|
|||
```
|
||||
|
||||
Then instead of starting the iteration again from `-`, as the start of the range
|
||||
we use the entry ID of the _last_ entry returned by the previous `XRANGE` call,
|
||||
adding the sequence part of the ID by one.
|
||||
we use the entry ID of the _last_ entry returned by the previous `XRANGE` call
|
||||
as an exclusive interval.
|
||||
|
||||
The ID of the last entry is `1526985685298-0`, so we just add 1 to the sequence
|
||||
to obtain `1526985685298-1`, and continue our iteration:
|
||||
The ID of the last entry is `1526985685298-0`, so we just prefix it with a '(',
|
||||
and continue our iteration:
|
||||
|
||||
```
|
||||
> XRANGE writers 1526985685298-1 + COUNT 2
|
||||
> XRANGE writers (1526985685298-0 + COUNT 2
|
||||
1) 1) 1526985691746-0
|
||||
2) 1) "name"
|
||||
2) "Toni"
|
||||
|
@ -139,6 +146,37 @@ The command `XREAD` is also able to iterate the stream. The command `XREVRANGE`
|
|||
can iterate the stream reverse, from higher IDs (or times) to lower IDs (or
|
||||
times).
|
||||
|
||||
### Iterating with earlier versions of Redis
|
||||
|
||||
While exclusive range intervals are only available from Redis 6.2, it is still
|
||||
possible to use a similar stream iteration pattern with earlier versions. You
|
||||
start fetching from the stream the same way as described above to obtain the
|
||||
first entries.
|
||||
|
||||
For the subsequent calls, you'll need to programmatically advance the last
|
||||
entry's ID returned. Most Redis clients should abstract this detail, but the
|
||||
implementation can also be in the application if needed. In the example above,
|
||||
this means incrementing the sequence of `1526985685298-0` by one, from 0 to 1.
|
||||
The second call would, therefore, be:
|
||||
|
||||
```
|
||||
> XRANGE writers 1526985685298-1 + COUNT 2
|
||||
1) 1) 1526985691746-0
|
||||
2) 1) "name"
|
||||
2) "Toni"
|
||||
...
|
||||
```
|
||||
|
||||
Also, note that once the sequence part of the last ID equals
|
||||
18446744073709551615, you'll need to increment the timestamp and reset the
|
||||
sequence part to 0. For example, incrementing the ID
|
||||
`1526985685298-18446744073709551615` should result in `1526985685299-0`.
|
||||
|
||||
A symmetrical pattern applies to iterating the stream with `XREVRANGE`. The only
|
||||
difference is that the client needs to decrement the ID for the subsequent
|
||||
calls. When decrementing an ID with a sequence part of 0, the timestamp needs to
|
||||
be decremented by 1 and the sequence set to 18446744073709551615.
|
||||
|
||||
## Fetching single items
|
||||
|
||||
If you look for an `XGET` command you'll be disappointed because `XRANGE` is
|
||||
|
@ -170,6 +208,10 @@ returned entries are complete, that means that the ID and all the fields they
|
|||
are composed of are returned. Moreover, the entries are returned with their fields
|
||||
and values in the exact same order as `XADD` added them.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2` Added exclusive ranges.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
|
|
|
@ -29,7 +29,7 @@ the history of messages that were delivered to it, so a message has just a
|
|||
single owner. However there is a special feature called _message claiming_ that
|
||||
allows other consumers to claim messages in case there is a non recoverable
|
||||
failure of some consumer. In order to implement such semantics, consumer groups
|
||||
require explicit acknowledgement of the messages successfully processed by the
|
||||
require explicit acknowledgment of the messages successfully processed by the
|
||||
consumer, via the `XACK` command. This is needed because the stream will track,
|
||||
for each consumer group, who is processing what message.
|
||||
|
||||
|
@ -88,7 +88,7 @@ no differences in this regard.
|
|||
Two things:
|
||||
|
||||
1. If the message was never delivered to anyone, that is, if we are talking
|
||||
about a new message, then a PEL (Pending Entry List) is created.
|
||||
about a new message, then a PEL (Pending Entries List) is created.
|
||||
2. If instead the message was already delivered to this consumer, and it is just
|
||||
re-fetching the same message again, then the _last delivery counter_ is
|
||||
updated to the current time, and the _number of deliveries_ is incremented by
|
||||
|
@ -129,3 +129,19 @@ acknowledged all the pending messages: we can start to use `>` as ID, in order
|
|||
to get the new messages and rejoin the consumers that are processing new things.
|
||||
|
||||
To see how the command actually replies, please check the `XREAD` command page.
|
||||
|
||||
@return
|
||||
|
||||
@array-reply, specifically:
|
||||
|
||||
The command returns an array of results: each element of the returned array is
|
||||
an array composed of two elements: the key name and the entries
|
||||
reported for that key. The entries reported are full stream entries, having IDs
|
||||
and the list of all the fields and values. Fields and values are guaranteed to be
|
||||
reported in the same order they were added by `XADD`.
|
||||
|
||||
When **BLOCK** is used, on timeout a null reply is returned.
|
||||
|
||||
Reading the [Redis Streams introduction](/topics/streams-intro) is highly
|
||||
suggested in order to understand more about the streams overall behavior and
|
||||
semantics.
|
||||
|
|
|
@ -14,54 +14,6 @@ send:
|
|||
|
||||
XREVRANGE somestream + - COUNT 1
|
||||
|
||||
## Iterating with XREVRANGE
|
||||
|
||||
Like `XRANGE` this command can be used in order to iterate the whole stream
|
||||
content, however note that in this case, the next command calls should use the
|
||||
ID of the last entry, with the sequence number decremented by one. However if
|
||||
the sequence number is already 0, the time part of the ID should be decremented
|
||||
by 1, and the sequence part should be set to the maximum possible sequence
|
||||
number, that is, 18446744073709551615, or could be omitted at all, and the
|
||||
command will automatically assume it to be such a number (see `XRANGE` for more
|
||||
info about incomplete IDs).
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
> XREVRANGE writers + - COUNT 2
|
||||
1) 1) 1526985723355-0
|
||||
2) 1) "name"
|
||||
2) "Ngozi"
|
||||
3) "surname"
|
||||
4) "Adichie"
|
||||
2) 1) 1526985712947-0
|
||||
2) 1) "name"
|
||||
2) "Agatha"
|
||||
3) "surname"
|
||||
4) "Christie"
|
||||
```
|
||||
|
||||
The last ID returned is `1526985712947-0`, since the sequence number is already
|
||||
zero, the next ID I'll use instead of the `+` special ID will be
|
||||
`1526985712946-18446744073709551615`, or just `18446744073709551615`:
|
||||
|
||||
```
|
||||
> XREVRANGE writers 1526985712946-18446744073709551615 - COUNT 2
|
||||
1) 1) 1526985691746-0
|
||||
2) 1) "name"
|
||||
2) "Toni"
|
||||
3) "surname"
|
||||
4) "Morrison"
|
||||
2) 1) 1526985685298-0
|
||||
2) 1) "name"
|
||||
2) "Jane"
|
||||
3) "surname"
|
||||
4) "Austen"
|
||||
```
|
||||
|
||||
And so forth until the iteration is complete and no result is returned. See the
|
||||
`XRANGE` page about iterating for more information.
|
||||
|
||||
@return
|
||||
|
||||
@array-reply, specifically:
|
||||
|
@ -72,6 +24,10 @@ means that the ID and all the fields they are composed are returned. Moreover
|
|||
the entries are returned with their fields and values in the exact same order as
|
||||
`XADD` added them.
|
||||
|
||||
@history
|
||||
|
||||
- `>= 6.2` Added exclusive ranges.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
|
|
|
@ -1,34 +1,71 @@
|
|||
`XTRIM` trims the stream to a given number of items, evicting older items (items
|
||||
with lower IDs) if needed. The command is conceived to accept multiple trimming
|
||||
strategies, however currently only a single one is implemented, which is
|
||||
`MAXLEN`, and works exactly as the `MAXLEN` option in `XADD`.
|
||||
`XTRIM` trims the stream by evicting older entries (entries with lower IDs) if
|
||||
needed.
|
||||
|
||||
For example the following command will trim the stream to exactly the latest
|
||||
1000 items:
|
||||
Trimming the stream can be done using one of these strategies:
|
||||
|
||||
- `MAXLEN`: Evicts entries as long as the stream's length exceeds the specified
|
||||
`threshold`, where `threshold` is a positive integer.
|
||||
- `MINID`: Evicts entries with IDs lower than `threshold`, where `threshold` is
|
||||
a stream ID.
|
||||
|
||||
For example, this will trim the stream to exactly the latest 1000 items:
|
||||
|
||||
```
|
||||
XTRIM mystream MAXLEN 1000
|
||||
```
|
||||
|
||||
It is possible to give the command in the following special form in order to
|
||||
make it more efficient:
|
||||
Whereas in this example, all entries that have an ID lower than 649085820-0 will
|
||||
be evicted:
|
||||
|
||||
```
|
||||
XTRIM mystream MINID 649085820
|
||||
```
|
||||
|
||||
By default, or when provided with the optional `=` argument, the command
|
||||
performs exact trimming.
|
||||
|
||||
Depending on the strategy, exact trimming means:
|
||||
|
||||
- `MAXLEN`: the trimmed stream's length will be exactly the minimum between its
|
||||
original length and the specified `threshold`.
|
||||
- `MINID`: the oldest ID in the stream will be exactly the minimum between its
|
||||
original oldest ID and the specified `threshold`.
|
||||
|
||||
## Nearly exact trimming
|
||||
|
||||
Because exact trimming may require additional effort from the Redis server, the
|
||||
optional `~` argument can be provided to make it more efficient.
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
XTRIM mystream MAXLEN ~ 1000
|
||||
```
|
||||
|
||||
The `~` argument between the **MAXLEN** option and the actual count means that
|
||||
the user is not really requesting that the stream length is exactly 1000 items,
|
||||
but instead it could be a few tens of entries more, but never less than 1000
|
||||
items. When this option modifier is used, the trimming is performed only when
|
||||
Redis is able to remove a whole macro node. This makes it much more efficient,
|
||||
and it is usually what you want.
|
||||
The `~` argument between the `MAXLEN` strategy and the `threshold` means that
|
||||
the user is requesting to trim the stream so its length is **at least** the
|
||||
`threshold`, but possibly slightly more. In this case, Redis will stop trimming
|
||||
early when performance can be gained (for example, when a whole macro node in
|
||||
the data structure can't be removed). This makes trimming much more efficient,
|
||||
and it is usually what you want, although after trimming, the stream may have
|
||||
a few tens of additional entries over the `threshold`.
|
||||
|
||||
Another way to control the amount of work done by the command when using the
|
||||
`~`, is the `LIMIT` clause. When used, it specifies the maximal `count` of
|
||||
entries that will be evicted. When `LIMIT` and `count` aren't specified, the
|
||||
default value of 100 \* the number of entries in a macro node will be implicitly
|
||||
used as the `count`. Specifying the value 0 as `count` disables the limiting
|
||||
mechanism entirely.
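For instance, a nearly exact trim with a capped amount of work could look like this (threshold and limit values are arbitrary):

```
XTRIM mystream MAXLEN ~ 1000 LIMIT 100
```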
|
||||
|
||||
@return
|
||||
|
||||
@integer-reply, specifically:
|
||||
@integer-reply: The number of entries deleted from the stream.
|
||||
|
||||
The command returns the number of entries deleted from the stream.
|
||||
@history
|
||||
|
||||
- `>= 6.2`: Added the `MINID` trimming strategy and the `LIMIT` option.
|
||||
|
||||
@examples
|
||||
|
||||
```cli
|
||||
XADD mystream * field1 A field2 B field3 C field4 D
|
||||
|
|
|
@ -10,13 +10,17 @@ not hold a sorted set, an error is returned.
|
|||
The score values should be the string representation of a double precision
|
||||
floating point number. `+inf` and `-inf` values are valid values as well.
|
||||
|
||||
## ZADD options (Redis 3.0.2 or greater)
|
||||
## ZADD options
|
||||
|
||||
ZADD supports a list of options, specified after the name of the key and before
|
||||
the first score argument. Options are:
|
||||
|
||||
- **XX**: Only update elements that already exist. Never add elements.
|
||||
- **NX**: Don't update already existing elements. Always add new elements.
|
||||
- **XX**: Only update elements that already exist. Don't add new elements.
|
||||
- **NX**: Only add new elements. Don't update already existing elements.
|
||||
- **LT**: Only update existing elements if the new score is **less than** the
|
||||
current score. This flag doesn't prevent adding new elements.
|
||||
- **GT**: Only update existing elements if the new score is **greater than** the
|
||||
current score. This flag doesn't prevent adding new elements.
|
||||
- **CH**: Modify the return value from the number of new elements added, to the
|
||||
total number of elements changed (CH is an abbreviation of _changed_). Changed
|
||||
elements are **new elements added** and elements already existing for which
|
||||
|
@ -26,6 +30,8 @@ the first score argument. Options are:
|
|||
- **INCR**: When this option is specified `ZADD` acts like `ZINCRBY`. Only one
|
||||
score-element pair can be specified in this mode.
|
||||
|
||||
Note: The **GT**, **LT** and **NX** options are mutually exclusive.
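A brief sketch of how the flags interact (member and score values are illustrative):

```
> ZADD myzset 1 "a"
(integer) 1
> ZADD myzset NX 2 "a"
(integer) 0
> ZADD myzset GT CH 5 "a"
(integer) 1
> ZSCORE myzset "a"
"5"
```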
|
||||
|
||||
## Range of integer scores that can be expressed precisely
|
||||
|
||||
Redis sorted sets use a _double 64-bit floating point number_ to represent the
|
||||
|
@ -74,8 +80,10 @@ is also possible to query sorted sets by range of scores using `ZRANGEBYSCORE`).
|
|||
|
||||
@integer-reply, specifically:
|
||||
|
||||
- The number of elements added to the sorted set, not including elements already
|
||||
existing for which the score was updated.
|
||||
- When used without optional arguments, the number of elements added to the
|
||||
sorted set (excluding score updates).
|
||||
- If the `CH` option is specified, the number of elements that were changed
|
||||
(added or updated).
|
||||
|
||||
If the `INCR` option is specified, the return value will be @bulk-string-reply:
|
||||
|
||||
|
@ -87,6 +95,8 @@ If the `INCR` option is specified, the return value will be @bulk-string-reply:
|
|||
|
||||
- `>= 2.4`: Accepts multiple elements. In Redis versions older than 2.4 it was
|
||||
possible to add or update a single member per call.
|
||||
- `>= 3.0.2`: Added the `XX`, `NX`, `CH` and `INCR` options.
|
||||
- `>= 6.2`: Added the `GT` and `LT` options.
|
||||
|
||||
@examples
|
||||
|
||||
|
|