Merging upstream version 1.9.4.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-09 17:04:04 +01:00
parent 7ac9951505
commit db5ed8b1cc
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
131 changed files with 3811 additions and 826 deletions

View file

@@ -1,9 +1,8 @@
 [bumpversion]
-current_version = 1.9.1
+current_version = 1.9.4
 commit = True
 tag = True

 [bumpversion:file:iredis/__init__.py]
 [bumpversion:file:pyproject.toml]

View file

@@ -89,7 +89,7 @@ jobs:
         run: |
           python3 -m venv venv
           . venv/bin/activate
-          pip install -U pip
+          pip install pip==21.1
           pip install poetry
           poetry install
           python -c "import sys; print(sys.version)"
@@ -107,18 +107,12 @@ jobs:
           iredis -h
           iredis help GET
-      - name: Cache cargo registry
-        uses: actions/cache@v1
-        with:
-          path: ~/.cargo/registry
-          key: ${{ runner.os }}-cargo-registry
       - name: Executable Build
         run: |
           # pyoxidizer doesn't know the wheel path, and it doesn't support passing env vars
           export WHEEL_PATH=`ls ./dist/iredis*.whl`
           envsubst '$WHEEL_PATH' < pyoxidizer.template.bzl > pyoxidizer.bzl
-          cargo install pyoxidizer --vers 0.6.0
+          pip install pyoxidizer
           pyoxidizer build --release install
           cd ./build/x86*/release/install
           tar -zcf ../../../iredis.tar.gz lib/ iredis

View file

@@ -0,0 +1,77 @@
name: Test binary build.

on:
  pull_request:
  push:
    branches:
      - master

jobs:
  test-release-binary:
    name: Test Build Executable Binary. You can download from Artifact after building.
    runs-on: ubuntu-16.04
    # FIXME
    # help test shouldn't depend on this to run
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
        options: --entrypoint redis-server
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v1
        with:
          python-version: 3.7
          architecture: 'x64'
      - name: Cache venv
        uses: actions/cache@v1
        with:
          path: venv
          # Look to see if there is a cache hit for the corresponding requirements file
          key: ubuntu-16.04-poetryenv-${{ hashFiles('poetry.lock') }}
      - name: Install Dependencies
        run: |
          python3 -m venv venv
          . venv/bin/activate
          pip install pip==21.1
          pip install poetry
          poetry install
          python -c "import sys; print(sys.version)"
          pip list
      - name: Poetry Build
        run: |
          . venv/bin/activate
          poetry build
      - name: Test Build
        run: |
          python3 -m venv fresh_env
          . fresh_env/bin/activate
          pip install dist/*.whl
          iredis -h
          iredis help GET
      - name: Executable Build
        run: |
          # pyoxidizer doesn't know the wheel path, and it doesn't support passing env vars
          export WHEEL_PATH=`ls ./dist/iredis*.whl`
          envsubst '$WHEEL_PATH' < pyoxidizer.template.bzl > pyoxidizer.bzl
          pip install pyoxidizer
          pyoxidizer build --release install
          cd ./build/x86*/release/install
          tar -zcf ../../../iredis.tar.gz lib/ iredis
          cd -
      - name: Test Executable
        run: |
          ./build/x86*/release/install/iredis -h
          ./build/x86*/release/install/iredis help GET
      - name: Upload Release Asset to Github Artifact
        uses: actions/upload-artifact@v2
        with:
          name: iredis-${{github.sha}}.tar.gz
          path: ./build/iredis.tar.gz

View file

@@ -12,7 +12,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-16.04]
-        python: ['3.6', '3.7', '3.8']
+        python: ['3.6', '3.7', '3.8', '3.9']
         redis: [5, 6]
     runs-on: ${{ matrix.os }}
@@ -25,21 +25,21 @@ jobs:
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python }}
           architecture: 'x64'
       - name: Cache venv
-        uses: actions/cache@v1
+        uses: actions/cache@v2
         with:
           path: venv
           # Look to see if there is a cache hit for the corresponding requirements file
-          key: ${{ matrix.os }}-poetryenv-${{ hashFiles('poetry.lock') }}
+          key: poetryenv-${{ matrix.os }}-${{ matrix.python }}-${{ hashFiles('poetry.lock') }}
       - name: Install Dependencies
         run: |
           python3 -m venv venv
           . venv/bin/activate
-          pip install -U pip
+          pip install -U pip==21.1 setuptools
           pip install poetry
           poetry install
           python -c "import sys; print(sys.version)"
@@ -49,7 +49,7 @@ jobs:
           REDIS_VERSION: ${{ matrix.redis }}
         run: |
           . venv/bin/activate
-          pytest
+          pytest || cat cli_test.log
   lint:
     name: flake8 & black
     runs-on: ubuntu-16.04
@@ -61,7 +61,7 @@ jobs:
           python-version: 3.7
           architecture: 'x64'
       - name: Cache venv
-        uses: actions/cache@v1
+        uses: actions/cache@v2
         with:
           path: venv
           # Look to see if there is a cache hit for the corresponding requirements file

.gitignore
View file

@@ -106,3 +106,4 @@ venv.bak/
 # IDE
 .vscode
+.idea/

View file

@ -1,3 +1,26 @@
## 1.10
- Feature: more human readable output for `HELP` command like `ACL HELP` and
`MEMORY HELP`.
- Feature: you can use <kbd>Ctrl</kbd> + <kbd>C</kbd> to cancel a blocking
command like `BLPOP`.
### 1.9.4
- Bugfix: respect newbie_mode set in config, if cli flag is missing. thanks to [sid-maddy]
### 1.9.3
- Bugfix: When IRedis start with `--decode=utf-8`, command with shell pipe will
fail. ( [#383](https://github.com/laixintao/iredis/issues/383)). Thanks to
[hanaasagi].
### 1.9.2
- Bugfix: before `cluster` commands' `node-id` only accept numbers, not it's
fixed. `node-id` can be `\w+`.
- Feature: support set client name for iredis connections via `--client-name`.
### 1.9.1 ### 1.9.1
- Feature: support auto-reissue command to another Redis server, when got a - Feature: support auto-reissue command to another Redis server, when got a
@ -216,3 +239,5 @@
[lyqscmy]: https://github.com/lyqscmy [lyqscmy]: https://github.com/lyqscmy
[brianmaissy]: https://github.com/brianmaissy [brianmaissy]: https://github.com/brianmaissy
[otms61]: https://github.com/otms61 [otms61]: https://github.com/otms61
[hanaasagi]: https://github.com/Hanaasagi
[sid-maddy]: https://github.com/sid-maddy

View file

@@ -2,12 +2,12 @@
   <img width="100" height="100" src="https://raw.githubusercontent.com/laixintao/iredis/master/docs/assets/logo.png" />
 </p>
-<h3 align="center">Interactive Redis: A Cli for Redis with AutoCompletion and Syntax Highlighting.</h4>
+<h3 align="center">Interactive Redis: A Cli for Redis with AutoCompletion and Syntax Highlighting.</h3>
 <p align="center">
   <a href="https://github.com/laixintao/iredis/actions"><img src="https://github.com/laixintao/iredis/workflows/Test/badge.svg" alt="Github Action"></a>
   <a href="https://badge.fury.io/py/iredis"><img src="https://badge.fury.io/py/iredis.svg" alt="PyPI version"></a>
-  <img src="https://badgen.net/badge/python/3.6%20|%203.7%20|%203.8/" alt="Python version">
+  <img src="https://badgen.net/badge/python/3.6%20%7C%203.7%20%7C%203.8%20%7C%203.9/" alt="Python version">
   <a href="https://pepy.tech/project/iredis"><img src="https://pepy.tech/badge/iredis" alt="Download stats"></a>
   <a href="https://t.me/iredis_users"><img src="https://badgen.net/badge/icon/join?icon=telegram&amp;label=usergroup" alt="Chat on telegram"></a>
   <a href="https://console.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https://github.com/laixintao/iredis&amp;cloudshell_print=docs/cloudshell/run-in-docker.txt"><img src="https://badgen.net/badge/run/GoogleCloudShell/blue?icon=terminal" alt="Open in Cloud Shell"></a>
@@ -145,7 +145,7 @@ like <kbd>Ctrl</kbd> + <kbd>F</kbd> to forward work.
 Also:

-- <kbd>Ctrl</kbd> + <kbd>F</kbd> (i.e. EOF) to exit; you can also use the `exit`
+- <kbd>Ctrl</kbd> + <kbd>D</kbd> (i.e. EOF) to exit; you can also use the `exit`
   command.
 - <kbd>Ctrl</kbd> + <kbd>L</kbd> to clear screen; you can also use the `clear`
   command.
@@ -156,8 +156,8 @@ Also:
 ### Release Strategy

-IRedis is built and released by CircleCI. Whenever a tag is pushed to the
-`master` branch, a new release is built and uploaded to pypi.org, it's very
+IRedis is built and released by `GitHub Actions`. Whenever a tag is pushed to
+the `master` branch, a new release is built and uploaded to pypi.org, it's very
 convenient.

 Thus, we release as often as possible, so that users can always enjoy the new

View file

@@ -1 +1 @@
-__version__ = "1.9.1"
+__version__ = "1.9.4"

View file

@@ -24,12 +24,12 @@ class BottomToolbar:
     def render(self):
         text = BUTTOM_TEXT
-        # add command help if valide
+        # add command help if valid
         if self.command_holder.command:
             try:
                 command_info = commands_summary[self.command_holder.command]
                 text = command_syntax(self.command_holder.command, command_info)
             except KeyError as e:
                 logger.exception(e)
-                pass
         return text

View file

@@ -62,6 +62,7 @@ class Client:
         path=None,
         scheme="redis",
         username=None,
+        client_name=None,
     ):
         self.host = host
         self.port = port
@@ -69,17 +70,11 @@ class Client:
         self.path = path
         # FIXME username is not using...
         self.username = username
+        self.client_name = client_name
         self.scheme = scheme
+        self.password = password

-        self.connection = self.create_connection(
-            host,
-            port,
-            db,
-            password,
-            path,
-            scheme,
-            username,
-        )
+        self.build_connection()

         # all command upper case
         self.answer_callbacks = command2callback
@@ -101,6 +96,21 @@ class Client:
         if config.version and re.match(r"([\d\.]+)", config.version):
             self.auth_compat(config.version)

+    def build_connection(self):
+        """
+        create a new connection and replace ``self.connection``
+        """
+        self.connection = self.create_connection(
+            self.host,
+            self.port,
+            self.db,
+            self.password,
+            self.path,
+            self.scheme,
+            self.username,
+            client_name=self.client_name,
+        )
+
     def create_connection(
         self,
         host=None,
@@ -110,6 +120,7 @@ class Client:
         path=None,
         scheme="redis",
         username=None,
+        client_name=None,
     ):
         if scheme in ("redis", "rediss"):
             connection_kwargs = {
@@ -118,13 +129,19 @@ class Client:
                 "db": db,
                 "password": password,
                 "socket_keepalive": config.socket_keepalive,
+                "client_name": client_name,
             }
             if scheme == "rediss":
                 connection_class = SSLConnection
             else:
                 connection_class = Connection
         else:
-            connection_kwargs = {"db": db, "password": password, "path": path}
+            connection_kwargs = {
+                "db": db,
+                "password": password,
+                "path": path,
+                "client_name": client_name,
+            }
             connection_class = UnixDomainSocketConnection

         if config.decode:
@@ -242,6 +259,15 @@ class Client:
             except redis.exceptions.ExecAbortError:
                 config.transaction = False
                 raise
+            except KeyboardInterrupt:
+                logger.warning("received KeyboardInterrupt... rebuild connection...")
+                connection.disconnect()
+                connection.connect()
+                print(
+                    "KeyboardInterrupt received! User canceled reading response!",
+                    file=sys.stderr,
+                )
+                return None
             else:
                 return response
         raise last_error
@@ -338,7 +364,7 @@ class Client:
         grammar = completer.get_completer(input_text=rawinput).compiled_grammar
         matched = grammar.match(rawinput)
         if not matched:
-            # invalide command!
+            # invalid command!
             return rawinput, None
         variables = matched.variables()
         shell_command = variables.get("shellcommand")
@@ -397,12 +423,7 @@ class Client:
         # subcommand's stdout/stderr
         if shell_command and config.shell:
             # pass the raw response of redis to shell command
-            if isinstance(redis_resp, list):
-                # FIXME not handling nested list, use renders.render_raw
-                # instead
-                stdin = b"\n".join(redis_resp)
-            else:
-                stdin = redis_resp
+            stdin = OutputRender.render_raw(redis_resp)
             run(shell_command, input=stdin, shell=True)
             return
@@ -486,7 +507,7 @@ class Client:
         redis_grammar = completer.get_completer(command).compiled_grammar
         m = redis_grammar.match(command)
         if not m:
-            # invalide command!
+            # invalid command!
             return
         variables = m.variables()
         # zset withscores
@@ -501,7 +522,7 @@ class Client:
             doc = read_text(commands_data, f"{command_docs_name}.md")
         except FileNotFoundError:
             raise NotRedisCommand(
-                f"{command_summary_name} is not a valide Redis command."
+                f"{command_summary_name} is not a valid Redis command."
             )
         rendered_detail = markdown.render(doc)
         summary_dict = commands_summary[command_summary_name]

View file

@@ -94,7 +94,6 @@ commands_summary.update(
         "PEEK": {
             "summary": "Get the key's type and value.",
             "arguments": [{"name": "key", "type": "key"}],
-            "since": "1.0",
             "complexity": "O(1).",
             "since": "1.0",
             "group": "iredis",
@@ -135,7 +134,7 @@ def split_command_args(command):
             input_args = command[matcher.end() :]
             break
     else:
-        raise InvalidArguments(f"`{command}` is not a valide Redis Command")
+        raise InvalidArguments(f"`{command}` is not a valid Redis Command")

     args = list(strip_quote_args(input_args))

View file

@@ -191,7 +191,7 @@ class IRedisCompleter(Completer):
         grammar = completer.compiled_grammar
         m = grammar.match(command)
         if not m:
-            # invalide command!
+            # invalid command!
             return
         variables = m.variables()

View file

@@ -123,7 +123,7 @@ server,ACL CAT,command_categorynamex,render_list
 server,ACL DELUSER,command_usernames,render_int
 server,ACL GENPASS,command_countx,render_bulk_string
 server,ACL GETUSER,command_username,render_list
-server,ACL HELP,command,render_list
+server,ACL HELP,command,render_help
 server,ACL LIST,command,render_list
 server,ACL LOAD,command,render_simple_string
 server,ACL LOG,command_count_or_resetx,render_list_or_string
@@ -152,12 +152,12 @@ server,LOLWUT,command_version,render_bytes
 server,LASTSAVE,command,render_unixtime
 server,LATENCY DOCTOR,command,render_bulk_string_decode
 server,LATENCY GRAPH,command_graphevent,render_bulk_string_decode
-server,LATENCY HELP,command,render_list
+server,LATENCY HELP,command,render_help
 server,LATENCY HISTORY,command_graphevent,render_list
 server,LATENCY LATEST,command,render_list
 server,LATENCY RESET,command_graphevents,render_int
 server,MEMORY DOCTOR,command,render_bulk_string_decode
-server,MEMORY HELP,command,render_list
+server,MEMORY HELP,command,render_help
 server,MEMORY MALLOC-STATS,command,render_bulk_string_decode
 server,MEMORY PURGE,command,render_simple_string
 server,MEMORY STATS,command,render_nested_pair

File diff suppressed because it is too large.

View file

@@ -8,6 +8,10 @@ rules used to configure the user, it is still functionally identical.

 @array-reply: a list of ACL rule definitions for the user.

+@history
+
+- `>= 6.2`: Added Pub/Sub channel patterns.
+
 @examples

 Here's the default configuration for the default user:
@@ -25,4 +29,6 @@ Here's the default configuration for the default user:
    6) "+@all"
    7) "keys"
    8) 1) "*"
+   9) "channels"
+  10) 1) "*"
 ```

View file

@@ -12,6 +12,6 @@ An array of strings.

 ```
 > ACL LIST
-1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* +@all -@admin -@dangerous"
-2) "user default on nopass ~* +@all"
+1) "user antirez on #9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 ~objects:* &* +@all -@admin -@dangerous"
+2) "user default on nopass ~* &* +@all"
 ```

View file

@@ -55,10 +55,17 @@ This is a list of all the supported Redis ACL rules:
   deleted user to be disconnected.
 - `~<pattern>`: add the specified key pattern (glob style pattern, like in the
   `KEYS` command), to the list of key patterns accessible by the user. You can
-  add as many key patterns you want to the same user. Example: `~objects:*`
+  add multiple key patterns to the same user. Example: `~objects:*`
 - `allkeys`: alias for `~*`, it allows the user to access all the keys.
-- `resetkey`: removes all the key patterns from the list of key patterns the
+- `resetkeys`: removes all the key patterns from the list of key patterns the
   user can access.
+- `&<pattern>`: add the specified glob style pattern to the list of Pub/Sub
+  channel patterns accessible by the user. You can add multiple channel patterns
+  to the same user. Example: `&chatroom:*`
+- `allchannels`: alias for `&*`, it allows the user to access all Pub/Sub
+  channels.
+- `resetchannels`: removes all channel patterns from the list of Pub/Sub channel
+  patterns the user can access.
 - `+<command>`: add this command to the list of the commands the user can call.
   Example: `+zadd`.
 - `+@<category>`: add all the commands in the specified category to the list of
@@ -87,7 +94,7 @@ This is a list of all the supported Redis ACL rules:
 - `>password`: Add the specified clear text password as an hashed password in
   the list of the users passwords. Every user can have many active passwords, so
   that password rotation will be simpler. The specified password is not stored
-  in cleartext inside the server. Example: `>mypassword`.
+  as clear text inside the server. Example: `>mypassword`.
 - `#<hashedpassword>`: Add the specified hashed password to the list of user
   passwords. A Redis hashed password is hashed with SHA256 and translated into a
   hexadecimal string. Example:
@@ -104,6 +111,10 @@ This is a list of all the supported Redis ACL rules:

 If the rules contain errors, the error is returned.

+@history
+
+- `>= 6.2`: Added Pub/Sub channel patterns.
+
 @examples

 ```
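
For illustration, a minimal session exercising the channel-pattern rules added in this diff (requires Redis 6.2 or later; the user name and patterns here are hypothetical):

```
> ACL SETUSER chatuser on >secret &chatroom:* +subscribe
OK
> ACL DELUSER chatuser
(integer) 1
```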

View file

@@ -29,6 +29,10 @@ defined in the ACL list (see `ACL SETUSER`) and the official

 When ACLs are used, the single argument form of the command, where only the
 password is specified, assumes that the implicit username is "default".

+@history
+
+- `>= 6.0.0`: Added ACL style (username and password).
+
 ## Security notice

 Because of the high performance nature of Redis, it is possible to try a lot of

View file

@@ -20,11 +20,11 @@ offset 100, and gets the value of the 4 bit unsigned integer at bit offset 0:

 Note that:

-1. Addressing with `GET` bits outside the current string length (including the
+1. Addressing with `!GET` bits outside the current string length (including the
    case the key does not exist at all), results in the operation to be performed
    like the missing part all consists of bits set to 0.
-2. Addressing with `SET` or `INCRBY` bits outside the current string length will
-   enlarge the string, zero-padding it, as needed, for the minimal length
-   needed, according to the most far bit touched.
+2. Addressing with `!SET` or `!INCRBY` bits outside the current string length
+   will enlarge the string, zero-padding it, as needed, for the minimal length
+   needed, according to the most far bit touched.

 ## Supported subcommands and integer types
@@ -39,7 +39,7 @@ The following is the list of supported commands.
   value.

 There is another subcommand that only changes the behavior of successive
-`INCRBY` subcommand calls by setting the overflow behavior:
+`!INCRBY` and `!SET` subcommand calls by setting the overflow behavior:

 - **OVERFLOW** `[WRAP|SAT|FAIL]`
@@ -91,8 +91,9 @@ following behaviors:
   detected. The corresponding return value is set to NULL to signal the
   condition to the caller.

-Note that each `OVERFLOW` statement only affects the `INCRBY` commands that
-follow it in the list of subcommands, up to the next `OVERFLOW` statement.
+Note that each `OVERFLOW` statement only affects the `!INCRBY` and `!SET`
+commands that follow it in the list of subcommands, up to the next `OVERFLOW`
+statement.

 By default, **WRAP** is used if not otherwise specified.
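
As a short sketch of the saturation behavior described above (key name hypothetical): the first `SET` returns the old value of the field, and with `OVERFLOW SAT` the following `INCRBY` saturates at the unsigned 8-bit maximum instead of wrapping:

```
> BITFIELD counter SET u8 0 255 OVERFLOW SAT INCRBY u8 0 10
1) (integer) 0
2) (integer) 255
```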

View file

@@ -0,0 +1,23 @@
`BLMOVE` is the blocking variant of `LMOVE`. When `source` contains elements,
this command behaves exactly like `LMOVE`. When used inside a `MULTI`/`EXEC`
block, this command behaves exactly like `LMOVE`. When `source` is empty, Redis
will block the connection until another client pushes to it or until `timeout`
is reached. A `timeout` of zero can be used to block indefinitely.

This command comes in place of the now deprecated `BRPOPLPUSH`. Doing
`BLMOVE RIGHT LEFT` is equivalent.

See `LMOVE` for more information.

@return

@bulk-string-reply: the element being popped from `source` and pushed to
`destination`. If `timeout` is reached, a @nil-reply is returned.

## Pattern: Reliable queue

Please see the pattern description in the `LMOVE` documentation.

## Pattern: Circular list

Please see the pattern description in the `LMOVE` documentation.
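
A minimal illustration of the non-blocking case (key names hypothetical; requires Redis 6.2 or later): the element is popped from the right of `source` and pushed to the left of `destination`:

```
> RPUSH mylist "one" "two"
(integer) 2
> BLMOVE mylist target RIGHT LEFT 0
"two"
```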

View file

@@ -34,7 +34,7 @@ client will unblock returning a `nil` multi-bulk value when the specified
 timeout has expired without a push operation against at least one of the
 specified keys.

-**The timeout argument is interpreted as an integer value specifying the maximum
+**The timeout argument is interpreted as a double value specifying the maximum
 number of seconds to block**. A timeout of zero can be used to block
 indefinitely.
@@ -129,6 +129,10 @@ If you like science fiction, think of time flowing at infinite speed inside a
 where an element was popped and the second element being the value of the
 popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```

View file

@@ -18,6 +18,10 @@ the tail of a list instead of popping from the head.
 where an element was popped and the second element being the value of the
 popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```

View file

@@ -5,6 +5,9 @@ elements, this command behaves exactly like `RPOPLPUSH`. When used inside a
 to it or until `timeout` is reached. A `timeout` of zero can be used to block
 indefinitely.

+As per Redis 6.2.0, BRPOPLPUSH is considered deprecated. Please prefer `BLMOVE`
+in new code.
+
 See `RPOPLPUSH` for more information.

 @return
@@ -12,6 +15,10 @@ See `RPOPLPUSH` for more information.
 @bulk-string-reply: the element being popped from `source` and pushed to
 `destination`. If `timeout` is reached, a @nil-reply is returned.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 ## Pattern: Reliable queue

 Please see the pattern description in the `RPOPLPUSH` documentation.

View file

@@ -5,7 +5,7 @@ members to pop from any of the given sorted sets. A member with the highest
 score is popped from first sorted set that is non-empty, with the given keys
 being checked in the order that they are given.

-The `timeout` argument is interpreted as an integer value specifying the maximum
+The `timeout` argument is interpreted as a double value specifying the maximum
 number of seconds to block. A timeout of zero can be used to block indefinitely.

 See the [BZPOPMIN documentation][cb] for the exact semantics, since `BZPOPMAX`
@@ -23,6 +23,10 @@ with the highest scores instead of popping the ones with the lowest scores.
 where a member was popped, the second element is the popped member itself, and
 the third element is the score of the popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```

View file

@@ -5,7 +5,7 @@ members to pop from any of the given sorted sets. A member with the lowest score
 is popped from first sorted set that is non-empty, with the given keys being
 checked in the order that they are given.

-The `timeout` argument is interpreted as an integer value specifying the maximum
+The `timeout` argument is interpreted as a double value specifying the maximum
 number of seconds to block. A timeout of zero can be used to block indefinitely.

 See the [BLPOP documentation][cl] for the exact semantics, since `BZPOPMIN` is
@@ -23,6 +23,10 @@ popped from.
 where a member was popped, the second element is the popped member itself, and
 the third element is the score of the popped element.

+@history
+
+- `>= 6.0`: `timeout` is interpreted as a double instead of an integer.
+
 @examples

 ```

View file

@@ -1,7 +1,7 @@
 This command controls the tracking of the keys in the next command executed by
 the connection, when tracking is enabled in `OPTIN` or `OPTOUT` mode. Please
 check the [client side caching documentation](/topics/client-side-caching) for
-background informations.
+background information.

 When tracking is enabled Redis, using the `CLIENT TRACKING` command, it is
 possible to specify the `OPTIN` or `OPTOUT` options, so that keys in read only

View file

@@ -0,0 +1,16 @@
The command returns information and statistics about the current client
connection in a mostly human readable format.

The reply format is identical to that of `CLIENT LIST`, and the content consists
only of information about the current client.

@examples

```cli
CLIENT INFO
```

@return

@bulk-string-reply: a unique string, as described at the `CLIENT LIST` page, for
the current client.

View file

@@ -1,14 +1,12 @@
-The `CLIENT KILL` command closes a given client connection. Up to Redis 2.8.11
-it was possible to close a connection only by client address, using the
-following form:
+The `CLIENT KILL` command closes a given client connection. This command
+supports two formats, the old format:

     CLIENT KILL addr:port

 The `ip:port` should match a line returned by the `CLIENT LIST` command (`addr`
 field).

-However starting with Redis 2.8.12 or greater, the command accepts the following
-form:
+The new format:

     CLIENT KILL <filter> <value> ... ... <filter> <value>
@@ -17,13 +15,14 @@ of killing just by address. The following filters are available:
 - `CLIENT KILL ADDR ip:port`. This is exactly the same as the old
   three-arguments behavior.
-- `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field,
-  which was introduced in the `CLIENT LIST` command starting from Redis 2.8.12.
-- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `slave`
-  and `pubsub` (the `master` type is available from v3.2). This closes the
-  connections of **all the clients** in the specified class. Note that clients
-  blocked into the `MONITOR` command are considered to belong to the `normal`
-  class.
+- `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local
+  (bind) address.
+- `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field.
+  Client `ID`'s are retrieved using the `CLIENT LIST` command.
+- `CLIENT KILL TYPE type`, where _type_ is one of `normal`, `master`, `replica`
+  and `pubsub`. This closes the connections of **all the clients** in the
+  specified class. Note that clients blocked into the `MONITOR` command are
+  considered to belong to the `normal` class.
 - `CLIENT KILL USER username`. Closes all the connections that are authenticated
   with the specified [ACL](/topics/acl) username, however it returns an error if
   the username does not map to an existing ACL user.
@@ -32,10 +31,6 @@ of killing just by address. The following filters are available:
   option to `no` will have the effect of also killing the client calling the
   command.

-**Note: starting with Redis 5 the project is no longer using the slave word. You
-can use `TYPE replica` instead, however the old form is still supported for
-backward compatibility.**
-
 It is possible to provide multiple filters at the same time. The command will
 handle multiple filters via logical AND. For example:
@@ -71,3 +66,12 @@ When called with the three arguments format:
 When called with the filter / value format:

 @integer-reply: the number of clients killed.

+@history
+
+- `>= 2.8.12`: Added new filter format.
+- `>= 2.8.12`: `ID` option.
+- `>= 3.2`: Added `master` type for the `TYPE` option.
+- `>= 5`: Replaced `slave` `TYPE` with `replica`. `slave` still supported for
+  backward compatibility.
+- `>= 6.2`: `LADDR` option.
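
For illustration, filters combine via logical AND and the reply is the number of connections closed; with no matching clients the count is zero (address hypothetical; `LADDR` requires Redis 6.2 or later):

```
> CLIENT KILL TYPE pubsub LADDR 127.0.0.1:6379
(integer) 0
```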

View file

@@ -1,10 +1,13 @@
 The `CLIENT LIST` command returns information and statistics about the client
 connections server in a mostly human readable format.

-As of v5.0, the optional `TYPE type` subcommand can be used to filter the list
-by clients' type, where _type_ is one of `normal`, `master`, `replica` and
-`pubsub`. Note that clients blocked into the `MONITOR` command are considered to
-belong to the `normal` class.
+You can use one of the optional subcommands to filter the list. The `TYPE type`
+subcommand filters the list by clients' type, where _type_ is one of `normal`,
+`master`, `replica`, and `pubsub`. Note that clients blocked by the `MONITOR`
+command belong to the `normal` class.
+
+The `ID` filter only returns entries for clients with IDs matching the
+`client-id` arguments.

 @return
@@ -16,9 +19,10 @@ belong to the `normal` class.
 Here is the meaning of the fields:

-- `id`: an unique 64-bit client ID (introduced in Redis 2.8.12).
+- `id`: a unique 64-bit client ID.
 - `name`: the name set by the client with `CLIENT SETNAME`
 - `addr`: address/port of the client
+- `laddr`: address/port of local address client connected to (bind address)
 - `fd`: file descriptor corresponding to the socket
 - `age`: total duration of the connection in seconds
 - `idle`: idle time of the connection in seconds
@@ -35,6 +39,11 @@ Here is the meaning of the fields:
 - `omem`: output buffer memory usage
 - `events`: file descriptor events (see below)
 - `cmd`: last command played
+- `argv-mem`: incomplete arguments for the next command (already extracted from
+  query buffer)
+- `tot-mem`: total memory consumed by this client in its various buffers
+- `redir`: client id of current client tracking redirection
+- `user`: the authenticated username of the client

 The client flags can be a combination of:
@@ -53,6 +62,9 @@ S: the client is a replica node connection to this instance
 u: the client is unblocked
 U: the client is connected via a Unix domain socket
 x: the client is in a MULTI/EXEC context
+t: the client enabled keys tracking in order to perform client side caching
+R: the client tracking target client is invalid
+B: the client enabled broadcast tracking mode
 ```

 The file descriptor events can be:
@@ -68,3 +80,9 @@ New fields are regularly added for debugging purpose. Some could be removed in
 the future. A version safe Redis client using this command should parse the
 output accordingly (i.e. handling gracefully missing fields, skipping unknown
 fields).

+@history
+
+- `>= 2.8.12`: Added unique client `id` field.
+- `>= 5.0`: Added optional `TYPE` filter.
+- `>= 6.2`: Added `laddr` field and the optional `ID` filter.

View file

@@ -3,14 +3,28 @@ clients for the specified amount of time (in milliseconds).

 The command performs the following actions:

-- It stops processing all the pending commands from normal and pub/sub clients.
-  However interactions with replicas will continue normally.
+- It stops processing all the pending commands from normal and pub/sub clients
+  for the given mode. However interactions with replicas will continue normally.
+  Note that clients are formally paused when they try to execute a command, so
+  no work is taken on the server side for inactive clients.
 - However it returns OK to the caller ASAP, so the `CLIENT PAUSE` command
   execution is not paused by itself.
 - When the specified amount of time has elapsed, all the clients are unblocked:
   this will trigger the processing of all the commands accumulated in the query
   buffer of every client during the pause.

+Client pause currently supports two modes:
+
+- `ALL`: This is the default mode. All client commands are blocked.
+- `WRITE`: Clients are only blocked if they attempt to execute a write command.
+
+For the `WRITE` mode, some commands have special behavior:
+
+- `EVAL`/`EVALSHA`: Will block client for all scripts.
+- `PUBLISH`: Will block client.
+- `PFCOUNT`: Will block client.
+- `WAIT`: Acknowledgements will be delayed, so this command will appear blocked.
+
 This command is useful as it makes able to switch clients from a Redis instance
 to another one in a controlled way. For example during an instance upgrade the
 system administrator could do the following:
@@ -21,11 +35,16 @@ system administrator could do the following:
 - Turn one of the replicas into a master.
 - Reconfigure clients to connect with the new master.

-It is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the
-`INFO replication` command in order to get the current master offset at the time
-the clients are blocked. This way it is possible to wait for a specific offset
-in the replica side in order to make sure all the replication stream was
-processed.
+Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode
+will stop all replication traffic, can be aborted with the `CLIENT UNPAUSE`
+command, and allows reconfiguring the old master without risking accepting
+writes after the failover. This is also the mode used during cluster failover.
+
+For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC
+block together with the `INFO replication` command in order to get the current
+master offset at the time the clients are blocked. This way it is possible to
+wait for a specific offset in the replica side in order to make sure all the
+replication stream was processed.

 Since Redis 3.2.10 / 4.0.0, this command also prevents keys to be evicted or
 expired during the time clients are paused. This way the dataset is guaranteed
@@ -36,3 +55,8 @@ but also from the point of view of internal operations.
 @simple-string-reply: The command returns OK or an error if the timeout is
 invalid.

+@history
+
+- `>= 3.2.10`: Client pause prevents key eviction and expiration as well.
+- `>= 6.2`: `CLIENT PAUSE WRITE` mode added along with the `mode` option.
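
A minimal sketch of the 6.2-style usage described above: pause write commands for five seconds, then resume early with `CLIENT UNPAUSE`:

```
> CLIENT PAUSE 5000 WRITE
OK
> CLIENT UNPAUSE
OK
```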

View file

@@ -37,7 +37,9 @@ when enabling tracking:
   notifications will be provided only for keys starting with this string. This
   option can be given multiple times to register multiple prefixes. If
   broadcasting is enabled without this option, Redis will send notifications for
-  every key.
+  every key. You can't delete a single prefix, but you can delete all prefixes
+  by disabling and re-enabling tracking. Using this option adds the additional
+  time complexity of O(N^2), where N is the total number of prefixes tracked.
 - `OPTIN`: when broadcasting is NOT active, normally don't track keys in read
   only commands, unless they are called immediately after a `CLIENT CACHING yes`
   command.
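
For illustration, broadcasting mode with multiple registered prefixes as described above (prefixes hypothetical; each `PREFIX` registers one pattern):

```
> CLIENT TRACKING ON BCAST PREFIX user: PREFIX session:
OK
```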

View file

@@ -0,0 +1,25 @@
The command returns information about the current client connection's use of the
[server assisted client side caching](/topics/client-side-caching) feature.

@return

@array-reply: a list of tracking information sections and their respective
values, specifically:

- **flags**: A list of tracking flags used by the connection. The flags and
  their meanings are as follows:
  - `off`: The connection isn't using server assisted client side caching.
  - `on`: Server assisted client side caching is enabled for the connection.
  - `bcast`: The client uses broadcasting mode.
  - `optin`: The client does not cache keys by default.
  - `optout`: The client caches keys by default.
  - `caching-yes`: The next command will cache keys (exists only together with
    `optin`).
  - `caching-no`: The next command won't cache keys (exists only together with
    `optout`).
  - `noloop`: The client isn't notified about keys modified by itself.
  - `broken_redirect`: The client ID used for redirection isn't valid anymore.
- **redirect**: The client ID used for notifications redirection, or -1 when
  none.
- **prefixes**: A list of key prefixes for which notifications are sent to the
  client.
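
A sketch of the reply shape for a connection with tracking disabled (the exact rendering may vary with the client used):

```
> CLIENT TRACKINGINFO
1) "flags"
2) 1) "off"
3) "redirect"
4) (integer) -1
5) "prefixes"
6) (empty array)
```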

View file

@@ -0,0 +1,6 @@
`CLIENT UNPAUSE` is used to resume command processing for all clients that were
paused by `CLIENT PAUSE`.

@return

@simple-string-reply: The command returns `OK`

View file

@@ -46,7 +46,7 @@ bound with another node, or if the configuration epoch of the node advertising
 the new hash slot, is greater than the node currently listed in the table.

 This means that this command should be used with care only by applications
-orchestrating Redis Cluster, like `redis-trib`, and the command if used out of
+orchestrating Redis Cluster, like `redis-cli`, and the command if used out of
 the right context can leave the cluster in a wrong state or cause data loss.

 @return

View file

@@ -38,7 +38,7 @@ node receiving the command:
 This command only works in cluster mode and may be useful for debugging and in
 order to manually orchestrate a cluster configuration when a new cluster is
-created. It is currently not used by `redis-trib`, and mainly exists for API
+created. It is currently not used by `redis-cli`, and mainly exists for API
 completeness.

 @return

View file

@@ -3,6 +3,6 @@ Deletes all slots from a node.
 The `CLUSTER FLUSHSLOTS` deletes all information about slots from the connected
 node. It can only be called when the database is empty.

-@reply
+@return

 @simple-string-reply: `OK`

View file

@@ -11,8 +11,8 @@ additional info appended at the end).
 Note that normally clients willing to fetch the map between Cluster hash slots
 and node addresses should use `CLUSTER SLOTS` instead. `CLUSTER NODES`, that
 provides more information, should be used for administrative tasks, debugging,
-and configuration inspections. It is also used by `redis-trib` in order to
-manage a cluster.
+and configuration inspections. It is also used by `redis-cli` in order to manage
+a cluster.

 ## Serialization format
@@ -41,8 +41,8 @@ The meaning of each filed is the following:
 2. `ip:port@cport`: The node address where clients should contact the node to
    run queries.
 3. `flags`: A list of comma separated flags: `myself`, `master`, `slave`,
-   `fail?`, `fail`, `handshake`, `noaddr`, `noflags`. Flags are explained in
-   detail in the next section.
+   `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are
+   explained in detail in the next section.
 4. `master`: If the node is a replica, and the master is known, the master node
    ID, otherwise the "-" character.
 5. `ping-sent`: Milliseconds unix time at which the currently active ping was
@@ -74,6 +74,7 @@ Meaning of the flags (field number 3):
   promoted the `PFAIL` state to `FAIL`.
 - `handshake`: Untrusted node, we are handshaking.
 - `noaddr`: No address known for this node.
+- `nofailover`: Replica will not try to failover.
 - `noflags`: No flags at all.

 ## Notes on published config epochs

View file

@@ -65,9 +65,10 @@ already migrated to the target node are executed in the target node, so that:
 ## CLUSTER SETSLOT `<slot>` STABLE

 This subcommand just clears migrating / importing state from the slot. It is
-mainly used to fix a cluster stuck in a wrong state by `redis-trib fix`.
-Normally the two states are cleared automatically at the end of the migration
-using the `SETSLOT ... NODE ...` subcommand as explained in the next section.
+mainly used to fix a cluster stuck in a wrong state by
+`redis-cli --cluster fix`. Normally the two states are cleared automatically at
+the end of the migration using the `SETSLOT ... NODE ...` subcommand as
+explained in the next section.

 ## CLUSTER SETSLOT `<slot>` NODE `<node-id>`

View file

@@ -73,7 +73,7 @@ Command flags is @array-reply containing one or more status replies:
 - _write_ - command may result in modifications
 - _readonly_ - command will never modify keys
-- _denyoom_ - reject command if currently OOM
+- _denyoom_ - reject command if currently out of memory
 - _admin_ - server admin command
 - _pubsub_ - pubsub-related command
 - _noscript_ - deny this command from scripts
@@ -109,8 +109,12 @@ relevant key positions.
 Complete list of commands currently requiring key location parsing:

 - `SORT` - optional `STORE` key, optional `BY` weights, optional `GET` keys
+- `ZUNION` - keys stop when `WEIGHT` or `AGGREGATE` starts
 - `ZUNIONSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
+- `ZINTER` - keys stop when `WEIGHT` or `AGGREGATE` starts
 - `ZINTERSTORE` - keys stop when `WEIGHT` or `AGGREGATE` starts
+- `ZDIFF` - keys stop after `numkeys` count arguments
+- `ZDIFFSTORE` - keys stop after `numkeys` count arguments
 - `EVAL` - keys stop after `numkeys` count arguments
 - `EVALSHA` - keys stop after `numkeys` count arguments
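
The key-location parsing described above can be checked with `COMMAND GETKEYS`, which applies the same rules to a full command line (key names hypothetical; `ZDIFF` requires Redis 6.2 or later):

```
> COMMAND GETKEYS ZDIFF 2 zset1 zset2
1) "zset1"
2) "zset2"
```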

View file

@@ -13,7 +13,7 @@ All the supported parameters have the same meaning of the equivalent
 configuration parameter used in the [redis.conf][hgcarr22rc] file, with the
 following important differences:

-[hgcarr22rc]: http://github.com/redis/redis/raw/2.8/redis.conf
+[hgcarr22rc]: http://github.com/redis/redis/raw/6.0/redis.conf

 - In options where bytes or other quantities are specified, it is not possible
   to use the `redis.conf` abbreviated form (`10k`, `2gb` ... and so forth),

View file

@@ -0,0 +1,24 @@
This command copies the value stored at the `source` key to the `destination`
key.

By default, the `destination` key is created in the logical database used by the
connection. The `DB` option allows specifying an alternative logical database
index for the destination key.

The command returns an error when the `destination` key already exists. The
`REPLACE` option removes the `destination` key before copying the value to it.

@return

@integer-reply, specifically:

- `1` if `source` was copied.
- `0` if `source` was not copied.

@examples

```
SET dolly "sheep"
COPY dolly clone
GET clone
```
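
A variant of the example above exercising the `DB` and `REPLACE` options (requires Redis 6.2 or later; the destination database index is chosen arbitrarily):

```
> SET dolly "sheep"
OK
> COPY dolly clone DB 1 REPLACE
(integer) 1
```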

View file

@ -214,6 +214,27 @@ format specified above (as a Lua table with an `err` field). The script can pass
the exact error to the user by returning the error object returned by the exact error to the user by returning the error object returned by
`redis.pcall()`. `redis.pcall()`.
## Running Lua under low memory conditions
When the memory usage in Redis exceeds the `maxmemory` limit, the first write
command encountered in the Lua script that uses additional memory will cause the
script to abort (unless `redis.pcall` was used). However, one thing to caution
here is that if the first write command does not use additional memory such as
DEL, LREM, or SREM, etc, Redis will allow it to run and all subsequent commands
in the Lua script will execute to completion for atomicity. If the subsequent
writes in the script generate additional memory, the Redis memory usage can go
over `maxmemory`.
Another possible way for Lua script to cause Redis memory usage to go above
`maxmemory` happens when the script execution starts when Redis is slightly
below `maxmemory` so the first write command in the script is allowed. As the
script executes, subsequent write commands continue to generate memory and
causes the Redis server to go above `maxmemory`.
In those scenarios, it is recommended to configure the `maxmemory-policy` not to
use `noeviction`. Also Lua scripts should be short so that evictions of items
can happen in between Lua scripts.
## Bandwidth and EVALSHA ## Bandwidth and EVALSHA
The `EVAL` command forces you to send the script body again and again. Redis The `EVAL` command forces you to send the script body again and again. Redis
@ -619,13 +640,13 @@ the cause of bugs.
## Using Lua scripting in RESP3 mode
Starting with Redis version 6, the server supports two different protocols. One
is called RESP2, and is the old protocol: all the new connections to the server
start in this mode. However clients are able to negotiate the new protocol using
the `HELLO` command: this way the connection is put in RESP3 mode. In this mode
certain commands, like for instance `HGETALL`, reply with a new data type (the
Map data type in this specific case). The RESP3 protocol is semantically more
powerful, however most scripts are OK with using just RESP2.
The Lua engine always assumes to run in RESP2 mode when talking with Redis, so
whatever the connection that is invoking the `EVAL` or `EVALSHA` command is in
@ -669,7 +690,7 @@ At this point the new conversions are available, specifically:
- Lua table with a single `map` field set to a field-value Lua table -> Redis
map reply.
- Lua table with a single `set` field set to a field-value Lua table -> Redis
set reply, the values are discarded and can be anything.
- Lua table with a single `double` field set to a field-value Lua table -> Redis
double reply.
- Lua null -> Redis RESP3 new null reply (protocol `"_\r\n"`).
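As a brief illustration of these conversions (a sketch; it assumes the script
has opted in to RESP3 handling as described above, and the field names are
arbitrary):
```
-- return a RESP3 map reply to the client; clients still speaking
-- RESP2 receive a flattened array of fields and values instead
return { map = { name = "Palermo", country = "Italy" } }
```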

View file

@ -0,0 +1,19 @@
This is a read-only variant of the `EVAL` command that isn't allowed to execute
commands that modify data.
Unlike `EVAL`, scripts executed with this command can always be killed and never
affect the replication stream. Because it can only read data, this command can
always be executed on a master or a replica.
@examples
```
> SET mykey "Hello"
OK
> EVAL_RO "return redis.call('GET', KEYS[1])" 1 mykey
"Hello"
> EVAL_RO "return redis.call('DEL', KEYS[1])" 1 mykey
(error) ERR Error running script (call to f_359f69785f876b7f3f60597d81534f3d6c403284): @user_script:1: @user_script: 1: Write commands are not allowed from read-only scripts
```

View file

@ -0,0 +1,6 @@
This is a read-only variant of the `EVALSHA` command that isn't allowed to
execute commands that modify data.
Unlike `EVALSHA`, scripts executed with this command can always be killed and
never affect the replication stream. Because it can only read data, this command
can always be executed on a master or a replica.

View file

@ -0,0 +1,22 @@
Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which
the given key will expire.
See also the `PEXPIRETIME` command which returns the same information with
milliseconds resolution.
@return
@integer-reply: Expiration Unix timestamp in seconds, or a negative value in
order to signal an error (see the description below).
- The command returns `-1` if the key exists but has no associated expiration
time.
- The command returns `-2` if the key does not exist.
@examples
```cli
SET mykey "Hello"
EXPIREAT mykey 33177117420
EXPIRETIME mykey
```

View file

@ -0,0 +1,84 @@
This command will start a coordinated failover between the
currently-connected-to master and one of its replicas. The failover is not
synchronous, instead a background task will handle coordinating the failover. It
is designed to limit data loss and unavailability of the cluster during the
failover. This command is analogous to the `CLUSTER FAILOVER` command for
non-clustered Redis and is similar to the failover support provided by sentinel.
The specific details of the default failover flow are as follows:
1. The master will internally start a `CLIENT PAUSE WRITE`, which will pause
incoming writes and prevent the accumulation of new data in the replication
stream.
2. The master will monitor its replicas, waiting for a replica to indicate that
it has fully consumed the replication stream. If the master has multiple
replicas, it will only wait for the first replica to catch up.
3. The master will then demote itself to a replica. This is done to prevent any
dual master scenarios. NOTE: The master will not discard its data, so it will
be able to rollback if the replica rejects the failover request in the next
step.
4. The previous master will send a special PSYNC request to the target replica,
`PSYNC FAILOVER`, instructing the target replica to become a master.
5. Once the previous master receives acknowledgement that the `PSYNC FAILOVER`
request was accepted, it will unpause its clients. If the PSYNC request is
rejected, the master will abort the failover and return to normal.
The field `master_failover_state` in `INFO replication` can be used to track the
current state of the failover, which has the following values:
- `no-failover`: There is no ongoing coordinated failover.
- `waiting-for-sync`: The master is waiting for the replica to catch up to its
replication offset.
- `failover-in-progress`: The master has demoted itself, and is attempting to
hand off ownership to a target replica.
If the previous master had additional replicas attached to it, they will
continue replicating from it as chained replicas. You will need to manually
execute a `REPLICAOF` on these replicas to start replicating directly from the
new master.
## Optional arguments
The following optional arguments exist to modify the behavior of the failover
flow:
- `TIMEOUT` _milliseconds_ -- This option allows specifying a maximum time a
master will wait in the `waiting-for-sync` state before aborting the failover
attempt and rolling back. This is intended to set an upper bound on the write
outage the Redis cluster can experience. Failovers typically happen in less
than a second, but could take longer if there is a large amount of write
traffic or the replica is already behind in consuming the replication stream.
If this value is not specified, the timeout can be considered to be
"infinite".
- `TO` _HOST_ _PORT_ -- This option allows designating a specific replica, by
its host and port, to failover to. The master will wait specifically for this
replica to catch up to its replication offset, and then failover to it.
- `FORCE` -- If both the `TIMEOUT` and `TO` options are set, the force flag can
also be used to designate that once the timeout has elapsed, the master
should failover to the target replica instead of rolling back. This can be
used for a best-effort attempt at a failover without data loss, but limiting
write outage.
NOTE: The master will always rollback if the `PSYNC FAILOVER` request is
rejected by the target replica.
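For illustration, the options can be combined as follows (a sketch only; the
host, port, and timeout values are placeholders):
```
FAILOVER TO 10.0.0.2 6379 TIMEOUT 5000
```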
## Failover abort
The failover command is intended to be safe from data loss and corruption, but
can encounter some scenarios it can not automatically remediate from and may get
stuck. For this purpose, the `FAILOVER ABORT` command exists, which will abort
an ongoing failover and return the master to its normal state. The command has
no side effects if issued in the `waiting-for-sync` state but can introduce
multi-master scenarios in the `failover-in-progress` state. If a multi-master
scenario is encountered, you will need to manually identify which master has the
latest data, designate it as the master, and have the other replicas replicate
from it.
NOTE: `REPLICAOF` is disabled while a failover is in progress; this is to
prevent unintended interactions with the failover that might cause data loss.
@return
@simple-string-reply: `OK` if the command was accepted and a coordinated
failover is in progress. An error if the operation cannot be executed.

View file

@ -1,19 +1,26 @@
Delete all the keys of all the existing databases, not just the currently
selected one. This command never fails.
By default, `FLUSHALL` will synchronously flush all the databases. Starting with
Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to
"yes" changes the default flush mode to asynchronous.
It is possible to use one of the following modifiers to dictate the flushing
mode explicitly:
- `ASYNC`: flushes the databases asynchronously
- `!SYNC`: flushes the databases synchronously
Note: an asynchronous `FLUSHALL` command only deletes keys that were present at
the time the command was invoked. Keys created during an asynchronous flush will
be unaffected.
@return
@simple-string-reply
@history
- `>= 4.0.0`: Added the `ASYNC` flushing mode modifier.
- `>= 6.2.0`: Added the `!SYNC` flushing mode modifier and the
**lazyfree-lazy-user-flush** configuration directive.
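For example, to request an explicit flushing mode (a usage sketch):
```
FLUSHALL ASYNC
```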

View file

@ -1,11 +1,18 @@
Delete all the keys of the currently selected DB. This command never fails.
By default, `FLUSHDB` will synchronously flush all keys from the database.
Starting with Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration
directive to "yes" changes the default flush mode to asynchronous.
It is possible to use one of the following modifiers to dictate the flushing
mode explicitly:
- `ASYNC`: flushes the database asynchronously
- `!SYNC`: flushes the database synchronously
Note: an asynchronous `FLUSHDB` command only deletes keys that were present at
the time the command was invoked. Keys created during an asynchronous flush will
be unaffected.
@return

View file

@ -1,12 +1,13 @@
Adds the specified geospatial items (longitude, latitude, name) to the specified
key. Data is stored into the key as a sorted set, in a way that makes it
possible to query the items with the `GEOSEARCH` command.
The command takes arguments in the standard format x,y so the longitude must be
specified before the latitude. There are limits to the coordinates that can be
indexed: areas very near to the poles are not indexable.
The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the
following:
- Valid longitudes are from -180 to 180 degrees.
- Valid latitudes are from -85.05112878 to 85.05112878 degrees.
@ -14,37 +15,58 @@ specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the following:
The command will report an error when the user attempts to index coordinates
outside the specified ranges.
**Note:** there is no **GEODEL** command because you can use `ZREM` to remove
elements. The Geo index structure is just a sorted set.
## GEOADD options
`GEOADD` also provides the following options:
- **XX**: Only update elements that already exist. Never add elements.
- **NX**: Don't update already existing elements. Always add new elements.
- **CH**: Modify the return value from the number of new elements added, to the
total number of elements changed (CH is an abbreviation of _changed_). Changed
elements are **new elements added** and elements already existing for which
**the coordinates were updated**. So elements specified in the command line
having the same score as they had in the past are not counted. Note: normally,
the return value of `GEOADD` only counts the number of new elements added.
Note: The **XX** and **NX** options are mutually exclusive.
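For illustration, the options combine with the regular arguments like so (a
sketch; the key and member follow the examples further below):
```
GEOADD Sicily NX 13.361389 38.115556 "Palermo"
GEOADD Sicily XX CH 13.361389 38.115556 "Palermo"
```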
## How does it work?
The way the sorted set is populated is using a technique called
[Geohash](https://en.wikipedia.org/wiki/Geohash). Latitude and Longitude bits
are interleaved to form a unique 52-bit integer. We know that a sorted set
double score can represent a 52-bit integer without losing precision.
This format allows for bounding box and radius querying by checking the 1+8
areas needed to cover the whole shape and discarding elements outside it. The
areas are checked by calculating the range of the box covered, removing enough
bits from the less significant part of the sorted set score, and computing the
score range to query in the sorted set for each area.
## What Earth model does it use?
The model assumes that the Earth is a sphere since it uses the Haversine formula
to calculate distance. This formula is only an approximation when applied to the
Earth, which is not a perfect sphere. The introduced errors are not an issue
when used, for example, by social networks and similar applications requiring
this type of querying. However, in the worst case, the error may be up to 0.5%,
so you may want to consider other systems for error-critical applications.
@return
@integer-reply, specifically:
- When used without optional arguments, the number of elements added to the
sorted set (excluding score updates).
- If the `CH` option is specified, the number of elements that were changed
(added or updated).
@history
- `>= 6.2`: Added the `CH`, `NX` and `XX` options.
@examples

View file

@ -2,6 +2,9 @@ Return the members of a sorted set populated with geospatial information using
`GEOADD`, which are within the borders of the area specified with the center
location and the maximum distance from the center (the radius).
As of Redis 6.2.0, the GEORADIUS command family is considered deprecated. Please
prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code.
This manual page also covers the `GEORADIUS_RO` and `GEORADIUSBYMEMBER_RO`
variants (see the section below for more information).
@ -38,11 +41,13 @@ can be invoked using the following two options:
By default all the matching items are returned. It is possible to limit the
results to the first N matching items by using the **COUNT `<count>`** option.
When `ANY` is provided the command will return as soon as enough matches are
found, so the results may not be the ones closest to the specified point, but on
the other hand, the effort invested by the server is significantly lower. When
`ANY` is not provided, the command will perform an effort that is proportional
to the number of items matching the specified area and sort them, so querying
very large areas with a very small `COUNT` option may be slow even if just a few
results are returned.
By default the command returns the items to the client. It is possible to store
the results with one of these options:
@ -93,6 +98,10 @@ They are exactly like the original commands but refuse the `STORE` and
Both commands were introduced in Redis 3.2.10 and Redis 4.0.0 respectively.
@history
- `>= 6.2`: Added the `ANY` option for `COUNT`.
@examples
```cli

View file

@ -3,6 +3,9 @@ of taking, as the center of the area to query, a longitude and latitude value,
it takes the name of a member already existing inside the geospatial index
represented by the sorted set.
As of Redis 6.2.0, the GEORADIUS command family is considered deprecated. Please
prefer `GEOSEARCH` and `GEOSEARCHSTORE` in new code.
The position of the specified member is used as the center of the query.
Please check the example below and the `GEORADIUS` documentation for more

View file

@ -0,0 +1,77 @@
Return the members of a sorted set populated with geospatial information using
`GEOADD`, which are within the borders of the area specified by a given shape.
This command extends the `GEORADIUS` command, so in addition to searching within
circular areas, it supports searching within rectangular areas.
This command should be used in place of the deprecated `GEORADIUS` and
`GEORADIUSBYMEMBER` commands.
The query's center point is provided by one of these mandatory options:
- `FROMMEMBER`: Use the position of the given existing `<member>` in the sorted
set.
- `FROMLONLAT`: Use the given `<longitude>` and `<latitude>` position.
The query's shape is provided by one of these mandatory options:
- `BYRADIUS`: Similar to `GEORADIUS`, search inside circular area according to
given `<radius>`.
- `BYBOX`: Search inside an axis-aligned rectangle, determined by `<height>` and
`<width>`.
The command optionally returns additional information using the following
options:
- `WITHDIST`: Also return the distance of the returned items from the specified
center point. The distance is returned in the same unit as specified for the
radius or height and width arguments.
- `WITHCOORD`: Also return the longitude and latitude of the matching items.
- `WITHHASH`: Also return the raw geohash-encoded sorted set score of the item,
in the form of a 52 bit unsigned integer. This is only useful for low level
hacks or debugging and is otherwise of little interest for the general user.
Matching items are returned unsorted by default. To sort them, use one of the
following two options:
- `ASC`: Sort returned items from the nearest to the farthest, relative to the
center point.
- `DESC`: Sort returned items from the farthest to the nearest, relative to the
center point.
All matching items are returned by default. To limit the results to the first N
matching items, use the **COUNT `<count>`** option. When the `ANY` option is
used, the command returns as soon as enough matches are found. This means that
the results returned may not be the ones closest to the specified point, but the
effort invested by the server to generate them is significantly less. When `ANY`
is not provided, the command will perform an effort that is proportional to the
number of items matching the specified area and sort them, so querying very
large areas with a very small `COUNT` option may be slow even if just a few
results are returned.
@return
@array-reply, specifically:
- Without any `WITH` option specified, the command just returns a linear array
like ["New York","Milan","Paris"].
- If `WITHCOORD`, `WITHDIST` or `WITHHASH` options are specified, the command
returns an array of arrays, where each sub-array represents a single item.
When additional information is returned as an array of arrays for each item, the
first item in the sub-array is always the name of the returned item. The other
information is returned in the following order as successive elements of the
sub-array.
1. The distance from the center as a floating point number, in the same unit
specified in the shape.
2. The geohash integer.
3. The coordinates as a two items x,y array (longitude,latitude).
@examples
```cli
GEOADD Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
GEOADD Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "edge2"
GEOSEARCH Sicily FROMLONLAT 15 37 BYRADIUS 200 km ASC
GEOSEARCH Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC
```

View file

@ -0,0 +1,11 @@
This command is like `GEOSEARCH`, but stores the result in the `destination`
key. This command comes in place of the now deprecated `GEORADIUS` and
`GEORADIUSBYMEMBER`.
By default, it stores the results in the `destination` sorted set with their
geospatial information.
When using the `STOREDIST` option, the command stores the items in a sorted set
populated with their distance from the center of the circle or box, as a
floating-point number, in the same unit specified for that shape.
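A usage sketch (assuming the `Sicily` key from the `GEOSEARCH` examples; the
destination key names are arbitrary):
```
GEOSEARCHSTORE key1 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3
GEOSEARCHSTORE key2 Sicily FROMLONLAT 15 37 BYBOX 400 400 km ASC COUNT 3 STOREDIST
```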

View file

@ -0,0 +1,16 @@
Get the value of `key` and delete the key. This command is similar to `GET`,
except for the fact that it also deletes the key on success (if and only if the
key's value type is a string).
@return
@bulk-string-reply: the value of `key`, `nil` when `key` does not exist, or an
error if the key's value type isn't a string.
@examples
```cli
SET mykey "Hello"
GETDEL mykey
GET mykey
```

View file

@ -0,0 +1,28 @@
Get the value of `key` and optionally set its expiration. `GETEX` is similar to
`GET`, but is a write command with additional options.
## Options
The `GETEX` command supports a set of options that modify its behavior:
- `EX` _seconds_ -- Set the specified expire time, in seconds.
- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds.
- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key
will expire, in seconds.
- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the
key will expire, in milliseconds.
- `PERSIST` -- Remove the time to live associated with the key.
@return
@bulk-string-reply: the value of `key`, or `nil` when `key` does not exist.
@examples
```cli
SET mykey "Hello"
GETEX mykey
TTL mykey
GETEX mykey EX 60
TTL mykey
```

View file

@ -1,5 +1,7 @@
Atomically sets `key` to `value` and returns the old value stored at `key`.
Returns an error when `key` exists but does not hold a string value. Any
previous time to live associated with the key is discarded on successful `SET`
operation.
## Design pattern
@ -14,6 +16,9 @@ GETSET mycounter "0"
GET mycounter
```
As of Redis 6.2, `GETSET` is considered deprecated. Please prefer `SET` with the
`GET` parameter in new code.
@return
@bulk-string-reply: the old value stored at `key`, or `nil` when `key` did not

View file

@ -1,16 +1,41 @@
Switch to a different protocol, optionally authenticating and setting the
connection's name, or provide a contextual client report.
Redis version 6 and above supports two protocols: the old protocol, RESP2, and a
new one introduced with Redis 6, RESP3. RESP3 has certain advantages since when
the connection is in this mode, Redis is able to reply with more semantical
replies: for instance, `HGETALL` will return a _map type_, so a client library
implementation no longer needs to know in advance how to translate the array
into a hash before returning it to the caller. For a full coverage of RESP3,
please [check this repository](https://github.com/antirez/resp3).
In Redis 6 connections start in RESP2 mode, so clients implementing RESP2 do not
need to be updated or changed. There are no short term plans to drop support for
RESP2, although future versions may default to RESP3.
`HELLO` always replies with a list of current server and connection properties,
such as: versions, modules loaded, client ID, replication role and so forth.
When called without any arguments in Redis 6.2 and its default use of RESP2
protocol, the reply looks like this:
> HELLO
1) "server"
2) "redis"
3) "version"
4) "255.255.255"
5) "proto"
6) (integer) 2
7) "id"
8) (integer) 5
9) "mode"
10) "standalone"
11) "role"
12) "master"
13) "modules"
14) (empty array)
Clients that want to handshake using the RESP3 mode need to call the `HELLO`
command and specify the value "3" as the `protover` argument, like so:
> HELLO 3
1# "server" => "redis"
@ -21,26 +46,28 @@ using "3" as first argument.
6# "role" => "master" 6# "role" => "master"
7# "modules" => (empty array) 7# "modules" => (empty array)
Because `HELLO` replies with useful information, and given that `protover` is
optional or can be set to "2", client library authors may consider using this
command instead of the canonical `PING` when setting up the connection.
When called with the optional `protover` argument, this command switches the
protocol to the specified version and also accepts the following options:
- `AUTH <username> <password>`: directly authenticate the connection in addition
to switching to the specified protocol version. This makes calling `AUTH`
before `HELLO` unnecessary when setting up a new connection. Note that the
`username` can be set to "default" to authenticate against a server that does
not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to
version 6.
- `SETNAME <clientname>`: this is the equivalent of calling `CLIENT SETNAME`.
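For instance, a client can switch to RESP3, authenticate, and set its name in a
single call (a sketch; the password and connection name are placeholders):
```
HELLO 3 AUTH default mypassword SETNAME myclient
```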
@return
@array-reply: a list of server properties. The reply is a map instead of an
array when RESP3 is selected. The command returns an error if the `protover`
requested does not exist.
@history
- `>= 6.2`: `protover` made optional; when called without arguments the command
reports the current connection's context.

View file

@ -2,7 +2,7 @@ Sets the specified fields to their respective values in the hash stored at
`key`. This command overwrites any specified fields already existing in the
hash. If `key` does not exist, a new key holding a hash is created.
As of Redis 4.0.0, `HMSET` is considered deprecated. Please prefer `HSET` in new
code.
@return

View file

@ -0,0 +1,50 @@
When called with just the `key` argument, return a random field from the hash
value stored at `key`.
If the provided `count` argument is positive, return an array of **distinct
fields**. The array's length is either `count` or the hash's number of fields
(`HLEN`), whichever is lower.
If called with a negative `count`, the behavior changes and the command is
allowed to return the **same field multiple times**. In this case, the number of
returned fields is the absolute value of the specified `count`.
The optional `WITHVALUES` modifier changes the reply so it includes the
respective values of the randomly selected hash fields.
@return
@bulk-string-reply: without the additional `count` argument, the command returns
a Bulk Reply with the randomly selected field, or `nil` when `key` does not
exist.
@array-reply: when the additional `count` argument is passed, the command
returns an array of fields, or an empty array when `key` does not exist. If the
`WITHVALUES` modifier is used, the reply is a list of fields and their values
from the hash.
@examples
```cli
HMSET coin heads obverse tails reverse edge null
HRANDFIELD coin
HRANDFIELD coin
HRANDFIELD coin -5 WITHVALUES
```
## Specification of the behavior when count is passed
When the `count` argument is a positive value this command behaves as follows:
- No repeated fields are returned.
- If `count` is bigger than the number of fields in the hash, the command will
only return the whole hash without additional fields.
- The order of fields in the reply is not truly random, so it is up to the
client to shuffle them if needed.
When the `count` is a negative value, the behavior changes as follows:
- Repeating fields are possible.
- Exactly `count` fields, or an empty array if the hash is empty (non-existing
key), are always returned.
- The order of fields in the reply is truly random.

View file

@ -65,14 +65,14 @@ The more simple and direct implementation of this pattern is the following:
FUNCTION LIMIT_API_CALL(ip)
ts = CURRENT_UNIX_TIME()
keyname = ip+":"+ts
MULTI
    INCR(keyname)
    EXPIRE(keyname,10)
EXEC
current = RESPONSE_OF_INCR_WITHIN_MULTI
IF current > 10 THEN
    ERROR "too many requests per second"
ELSE
    PERFORM_API_CALL()
END
```
@ -119,7 +119,7 @@ script that is sent using the `EVAL` command (only available since Redis version
```
local current
current = redis.call("incr",KEYS[1])
if current == 1 then
    redis.call("expire",KEYS[1],1)
end
```
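A hedged invocation sketch (the key name is a placeholder, and the script above
is collapsed to one line with an added `return` so the reply is visible):
```
EVAL "local current = redis.call('incr',KEYS[1]) if current == 1 then redis.call('expire',KEYS[1],1) end return current" 1 rate.limit:127.0.0.1
```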

View file

@ -15,6 +15,7 @@ The optional parameter can be used to select a specific section of information:
- `modules`: Modules section
- `keyspace`: Database related statistics
- `modules`: Module related sections
- `errorstats`: Redis error statistics
It can also take the following values:
@ -60,6 +61,7 @@ Here is the meaning of all fields in the **server** section:
- `run_id`: Random value identifying the Redis server (to be used by Sentinel
and Cluster)
- `tcp_port`: TCP/IP listen port
- `server_time_in_usec`: Epoch-based system time with microsecond precision
- `uptime_in_seconds`: Number of seconds since Redis server start
- `uptime_in_days`: Same value expressed in days
- `hz`: The server's current frequency setting
@ -72,14 +74,20 @@ Here is the meaning of all fields in the **clients** section:
- `connected_clients`: Number of client connections (excluding connections from
replicas)
- `cluster_connections`: An approximation of the number of sockets used by the
cluster's bus
- `maxclients`: The value of the `maxclients` configuration directive. This is
the upper limit for the sum of `connected_clients`, `connected_slaves` and
`cluster_connections`.
- `client_longest_output_list`: Longest output list among current client
connections
- `client_biggest_input_buf`: Biggest input buffer among current client
connections
- `blocked_clients`: Number of clients pending on a blocking call (`BLPOP`,
`BRPOP`, `BRPOPLPUSH`, `BLMOVE`, `BZPOPMIN`, `BZPOPMAX`)
- `tracking_clients`: Number of clients being tracked (`CLIENT TRACKING`)
- `clients_in_timeout_table`: Number of clients in the clients timeout table
- `io_threads_active`: Flag indicating if I/O threads are active
Here is the meaning of all fields in the **memory** section:
@ -143,6 +151,15 @@ by referring to the `MEMORY STATS` command and the `MEMORY DOCTOR`.
Here is the meaning of all fields in the **persistence** section:
- `loading`: Flag indicating if the load of a dump file is on-going
- `current_cow_size`: The size in bytes of copy-on-write memory while a child
fork is running
- `current_fork_perc`: The percentage of progress of the current fork process.
For AOF and RDB forks it is the percentage of `current_save_keys_processed`
out of `current_save_keys_total`.
- `current_save_keys_processed`: Number of keys processed by the current save
operation
- `current_save_keys_total`: Number of keys at the beginning of the current save
operation
- `rdb_changes_since_last_save`: Number of changes since the last dump
- `rdb_bgsave_in_progress`: Flag indicating a RDB save is on-going
- `rdb_last_save_time`: Epoch-based timestamp of last successful RDB save
@ -150,8 +167,8 @@ Here is the meaning of all fields in the **persistence** section:
- `rdb_last_bgsave_time_sec`: Duration of the last RDB save operation in seconds
- `rdb_current_bgsave_time_sec`: Duration of the on-going RDB save operation if
any
- `rdb_last_cow_size`: The size in bytes of copy-on-write memory during the last
RDB save operation
- `aof_enabled`: Flag indicating AOF logging is activated
- `aof_rewrite_in_progress`: Flag indicating a AOF rewrite operation is on-going
- `aof_rewrite_scheduled`: Flag indicating an AOF rewrite operation will be
@ -162,11 +179,11 @@ Here is the meaning of all fields in the **persistence** section:
if any
- `aof_last_bgrewrite_status`: Status of the last AOF rewrite operation
- `aof_last_write_status`: Status of the last write operation to the AOF
- `aof_last_cow_size`: The size in bytes of copy-on-write memory during the last
AOF rewrite operation
- `module_fork_in_progress`: Flag indicating a module fork is on-going
- `module_fork_last_cow_size`: The size in bytes of copy-on-write memory during
the last module fork operation
`rdb_changes_since_last_save` refers to the number of operations that produced
some kind of changes in the dataset since the last time either `SAVE` or
@ -187,6 +204,8 @@ If a load operation is on-going, these additional fields will be added:
- `loading_start_time`: Epoch-based timestamp of the start of the load operation
- `loading_total_bytes`: Total file size
- `loading_rdb_used_mem`: The memory usage of the server that had generated the
RDB file at the time of the file's creation
- `loading_loaded_bytes`: Number of bytes already loaded
- `loading_loaded_perc`: Same value expressed as a percentage
- `loading_eta_seconds`: ETA in seconds for the load to be complete
@ -218,6 +237,7 @@ Here is the meaning of all fields in the **stats** section:
- `pubsub_channels`: Global number of pub/sub channels with client subscriptions
- `pubsub_patterns`: Global number of pub/sub pattern with client subscriptions
- `latest_fork_usec`: Duration of the latest fork operation in microseconds
- `total_forks`: Total number of fork operations since the server start
- `migrate_cached_sockets`: The number of sockets open for `MIGRATE` purposes
- `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes
(applicable only to writable replicas)
@ -235,12 +255,22 @@ Here is the meaning of all fields in the **stats** section:
(only applicable for broadcast mode)
- `unexpected_error_replies`: Number of unexpected error replies, that are types
of errors from an AOF load or replication
- `total_error_replies`: Total number of issued error replies, that is the sum
of rejected commands (errors prior to command execution) and failed commands
(errors within the command execution)
- `total_reads_processed`: Total number of read events processed
- `total_writes_processed`: Total number of write events processed
- `io_threaded_reads_processed`: Number of read events processed by the main and
I/O threads
- `io_threaded_writes_processed`: Number of write events processed by the main
and I/O threads
Here is the meaning of all fields in the **replication** section:
- `role`: Value is "master" if the instance is replica of no one, or "slave" if - `role`: Value is "master" if the instance is replica of no one, or "slave" if
the instance is a replica of some master instance. Note that a replica can be the instance is a replica of some master instance. Note that a replica can be
master of another replica (chained replication). master of another replica (chained replication).
- `master_failover_state`: The state of an ongoing failover, if any.
- `master_replid`: The replication ID of the Redis server. - `master_replid`: The replication ID of the Redis server.
- `master_replid2`: The secondary replication ID, used for PSYNC after a - `master_replid2`: The secondary replication ID, used for PSYNC after a
failover. failover.
@ -267,7 +297,15 @@ If the instance is a replica, these additional fields are provided:
If a SYNC operation is on-going, these additional fields are provided:
- `master_sync_total_bytes`: Total number of bytes that need to be transferred.
This may be 0 when the size is unknown (for example, when the
`repl-diskless-sync` configuration directive is used)
- `master_sync_read_bytes`: Number of bytes already transferred
- `master_sync_left_bytes`: Number of bytes left before syncing is complete (may
be negative when `master_sync_total_bytes` is 0)
- `master_sync_perc`: The percentage of `master_sync_read_bytes` from
`master_sync_total_bytes`, or an approximation that uses
`loading_rdb_used_mem` when `master_sync_total_bytes` is 0
- `master_sync_last_io_seconds_ago`: Number of seconds since last transfer I/O
during a SYNC operation
@ -291,18 +329,36 @@ For each replica, the following line is added:
Here is the meaning of all fields in the **cpu** section:
- `used_cpu_sys`: System CPU consumed by the Redis server, which is the sum of
system CPU consumed by all threads of the server process (main thread and
background threads)
- `used_cpu_user`: User CPU consumed by the Redis server, which is the sum of
user CPU consumed by all threads of the server process (main thread and
background threads)
- `used_cpu_sys_children`: System CPU consumed by the background processes
- `used_cpu_user_children`: User CPU consumed by the background processes
- `used_cpu_sys_main_thread`: System CPU consumed by the Redis server main
thread
- `used_cpu_user_main_thread`: User CPU consumed by the Redis server main thread
The **commandstats** section provides statistics based on the command type,
including the number of calls that reached command execution (not rejected), the
total CPU time consumed by these commands, the average CPU consumed per command
execution, the number of rejected calls (errors prior to command execution), and
the number of failed calls (errors within the command execution).
For each command type, the following line is added:
- `cmdstat_XXX`:
`calls=XXX,usec=XXX,usec_per_call=XXX,rejected_calls=XXX,failed_calls=XXX`
The **errorstats** section enables keeping track of the different errors that
occurred within Redis, based upon the reply error prefix (the first word after
the "-", up to the first space; for example: `ERR`).
For each error type, the following line is added:
- `errorstat_XXX`: `count=XXX`
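An illustrative `errorstats` reply might look as follows (the counters shown are
made up):
```
> INFO errorstats
# Errorstats
errorstat_ERR:count=3
errorstat_WRONGTYPE:count=1
```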
The **cluster** section currently only contains a unique field:

View file

@ -31,6 +31,6 @@ For more information refer to the [Latency Monitoring Framework page][lm].
[lm]: /topics/latency-monitor
@return
@integer-reply: the number of event time series that were reset.

View file

@ -0,0 +1,77 @@
Atomically returns and removes the first/last element (head/tail depending on
the `wherefrom` argument) of the list stored at `source`, and pushes the element
at the first/last element (head/tail depending on the `whereto` argument) of the
list stored at `destination`.
For example: consider `source` holding the list `a,b,c`, and `destination`
holding the list `x,y,z`. Executing `LMOVE source destination RIGHT LEFT`
results in `source` holding `a,b` and `destination` holding `c,x,y,z`.
If `source` does not exist, the value `nil` is returned and no operation is
performed. If `source` and `destination` are the same, the operation is
equivalent to removing the first/last element from the list and pushing it as
first/last element of the list, so it can be considered as a list rotation
command (or a no-op if `wherefrom` is the same as `whereto`).
This command comes in place of the now deprecated `RPOPLPUSH`. Doing
`LMOVE RIGHT LEFT` is equivalent.
@return
@bulk-string-reply: the element being popped and pushed.
@examples
```cli
RPUSH mylist "one"
RPUSH mylist "two"
RPUSH mylist "three"
LMOVE mylist myotherlist RIGHT LEFT
LMOVE mylist myotherlist LEFT RIGHT
LRANGE mylist 0 -1
LRANGE myotherlist 0 -1
```
## Pattern: Reliable queue
Redis is often used as a messaging server to implement processing of background
jobs or other kinds of messaging tasks. A simple form of queue is often obtained
by pushing values into a list on the producer side, and waiting for these values
on the consumer side using `RPOP` (with polling), or `BRPOP` if the client is
better served by a blocking operation.
However in this context the obtained queue is not _reliable_ as messages can be
lost, for example when there is a network problem or if the consumer crashes
just after the message is received but before it has been processed.
`LMOVE` (or `BLMOVE` for the blocking variant) offers a way to avoid this
problem: the consumer fetches the message and at the same time pushes it into a
_processing_ list. It will use the `LREM` command in order to remove the message
from the _processing_ list once the message has been processed.
An additional client may monitor the _processing_ list for items that remain
there for too long, and will push those timed-out items into the queue again if
needed.
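A minimal sketch of the consumer side (key and payload names are illustrative;
the `LREM` call runs only after the message has been processed):
```
LMOVE myqueue processing RIGHT LEFT
LREM processing 1 "job payload"
```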
## Pattern: Circular list
Using `LMOVE` with the same source and destination key, a client can visit all
the elements of an N-elements list, one after the other, in O(N) without
transferring the full list from the server to the client using a single `LRANGE`
operation.
The above pattern works even under the following two conditions:
- There are multiple clients rotating the list: they'll fetch different
elements, until all the elements of the list are visited, and the process
restarts.
- Other clients are actively pushing new items at the end of the list.
The above makes it very simple to implement a system where a set of items must
be processed by N workers continuously as fast as possible. An example is a
monitoring system that must check that a set of web sites are reachable, with
the smallest delay possible, using a number of parallel workers.
Note that this implementation of workers is trivially scalable and reliable,
because even if a message is lost the item is still in the queue and will be
processed at the next iteration.

View file

@ -1,16 +1,29 @@
Removes and returns the first elements of the list stored at `key`.
By default, the command pops a single element from the beginning of the list.
When provided with the optional `count` argument, the reply will consist of up
to `count` elements, depending on the list's length.
@return
When called without the `count` argument:
@bulk-string-reply: the value of the first element, or `nil` when `key` does not
exist.
When called with the `count` argument:
@array-reply: list of popped elements, or `nil` when `key` does not exist.
@history
- `>= 6.2`: Added the `count` argument.
@examples
```cli
RPUSH mylist "one" "two" "three" "four" "five"
LPOP mylist
LPOP mylist 2
LRANGE mylist 0 -1
```

View file

@ -2,7 +2,7 @@ The command returns the index of matching elements inside a Redis list. By
default, when no options are given, it will scan the list from head to tail,
looking for the first match of "element". If the element is found, its index
(the zero-based position in the list) is returned. Otherwise, if no match is
found, `nil` is returned.
```
> RPUSH mylist a b c 1 2 3 c c
@ -64,12 +64,12 @@ indexes. This is better than giving a very large `COUNT` option because it is
more general.
```
> LPOS mylist c COUNT 0
[2,6,7]
```
When `COUNT` is used and no match is found, an empty array is returned. However
when `COUNT` is not used and there are no matches, the command returns `nil`.
Finally, the `MAXLEN` option tells the command to compare the provided element
only with a given maximum number of list items. So for instance specifying
@ -80,9 +80,13 @@ useful to limit the maximum complexity of the command. It is also useful when we
expect the match to be found very early, but want to be sure that in case this
is not true, the command does not take too much time to run.
When `MAXLEN` is used, it is possible to specify 0 as the maximum number of
comparisons, as a way to tell the command we want unlimited comparisons. This is
better than giving a very large `MAXLEN` option because it is more general.
@return
The command returns the integer representing the matching element, or `nil` if
there is no match. However, if the `COUNT` option is given the command returns
an array (empty if there are no matches).

View file

@ -37,4 +37,4 @@ OK
@return
@integer-reply: the memory usage in bytes, or `nil` when the key does not exist.

View file

@ -68,9 +68,12 @@ a single key exists.
- `AUTH2` -- Authenticate with the given username and password pair (Redis 6 or
greater ACL auth style).
@history
- `>= 3.0.0`: Added the `COPY` and `REPLACE` options.
- `>= 3.0.6`: Added the `KEYS` option.
- `>= 4.0.7`: Added the `AUTH` option.
- `>= 6.0.0`: Added the `AUTH2` option.
@return

View file

@ -5,7 +5,7 @@ specified by the `path` argument. The `path` should be the absolute path of the
library, including the full filename. Any additional arguments are passed
unmodified to the module.
**Note**: modules can also be loaded at server startup with the `loadmodule`
configuration directive in `redis.conf`.
@return

View file

@ -36,8 +36,8 @@ QUIT
Connection closed by foreign host.
```
Manually issue the `QUIT` or `RESET` commands to stop a `MONITOR` stream running
via `telnet`.
## Commands not logged by MONITOR
@ -90,4 +90,5 @@ flow.
@history
- `>= 6.2`: `RESET` can be called to exit monitor mode.
- `>= 6.0`: `AUTH` excluded from the command's output.

View file

@ -0,0 +1,19 @@
`PEXPIRETIME` has the same semantic as `EXPIRETIME`, but returns the absolute
Unix expiration timestamp in milliseconds instead of seconds.
@return
@integer-reply: Expiration Unix timestamp in milliseconds, or a negative value
in order to signal an error (see the description below).
- The command returns `-1` if the key exists but has no associated expiration
time.
- The command returns `-2` if the key does not exist.
@examples
```cli
SET mykey "Hello"
PEXPIREAT mykey 33177117420000
PEXPIRETIME mykey
```
View file
@ -1,5 +1,11 @@
Posts a message to the given channel.
In a Redis Cluster clients can publish to every node. The cluster makes sure
that published messages are forwarded as needed, so clients can subscribe to any
channel by connecting to any one of the nodes.
@return
@integer-reply: the number of clients that received the message. Note that in a
Redis Cluster, only clients that are connected to the same node as the
publishing client are included in the count.
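A minimal sketch (the reply depends on how many subscribers are currently
listening):
```
> PUBLISH mychannel "hello"
(integer) 1
```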
View file
@ -4,6 +4,12 @@ separately. The general form is:
PUBSUB <subcommand> ... args ...
Cluster note: in a Redis Cluster clients can subscribe to every node, and can
also publish to every other node. The cluster will make sure that published
messages are forwarded as needed. That said, `PUBSUB`'s replies in a cluster
only report information from the node's Pub/Sub context, rather than the entire
cluster.
# PUBSUB CHANNELS [pattern]
Lists the currently _active channels_. An active channel is a Pub/Sub channel
View file
@ -0,0 +1,23 @@
This command performs a full reset of the connection's server-side context,
mimicking the effect of disconnecting and reconnecting again.
When the command is called from a regular client connection, it does the
following:
- Discards the current `MULTI` transaction block, if one exists.
- Unwatches all keys `WATCH`ed by the connection.
- Disables `CLIENT TRACKING`, if in use.
- Sets the connection to `READWRITE` mode.
- Cancels the connection's `ASKING` mode, if previously set.
- Sets `CLIENT REPLY` to `ON`.
- Sets the protocol version to RESP2.
- `SELECT`s database 0.
- Exits `MONITOR` mode, when applicable.
- Aborts Pub/Sub's subscription state (`SUBSCRIBE` and `PSUBSCRIBE`), when
appropriate.
- Deauthenticates the connection, requiring a call to `AUTH` to reauthenticate
when authentication is enabled.
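A minimal interaction sketch:
```
> RESET
RESET
```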
@return
@simple-string-reply: always 'RESET'.
View file
@ -1,16 +1,29 @@
Removes and returns the last elements of the list stored at `key`.
By default, the command pops a single element from the end of the list. When
provided with the optional `count` argument, the reply will consist of up to
`count` elements, depending on the list's length.
@return
When called without the `count` argument:
@bulk-string-reply: the value of the last element, or `nil` when `key` does not
exist.
When called with the `count` argument:
@array-reply: list of popped elements, or `nil` when `key` does not exist.
@history
- `>= 6.2`: Added the `count` argument.
@examples
```cli
RPUSH mylist "one" "two" "three" "four" "five"
RPOP mylist
RPOP mylist 2
LRANGE mylist 0 -1
```
View file
@ -11,6 +11,9 @@ performed. If `source` and `destination` are the same, the operation is
equivalent to removing the last element from the list and pushing it as first
element of the list, so it can be considered as a list rotation command.
As of Redis 6.2.0, RPOPLPUSH is considered deprecated. Please prefer `LMOVE` in
new code.
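For reference, a sketch of the equivalent `LMOVE` invocation:
```
RPOPLPUSH mysource mydestination
LMOVE mysource mydestination RIGHT LEFT
```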
@return
@bulk-string-reply: the element being popped and pushed.
View file
@ -7,7 +7,7 @@ An error is returned when the value stored at `key` is not a set.
@return
@integer-reply: the number of elements that were added to the set, not including
all the elements already present in the set.
@history
View file
@ -246,7 +246,7 @@ may receive no elements in many iterations.
It is possible for an infinite number of clients to iterate the same collection
at the same time, as the full state of the iterator is in the cursor, that is
obtained and returned to the client at every call. No server side state is taken
at all.
## Terminating iterations in the middle
View file
@ -17,8 +17,8 @@ is active and retains all changes to the data set once it ends.
- `YES`. Enable non-blocking asynchronous debugging of Lua scripts (changes are
discarded).
- `!SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes
to data).
- `NO`. Disables scripts debug mode.
@return
View file
@ -3,6 +3,21 @@ Flush the Lua scripts cache.
Please refer to the `EVAL` documentation for detailed information about Redis
Lua scripting.
By default, `SCRIPT FLUSH` will synchronously flush the cache. Starting with
Redis 6.2, setting the **lazyfree-lazy-user-flush** configuration directive to
"yes" changes the default flush mode to asynchronous.
It is possible to use one of the following modifiers to dictate the flushing
mode explicitly:
- `ASYNC`: flushes the cache asynchronously
- `!SYNC`: flushes the cache synchronously
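For example, to flush the cache asynchronously regardless of the configured
default:
```
SCRIPT FLUSH ASYNC
```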
@return
@simple-string-reply
@history
- `>= 6.2.0`: Added the `ASYNC` and `!SYNC` flushing mode modifiers, as well as
the **lazyfree-lazy-user-flush** configuration directive.
View file
@ -8,24 +8,41 @@ The `SET` command supports a set of options that modify its behavior:
- `EX` _seconds_ -- Set the specified expire time, in seconds.
- `PX` _milliseconds_ -- Set the specified expire time, in milliseconds.
- `EXAT` _timestamp-seconds_ -- Set the specified Unix time at which the key
will expire, in seconds.
- `PXAT` _timestamp-milliseconds_ -- Set the specified Unix time at which the
key will expire, in milliseconds.
- `NX` -- Only set the key if it does not already exist.
- `XX` -- Only set the key if it already exists.
- `KEEPTTL` -- Retain the time to live associated with the key.
- `GET` -- Return the old string stored at key, or nil if key did not exist. An
error is returned and `SET` aborted if the value stored at key is not a
string.
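For example, a sketch of the `GET` option (assuming `mykey` currently holds
"old value"):
```
> SET mykey "new value" GET
"old value"
```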
Note: Since the `SET` command options can replace `SETNX`, `SETEX`, `PSETEX`,
`GETSET`, it is possible that in future versions of Redis these commands will be
deprecated and finally removed.
@return
@simple-string-reply: `OK` if `SET` was executed correctly.
@nil-reply: `(nil)` if the `SET` operation was not performed because the user
specified the `NX` or `XX` option but the condition was not met.
If the command is issued with the `GET` option, the above does not apply. It
will instead reply as follows, regardless if the `SET` was actually performed:
@bulk-string-reply: the old string value stored at key.
@nil-reply: `(nil)` if the key did not exist.
@history
- `>= 2.6.12`: Added the `EX`, `PX`, `NX` and `XX` options.
- `>= 6.0`: Added the `KEEPTTL` option.
- `>= 6.2`: Added the `GET`, `EXAT` and `PXAT` options.
- `>= 7.0`: Allowed the `NX` and `GET` options to be used together.
@examples
@ -39,7 +56,7 @@ SET anotherkey "will expire in a minute" EX 60
## Patterns
**Note:** The following pattern is discouraged in favor of
[the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit
more complex to implement, but offers better guarantees and is fault tolerant.
The command `SET resource-name anystring NX EX max-lock-time` is a simple way to
View file
@ -1,7 +1,8 @@
Sets or clears the bit at _offset_ in the string value stored at _key_.
The bit is either set or cleared depending on _value_, which can be either 0 or
1.
When _key_ does not exist, a new string value is created. The string is grown to
make sure it can hold a bit at _offset_. The _offset_ argument is required to be
View file
@ -22,7 +22,7 @@ GET mykey
**Please note that:**
1. The following pattern is discouraged in favor of
[the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit
more complex to implement, but offers better guarantees and is fault
tolerant.
2. We document the old pattern anyway because certain existing implementations
View file
@ -29,6 +29,8 @@ _slowlog-log-slower-than_ config parameter to zero) with minor performance hit.
To read the slow log the **SLOWLOG GET** command is used, that returns every
entry in the slow log. It is possible to return only the N most recent entries
passing an additional argument to the command (for instance **SLOWLOG GET 10**).
The default requested length is 10 (when the argument is omitted). It's possible
to pass -1 to get the entire slowlog.
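For example (the second form returns the entire log):
```
SLOWLOG GET 10
SLOWLOG GET -1
```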
Note that you need a recent version of redis-cli in order to read the slow log
output, since it uses some features of the protocol that were not formerly
View file
@ -0,0 +1,17 @@
Returns whether each `member` is a member of the set stored at `key`.
For every `member`, `1` is returned if the value is a member of the set, or `0`
if the element is not a member of the set or if `key` does not exist.
@return
@array-reply: list representing the membership of the given elements, in the
same order as they are requested.
@examples
```cli
SADD myset "one"
SADD myset "one"
SMISMEMBER myset "one" "notamember"
```
View file
@ -1,14 +1,26 @@
Removes and returns one or more random members from the set value stored at
`key`.
This operation is similar to `SRANDMEMBER`, which returns one or more random
elements from a set but does not remove them.
By default, the command pops a single member from the set. When provided with
the optional `count` argument, the reply will consist of up to `count` members,
depending on the set's cardinality.
@return
When called without the `count` argument:
@bulk-string-reply: the removed member, or `nil` when `key` does not exist.
When called with the `count` argument:
@array-reply: the removed members, or an empty array when `key` does not exist.
@history
- `>= 3.2`: Added the `count` argument.
@examples
@ -24,18 +36,8 @@ SPOP myset 3
SMEMBERS myset
```
## Distribution of returned elements
Note that this command is not suitable when you need a guaranteed uniform
distribution of the returned elements. For more information about the algorithms
used for `SPOP`, look up both the Knuth sampling and Floyd sampling algorithms.
View file
@ -1,22 +1,21 @@
When called with just the `key` argument, return a random element from the set
value stored at `key`.
If the provided `count` argument is positive, return an array of **distinct
elements**. The array's length is either `count` or the set's cardinality
(`SCARD`), whichever is lower.
If called with a negative `count`, the behavior changes and the command is
allowed to return the **same element multiple times**. In this case, the number
of returned elements is the absolute value of the specified `count`.
@return
@bulk-string-reply: without the additional `count` argument, the command returns
a Bulk Reply with the randomly selected element, or `nil` when `key` does not
exist.
@array-reply: when the additional `count` argument is passed, the command
returns an array of elements, or an empty array when `key` does not exist.
@examples
@ -28,26 +27,32 @@ SRANDMEMBER myset 2
SRANDMEMBER myset -5
```
@history
- `>= 2.6.0`: Added the optional `count` argument.
## Specification of the behavior when count is passed
When the `count` argument is a positive value this command behaves as follows:
- No repeated elements are returned.
- If `count` is bigger than the set's cardinality, the command will only return
the whole set without additional elements.
- The order of elements in the reply is not truly random, so it is up to the
client to shuffle them if needed.
When the `count` is a negative value, the behavior changes as follows:
- Repeating elements are possible.
- Exactly `count` elements, or an empty array if the set is empty (non-existing
key), are always returned.
- The order of elements in the reply is truly random.
## Distribution of returned elements
Note: this section is relevant only for Redis 5 or below, as Redis 6 implements
a fairer algorithm.
The distribution of the returned elements is far from perfect when the number of
elements in the set is small, this is due to the fact that we used an
approximated random element function that does not really guarantee good
View file
@ -11,7 +11,7 @@ argument must be "LCS", since this is the only implemented one.
## LCS algorithm
```
STRALGO LCS STRINGS <string_a> <string_b> | KEYS <key_a> <key_b> [LEN] [IDX] [MINMATCHLEN <len>] [WITHMATCHLEN]
```
The LCS subcommand implements the longest common subsequence algorithm. Note
@ -113,9 +113,9 @@ For the LCS algorithm:
- Without modifiers the string representing the longest common substring is
returned.
- When `LEN` is given the command returns the length of the longest common
substring.
- When `IDX` is given the command returns an array with the LCS length and all
the ranges in both the strings, start and end offset for each string, where
there are matches. When `WITHMATCHLEN` is given each array representing a
match will also have the length of the match (see examples).
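For instance, a short sketch with two illustrative keys:
```
> MSET key1 "ohmytext" key2 "mynewtext"
OK
> STRALGO LCS KEYS key1 key2
"mytext"
> STRALGO LCS KEYS key1 key2 LEN
(integer) 6
```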
View file
@ -2,4 +2,8 @@ Subscribes the client to the specified channels.
Once the client enters the subscribed state it is not supposed to issue any
other commands, except for additional `SUBSCRIBE`, `PSUBSCRIBE`, `UNSUBSCRIBE`,
`PUNSUBSCRIBE`, `PING`, `RESET` and `QUIT` commands.
@history
- `>= 6.2`: `RESET` can be called to exit subscribed state.
View file
@ -1,5 +1,5 @@
The `XACK` command removes one or multiple messages from the _Pending Entries
List_ (PEL) of a stream consumer group. A message is pending, and as such stored
inside the PEL, when it was delivered to some consumer, normally as a side
effect of calling `XREADGROUP`, or when a consumer took ownership of a message
calling `XCLAIM`. The pending message was delivered to some consumer but the
@ -17,9 +17,13 @@ entry about this message is also purged, releasing memory from the Redis server.
@integer-reply, specifically:
The command returns the number of messages successfully acknowledged. Certain
message IDs may no longer be part of the PEL (for example because they have
already been acknowledged), and XACK will not count them as successfully
acknowledged.
@examples
```
redis> XACK mystream mygroup 1526569495631-0
(integer) 1
```
View file
@ -1,6 +1,7 @@
Appends the specified stream entry to the stream at the specified key. If the
key does not exist, as a side effect of running this command the key is created
with a stream value. The creation of the stream's key can be disabled with the
`NOMKSTREAM` option.
An entry is composed of a set of field-value pairs, it is basically a small
dictionary. The field-value pairs are stored in the same order they are given by
@ -14,11 +15,12 @@ stream.
## Specifying a Stream ID as an argument
A stream entry ID identifies a given entry inside a stream.
The `XADD` command will auto-generate a unique ID for you if the ID argument
specified is the `*` character (asterisk ASCII character). However, while useful
only in very rare cases, it is possible to specify a well-formed ID, so that the
new entry will be added exactly with the specified ID.
IDs are specified by two numbers separated by a `-` character:
@ -39,30 +41,27 @@ or if after a failover the new master has a different absolute time.
When a user specified an explicit ID to `XADD`, the minimum valid ID is `0-1`,
and the user _must_ specify an ID which is greater than any other ID currently
inside the stream, otherwise the command will fail and return an error. Usually
resorting to specific IDs is useful only if you have another system generating
unique IDs (for instance an SQL table) and you really want the Redis stream IDs
to match the one of this other system.
## Capped streams
`XADD` incorporates the same semantics as the `XTRIM` command - refer to its
documentation page for more information. This allows adding new entries and
keeping the stream's size in check with a single call to `XADD`, effectively
capping the stream with an arbitrary threshold. Although exact trimming is
possible and is the default, due to the internal representation of streams it is
more efficient to add an entry and trim the stream with `XADD` using **almost
exact** trimming (the `~` argument).
For example, calling `XADD` in the following form:
XADD mystream MAXLEN ~ 1000 * ... entry fields here ...
Will add a new entry but will also evict old entries so that the stream will
contain only 1000 entries, or at most a few tens more.
## Additional information about streams
@ -77,6 +76,14 @@ The command returns the ID of the added entry. The ID is the one auto-generated
if `*` is passed as ID argument, otherwise the command just returns the same ID
specified by the user during insertion.
The command returns a @nil-reply when used with the `NOMKSTREAM` option and the
key doesn't exist.
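For example, assuming `nosuchstream` does not exist:
```
> XADD nosuchstream NOMKSTREAM * field value
(nil)
```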
@history
- `>= 6.2`: Added the `NOMKSTREAM` option, `MINID` trimming strategy and the
`LIMIT` option.
@examples
```cli
View file
@ -0,0 +1,70 @@
This command transfers ownership of pending stream entries that match the
specified criteria. Conceptually, `XAUTOCLAIM` is equivalent to calling
`XPENDING` and then `XCLAIM`, but provides a more straightforward way to deal
with message delivery failures via `SCAN`-like semantics.
Like `XCLAIM`, the command operates on the stream entries at `<key>` and in the
context of the provided `<group>`. It transfers ownership to `<consumer>` of
messages pending for more than `<min-idle-time>` milliseconds and having an
equal or greater ID than `<start>`.
The optional `<count>` argument, which defaults to 100, is the upper limit of
the number of entries that the command attempts to claim. Internally, the
command begins scanning the consumer group's Pending Entries List (PEL) from
`<start>` and filters out entries having an idle time less than or equal to
`<min-idle-time>`. The maximum number of pending entries that the command scans
is the product of multiplying `<count>`'s value by 10 (hard-coded). It is
possible, therefore, that the number of entries claimed will be less than the
specified value.
The optional `JUSTID` argument changes the reply to return just an array of IDs
of messages successfully claimed, without returning the actual message. Using
this option means the retry counter is not incremented.
The command returns the claimed entries as an array. It also returns a stream ID
intended for cursor-like use as the `<start>` argument for its subsequent call.
When there are no remaining PEL entries, the command returns the special `0-0`
ID to signal completion. However, note that you may want to continue calling
`XAUTOCLAIM` even after the scan is complete with the `0-0` as `<start>` ID,
because enough time passed, so older pending entries may now be eligible for
claiming.
Note that only messages that are idle longer than `<min-idle-time>` are claimed,
and claiming a message resets its idle time. This ensures that only a single
consumer can successfully claim a given pending message at a specific instant of
time and trivially reduces the probability of processing the same message
multiple times.
Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted
deliveries count for that message, unless the `JUSTID` option has been specified
(which only delivers the message ID, not the message itself). Messages that
cannot be processed for some reason - for example, because consumers
systematically crash when processing them - will exhibit high attempted delivery
counts that can be detected by monitoring.
@return
@array-reply, specifically:
An array with two elements:
1. The first element is a stream ID to be used as the `<start>` argument for the
next call to `XAUTOCLAIM`
2. The second element is an array containing all the successfully claimed
messages in the same format as `XRANGE`.
@examples
```
> XAUTOCLAIM mystream mygroup Alice 3600000 0-0 COUNT 25
1) "0-0"
2) 1) 1) "1609338752495-0"
2) 1) "field"
2) "value"
```
In the above example, we attempt to claim up to 25 entries that are pending and
idle (not having been acknowledged or claimed) for at least an hour, starting at
the stream's beginning. The consumer "Alice" from the "mygroup" group acquires
ownership of these messages. Note that the stream ID returned in the example is
`0-0`, indicating that the entire stream was scanned.
View file
@ -5,14 +5,15 @@ command argument. Normally this is what happens:
1. There is a stream with an associated consumer group.
2. Some consumer A reads a message via `XREADGROUP` from a stream, in the
context of that consumer group.
3. As a side effect a pending message entry is created in the Pending Entries
List (PEL) of the consumer group: it means the message was delivered to a
given consumer, but it was not yet acknowledged via `XACK`.
4. Then suddenly that consumer fails forever.
5. Other consumers may inspect the list of pending messages, that are stale for
quite some time, using the `XPENDING` command. In order to continue
processing such messages, they use `XCLAIM` to acquire the ownership of the
message and continue. As of Redis 6.2, consumers can use the `XAUTOCLAIM`
command to automatically scan and claim stale pending messages.
This dynamic is clearly explained in the
[Stream intro documentation](/topics/streams-intro).
@ -68,7 +69,7 @@ The command returns all the messages successfully claimed, in the same format as
`XRANGE`. However if the `JUSTID` option was specified, only the message IDs are
reported, without including the actual message.
@examples
```
> XCLAIM mystream mygroup Alice 3600000 1526569498055-0
View file
@ -11,7 +11,7 @@ To create a new consumer group, use the following form:
XGROUP CREATE mystream consumer-group-name $
The last argument is the ID of the last item in the stream to consider already
delivered. In the above case we used the special ID '$' (that means: the ID of
the last item in the stream). In this case the consumers fetching data from that
consumer group will only see new elements arriving in the stream.
@ -22,8 +22,9 @@ starting ID for the consumer group:
Of course it is also possible to use any other valid ID. If the specified
consumer group already exists, the command returns a `-BUSYGROUP` error.
Otherwise, the operation is performed and a @simple-string-reply `OK` is
returned. There are no hard limits to the number of consumer groups you can
associate with a given stream.
If the specified stream doesn't exist when creating a group, an error will be
returned. You can use the optional `MKSTREAM` subcommand as the last argument
@ -38,16 +39,26 @@ A consumer group can be destroyed completely by using the following form:
The consumer group will be destroyed even if there are active consumers and
pending messages, so make sure to call this command only when really needed.
This form returns an @integer-reply with the number of destroyed consumer groups
(0 or 1).
Consumers in a consumer group are auto-created every time a new consumer name is
mentioned by some command. They can also be explicitly created by using the
following form:
XGROUP CREATECONSUMER mystream consumer-group-name myconsumer123
This form returns an @integer-reply with the number of created consumers (0 or
1).
To just remove a given consumer from a consumer group, the following form is
used:
XGROUP DELCONSUMER mystream consumer-group-name myconsumer123
Sometimes it may be useful to remove old consumers since they are no longer
used. This form returns an @integer-reply with the number of pending messages
that the consumer had before it was deleted.
Finally it is possible to set the next message to deliver using the `SETID`
subcommand. Normally the next ID is set when the consumer is created, as the
@ -58,7 +69,13 @@ messages in a stream, you may want to set its next ID to 0:
XGROUP SETID mystream consumer-group-name 0
This form returns a @simple-string-reply `OK` or an error.
Finally to get some help if you don't remember the syntax, use the HELP
subcommand:
XGROUP HELP
@history
- `>= 6.2.0`: Supports the `CREATECONSUMER` subcommand.
View file
@ -39,7 +39,8 @@ is the stream content.
- `XINFO STREAM <key> FULL [COUNT <count>]`
In this form the command returns the entire state of the stream, including
entries, groups, consumers and Pending Entries Lists (PELs). This form is
available since Redis 6.0.
```
> XADD mystream * foo bar
View file
@ -2,7 +2,7 @@ Fetching data from a stream via a consumer group, and not acknowledging such
data, has the effect of creating _pending entries_. This is well explained in
the `XREADGROUP` command, and even better in our
[introduction to Redis Streams](/topics/streams-intro). The `XACK` command will
immediately remove the pending entry from the Pending Entries List (PEL) since
once a message is successfully processed, there is no longer need for the
consumer group to track it and to remember the current owner of the message.
@ -58,10 +58,13 @@ consumer group, which is one, followed by the smallest and greatest ID among the
pending messages, and then list every consumer in the consumer group with at
least one pending message, and the number of pending messages it has.
## Extended form of XPENDING
The summary provides a good overview, but sometimes we are interested in the
details. In order to see all the pending messages with more associated
information we need to also pass a range of IDs, in a similar way we do it with
`XRANGE`, and a non optional _count_ argument, to limit the number of messages
returned per call:
```
> XPENDING mystream group55 - + 10
@ -71,7 +74,7 @@ optional _count_ argument, to limit the number of messages returned per call:
4) (integer) 1
```
In the extended form we no longer see the summary information, instead there is
detailed information for each message in the pending entries list. For each
message four attributes are returned:
@ -87,8 +90,8 @@ when some other consumer _claims_ the message with `XCLAIM`, or when the message
is delivered again via `XREADGROUP`, when accessing the history of a consumer in
a consumer group (see the `XREADGROUP` page for more info).
It is possible to pass an additional argument to the command, in order to see
the messages having a specific owner:
```
> XPENDING mystream group55 - + 10 consumer-123
@ -101,6 +104,29 @@ even when there are many pending messages from many consumers: we have a pending
entries list data structure both globally, and for every consumer, so we can
very efficiently show just messages pending for a single consumer.
## Idle time filter
Since version 6.2 it is possible to filter entries by their idle-time, given in
milliseconds (useful for `XCLAIM`ing entries that have not been processed for
some time):
```
> XPENDING mystream group55 IDLE 9000 - + 10
> XPENDING mystream group55 IDLE 9000 - + 10 consumer-123
```
The first case will return the first 10 (or less) PEL entries of the entire
group that are idle for over 9 seconds, whereas in the second case only those of
`consumer-123`.
## Exclusive ranges and iterating the PEL
The `XPENDING` command allows iterating over the pending entries just like
`XRANGE` and `XREVRANGE` allow for the stream's entries. You can do this by
prefixing the ID of the last-read pending entry with the `(` character that
denotes an open (exclusive) range, and providing it to the subsequent call to the
command.
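For example, assuming `1526984818136-0` was the last pending entry ID returned
by the previous call:
```
> XPENDING mystream group55 (1526984818136-0 + 10
```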
@return
@array-reply, specifically:
@ -108,3 +134,7 @@ very efficiently show just messages pending for a single consumer.
The command returns data in a different format depending on the way it is
called, as previously explained in this page. However the reply is always an
array of items.
@history
- `>= 6.2.0`: Added the `IDLE` option and exclusive range intervals.
View file
@ -67,6 +67,13 @@ Used in this way `XRANGE` works as a range query command to obtain entries in a
specified time. This is very handy in order to access the history of past events
in a stream.
## Exclusive ranges
The range is closed (inclusive) by default, meaning that the reply can include
entries with IDs matching the query's start and end intervals. It is possible to
specify an open interval (exclusive) by prefixing the ID with the character `(`.
This is useful for iterating the stream, as explained below.
## Returning a maximum number of entries
Using the **COUNT** option it is possible to reduce the number of entries
@ -110,14 +117,14 @@ is trivial:
```
Then instead of starting the iteration again from `-`, as the start of the range
we use the entry ID of the _last_ entry returned by the previous `XRANGE` call
as an exclusive interval.
The ID of the last entry is `1526985685298-0`, so we just prefix it with a '(',
and continue our iteration:
```
> XRANGE writers (1526985685298-0 + COUNT 2
1) 1) 1526985691746-0
2) 1) "name"
2) "Toni"
@ -139,6 +146,37 @@ The command `XREAD` is also able to iterate the stream. The command `XREVRANGE`
can iterate the stream in reverse, from higher IDs (or times) to lower IDs (or
times).
### Iterating with earlier versions of Redis
While exclusive range intervals are only available from Redis 6.2, it is still
possible to use a similar stream iteration pattern with earlier versions. You
start fetching from the stream the same way as described above to obtain the
first entries.
For the subsequent calls, you'll need to programmatically advance the last
entry's ID returned. Most Redis clients should abstract this detail, but the
implementation can also be in the application if needed. In the example above,
this means incrementing the sequence of `1526985685298-0` by one, from 0 to 1.
The second call would, therefore, be:
```
> XRANGE writers 1526985685298-1 + COUNT 2
1) 1) 1526985691746-0
2) 1) "name"
2) "Toni"
...
```
Also, note that once the sequence part of the last ID equals
18446744073709551615, you'll need to increment the timestamp and reset the
sequence part to 0. For example, incrementing the ID
`1526985685298-18446744073709551615` should result in `1526985685299-0`.
A symmetrical pattern applies to iterating the stream with `XREVRANGE`. The only
difference is that the client needs to decrement the ID for the subsequent
calls. When decrementing an ID with a sequence part of 0, the timestamp needs to
be decremented by 1 and the sequence set to 18446744073709551615.
## Fetching single items
If you look for an `XGET` command you'll be disappointed because `XRANGE` is
@ -170,6 +208,10 @@ returned entries are complete, that means that the ID and all the fields they
are composed are returned. Moreover, the entries are returned with their fields
and values in the exact same order as `XADD` added them.
@history
- `>= 6.2`: Added exclusive ranges.
@examples
```cli
View file
@ -29,7 +29,7 @@ the history of messages that were delivered to it, so a message has just a
single owner. However there is a special feature called _message claiming_ that
allows other consumers to claim messages in case there is a non recoverable
failure of some consumer. In order to implement such semantics, consumer groups
require explicit acknowledgment of the messages successfully processed by the
consumer, via the `XACK` command. This is needed because the stream will track,
for each consumer group, who is processing what message.
@ -88,7 +88,7 @@ no differences in this regard.
Two things:
1. If the message was never delivered to anyone, that is, if we are talking
about a new message, then a PEL (Pending Entries List) is created.
2. If instead the message was already delivered to this consumer, and it is just
re-fetching the same message again, then the _last delivery counter_ is
updated to the current time, and the _number of deliveries_ is incremented by
@ -129,3 +129,19 @@ acknowledged all the pending messages: we can start to use `>` as ID, in order
to get the new messages and rejoin the consumers that are processing new things.
To see how the command actually replies, please check the `XREAD` command page.
@return
@array-reply, specifically:
The command returns an array of results: each element of the returned array is
an array composed of two elements containing the key name and the entries
reported for that key. The entries reported are full stream entries, having IDs
and the list of all the fields and values. Field and values are guaranteed to be
reported in the same order they were added by `XADD`.
When **BLOCK** is used, on timeout a null reply is returned.
Reading the [Redis Streams introduction](/topics/streams-intro) is highly
suggested in order to understand more about the streams overall behavior and
semantics.
View file
@ -14,54 +14,6 @@ send:
XREVRANGE somestream + - COUNT 1
@return
@array-reply, specifically:
@ -72,6 +24,10 @@ means that the ID and all the fields they are composed are returned. Moreover
the entries are returned with their fields and values in the exact same order as
`XADD` added them.
@history
- `>= 6.2`: Added exclusive ranges.
@examples
```cli
View file
@ -1,34 +1,71 @@
`XTRIM` trims the stream by evicting older entries (entries with lower IDs) if
needed.
Trimming the stream can be done using one of these strategies:
- `MAXLEN`: Evicts entries as long as the stream's length exceeds the specified
`threshold`, where `threshold` is a positive integer.
- `MINID`: Evicts entries with IDs lower than `threshold`, where `threshold` is
a stream ID.
For example, this will trim the stream to exactly the latest 1000 items:
```
XTRIM mystream MAXLEN 1000
```
Whereas in this example, all entries that have an ID lower than 649085820-0 will
be evicted:
```
XTRIM mystream MINID 649085820
```
By default, or when provided with the optional `=` argument, the command
performs exact trimming.
Depending on the strategy, exact trimming means:
- `MAXLEN`: the trimmed stream's length will be exactly the minimum between its
original length and the specified `threshold`.
- `MINID`: the oldest ID in the stream will be exactly the minimum between its
original oldest ID and the specified `threshold`.
## Nearly exact trimming
Because exact trimming may require additional effort from the Redis server, the
optional `~` argument can be provided to make it more efficient.
For example:
```
XTRIM mystream MAXLEN ~ 1000
```
The `~` argument between the `MAXLEN` strategy and the `threshold` means that
the user is requesting to trim the stream so its length is **at least** the
`threshold`, but possibly slightly more. In this case, Redis will stop trimming
early when performance can be gained (for example, when a whole macro node in
the data structure can't be removed). This makes trimming much more efficient,
and it is usually what you want, although after trimming, the stream may have a
few tens of additional entries over the `threshold`.
Another way to control the amount of work done by the command when using the
`~`, is the `LIMIT` clause. When used, it specifies the maximal `count` of
entries that will be evicted. When `LIMIT` and `count` aren't specified, the
default value of 100 \* the number of entries in a macro node will be implicitly
used as the `count`. Specifying the value 0 as `count` disables the limiting
mechanism entirely.
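For example, a sketch that evicts at most 100 entries per call:
```
XTRIM mystream MAXLEN ~ 1000 LIMIT 100
```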
@return
@integer-reply: The number of entries deleted from the stream.
@history
- `>= 6.2`: Added the `MINID` trimming strategy and the `LIMIT` option.
@examples
```cli
XADD mystream * field1 A field2 B field3 C field4 D
View file
@ -10,13 +10,17 @@ not hold a sorted set, an error is returned.
The score values should be the string representation of a double precision
floating point number. `+inf` and `-inf` values are valid values as well.
## ZADD options
ZADD supports a list of options, specified after the name of the key and before
the first score argument. Options are:
- **XX**: Only update elements that already exist. Don't add new elements.
- **NX**: Only add new elements. Don't update already existing elements.
- **LT**: Only update existing elements if the new score is **less than** the
current score. This flag doesn't prevent adding new elements.
- **GT**: Only update existing elements if the new score is **greater than** the
current score. This flag doesn't prevent adding new elements.
- **CH**: Modify the return value from the number of new elements added, to the
total number of elements changed (CH is an abbreviation of _changed_). Changed
elements are **new elements added** and elements already existing for which
@ -26,6 +30,8 @@ the first score argument. Options are:
- **INCR**: When this option is specified `ZADD` acts like `ZINCRBY`. Only one
score-element pair can be specified in this mode.
Note: The **GT**, **LT** and **NX** options are mutually exclusive.
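For instance, a sketch of `GT` combined with `CH` (key and member are
illustrative):
```
> ZADD myzset 5 "member"
(integer) 1
> ZADD myzset GT CH 3 "member"
(integer) 0
> ZADD myzset GT CH 7 "member"
(integer) 1
```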
## Range of integer scores that can be expressed precisely
Redis sorted sets use a _double 64-bit floating point number_ to represent the
@ -74,8 +80,10 @@ is also possible to query sorted sets by range of scores using `ZRANGEBYSCORE`).
@integer-reply, specifically:
- When used without optional arguments, the number of elements added to the
sorted set (excluding score updates).
- If the `CH` option is specified, the number of elements that were changed
(added or updated).
If the `INCR` option is specified, the return value will be @bulk-string-reply:
@ -87,6 +95,8 @@ If the `INCR` option is specified, the return value will be @bulk-string-reply:
- `>= 2.4`: Accepts multiple elements. In Redis versions older than 2.4 it was
possible to add or update a single member per call.
- `>= 3.0.2`: Added the `XX`, `NX`, `CH` and `INCR` options.
- `>= 6.2`: Added the `GT` and `LT` options.
@examples
Some files were not shown because too many files have changed in this diff.