fix(stomp-adapter): intermittent connection to STOMP server

leandrofars 2024-09-05 10:33:19 -03:00
parent 48921f8f8b
commit e2160fee15
601 changed files with 46 additions and 318326 deletions

View File

@@ -8,10 +8,10 @@ import (
"strings"
"time"
"github.com/go-stomp/stomp/v3"
"github.com/go-stomp/stomp/v3/frame"
"github.com/nats-io/nats.go"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/config"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
"golang.org/x/sys/unix"
)

View File

@@ -2,8 +2,8 @@
Go language implementation of a STOMP client library.
![Build Status](https://github.com/go-stomp/stomp/actions/workflows/test.yml/badge.svg?branch=master)
[![Go Reference](https://pkg.go.dev/badge/github.com/go-stomp/stomp/v3.svg)](https://pkg.go.dev/github.com/go-stomp/stomp/v3)
![Build Status](https://github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/actions/workflows/test.yml/badge.svg?branch=master)
[![Go Reference](https://pkg.go.dev/badge/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3.svg)](https://pkg.go.dev/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3)
Features:
@@ -15,10 +15,10 @@ Features:
## Usage Instructions
```
go get github.com/go-stomp/stomp/v3
go get github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3
```
For API documentation, see https://pkg.go.dev/github.com/go-stomp/stomp/v3
For API documentation, see https://pkg.go.dev/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3
Breaking changes between this previous version and the current version are
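As a quick orientation (not part of the upstream README), a minimal sketch of using the library through the rewritten import path; the broker address, destination, and payload below are placeholders:

```go
package main

import (
	"log"

	stomp "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3"
)

func main() {
	// Placeholder broker address; adjust for your deployment.
	conn, err := stomp.Dial("tcp", "localhost:61613")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer conn.Disconnect()

	// Destination and body are illustrative only.
	if err := conn.Send("/queue/test", "text/plain", []byte("hello")); err != nil {
		log.Fatalf("send: %v", err)
	}
}
```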

View File

@@ -1,8 +1,6 @@
package stomp
import (
"github.com/go-stomp/stomp/v3/frame"
)
import "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
// The AckMode type is an enumeration of the acknowledgement modes for a
// STOMP subscription.

View File

@@ -15,7 +15,7 @@ The API is stable; the only breaking change is the import path.
Version 3:
```go
import (
"github.com/go-stomp/stomp/v3"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3"
)
```
@@ -37,7 +37,7 @@ import (
Version 2:
```go
import (
"github.com/go-stomp/stomp"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp"
)
```
@@ -64,11 +64,11 @@ package, and the types moved are not needed in normal usage of the `stomp` packa
Version 2 of the stomp library makes use of functional options to provide a clean, flexible way
of specifying options in the following API calls:
* [Dial()](http://godoc.org/github.com/go-stomp/stomp#Dial)
* [Connect()](http://godoc.org/github.com/go-stomp/stomp#Connect)
* [Conn.Send()](http://godoc.org/github.com/go-stomp/stomp#Conn.Send)
* [Transaction.Send()](http://godoc.org/github.com/go-stomp/stomp#Transaction.Send)
* [Conn.Subscribe()](http://godoc.org/github.com/go-stomp/stomp#Conn.Subscribe)
* [Dial()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Dial)
* [Connect()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Connect)
* [Conn.Send()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Conn.Send)
* [Transaction.Send()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Transaction.Send)
* [Conn.Subscribe()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Conn.Subscribe)
The idea for this comes from Dave Cheney's very excellent blog post,
[Functional Options for Friendly APIs](http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis).
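To make the options style concrete, a hedged sketch of a Dial call combining several documented ConnOpt helpers (the host, credentials, and heart-beat values are placeholders):

```go
package main

import (
	"log"
	"time"

	"github.com/go-stomp/stomp/v3"
)

func main() {
	// Each ConnOpt helper returns a function that mutates the connection
	// settings, which is the functional-options pattern described above.
	conn, err := stomp.Dial("tcp", "broker:61613",
		stomp.ConnOpt.Login("user", "password"),               // CONNECT credentials
		stomp.ConnOpt.Host("/"),                               // virtual-host header
		stomp.ConnOpt.HeartBeat(5*time.Second, 5*time.Second), // send/receive heart-beats
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Disconnect()
}
```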

View File

@@ -8,7 +8,7 @@ import (
"sync"
"time"
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// Default time span to add to read/write heart-beat timeouts
@@ -396,16 +396,19 @@ func processLoop(c *Conn, writer *frame.Writer) {
}
sendFrame = false
} else {
id, _ := req.Frame.Header.Contains(frame.Id)
// id, _ := req.Frame.Header.Contains(frame.Id)
// is this trying to be too clever -- add a receipt
// header so that when the server responds with a
// RECEIPT frame, the corresponding channel will be closed
req.Frame.Header.Set(frame.Receipt, id)
// req.Frame.Header.Set(frame.Receipt, id)
// don't wait for a receipt frame from the server, just send the unsubscribe frame and go on
}
}
// frame to send, if enabled
if sendFrame {
//log.Println("Sending frame", req.Frame.Command)
err := writer.Write(req.Frame)
if err != nil {
sendError(channels, err)
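The comments above describe the behavioural change; here is a hedged sketch (not the adapter's exact code) of the two paths, where `subID` is an assumed subscription id and `writer`, `channels`, and `sendError` come from the surrounding function:

```go
// Build an UNSUBSCRIBE frame for the subscription.
f := frame.New(frame.UNSUBSCRIBE, frame.Id, subID)

// Old behaviour: tag the frame so the server's RECEIPT frame, carrying
// the same receipt id, can close the waiting channel.
// f.Header.Set(frame.Receipt, subID)

// New behaviour: write the frame and carry on without waiting.
if err := writer.Write(f); err != nil {
	sendError(channels, err)
}
```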

View File

@@ -5,8 +5,8 @@ import (
"strings"
"time"
"github.com/go-stomp/stomp/v3/frame"
"github.com/go-stomp/stomp/v3/internal/log"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/internal/log"
)
// ConnOptions is an opaque structure used to collect options

View File

@@ -1,7 +1,7 @@
package stomp
import (
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// Error values

View File

@@ -2,7 +2,8 @@ package stomp
import (
"io"
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// A Message represents a message received from the STOMP server.

View File

@@ -1,7 +1,7 @@
package stomp
import (
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// SendOpt contains options for the Conn.Send and Transaction.Send functions.

View File

@@ -20,7 +20,6 @@ Disconnect method. This will perform a graceful shutdown sequence as specified i
Source code and other details for the project are available at GitHub:
https://github.com/go-stomp/stomp
https://github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp
*/
package stomp

View File

@@ -1,7 +1,7 @@
package stomp
import (
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// SubscribeOpt contains options for the Conn.Subscribe function.

View File

@@ -7,7 +7,7 @@ import (
"sync/atomic"
"time"
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
const (
@@ -97,17 +97,21 @@ func (s *Subscription) Unsubscribe(opts ...func(*frame.Frame) error) error {
// We don't want to interfere with `s.C` since we might be "stealing"
// MESSAGEs or ERRORs from another goroutine, so use a sync.Cond to
// wait for the terminal state transition instead.
s.closeMutex.Lock()
for atomic.LoadInt32(&s.state) != subStateClosed {
err = waitWithTimeout(s.closeCond, s.unsubscribeReceiptTimeout)
if err != nil && errors.Is(err, &ErrUnsubscribeReceiptTimeout) {
msg := s.subscriptionErrorMessage("channel unsubscribe receipt timeout")
s.C <- msg
return err
}
}
s.closeMutex.Unlock()
return err
// s.closeMutex.Lock()
// for atomic.LoadInt32(&s.state) != subStateClosed {
// err = waitWithTimeout(s.closeCond, s.unsubscribeReceiptTimeout)
// if err != nil && errors.Is(err, &ErrUnsubscribeReceiptTimeout) {
// msg := s.subscriptionErrorMessage("channel unsubscribe receipt timeout")
// s.C <- msg
// return err
// }
// }
// s.closeMutex.Unlock()
s.closeCond.L.Lock()
s.closeChannel(nil)
s.closeCond.L.Unlock()
return nil
}
func waitWithTimeout(cond *sync.Cond, timeout time.Duration) error {
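The hunk cuts off before the body of `waitWithTimeout`; as a hedged, self-contained sketch of the cond-wait-with-timeout pattern the removed loop relied on (the timeout sentinel here is assumed, standing in for `ErrUnsubscribeReceiptTimeout`, which is defined outside this hunk):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// Assumed sentinel; the real ErrUnsubscribeReceiptTimeout is defined elsewhere.
var errReceiptTimeout = errors.New("unsubscribe receipt timeout")

// waitWithTimeout wakes a sync.Cond waiter even when no Broadcast ever
// arrives: a timer marks the timeout and broadcasts. The caller must
// hold cond.L, as sync.Cond requires.
func waitWithTimeout(cond *sync.Cond, timeout time.Duration) error {
	timedOut := false
	timer := time.AfterFunc(timeout, func() {
		cond.L.Lock()
		timedOut = true
		cond.L.Unlock()
		cond.Broadcast()
	})
	defer timer.Stop()

	cond.Wait() // releases cond.L while waiting, reacquires on wake-up
	if timedOut {
		return errReceiptTimeout
	}
	return nil
}

func main() {
	mu := &sync.Mutex{}
	cond := sync.NewCond(mu)
	mu.Lock()
	err := waitWithTimeout(cond, 100*time.Millisecond)
	mu.Unlock()
	fmt.Println(err) // prints the timeout error after ~100ms
}
```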

View File

@@ -1,7 +1,7 @@
package stomp
import (
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// A Transaction applies to the sending of messages to the STOMP server,

View File

@@ -1,7 +1,7 @@
package stomp
import (
"github.com/go-stomp/stomp/v3/frame"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// Validator is an interface for validating STOMP frames.

View File

@@ -1 +0,0 @@
.DS_Store

View File

@@ -1,23 +0,0 @@
Copyright (c) 2013 John Barton
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,202 +0,0 @@
# GoDotEnv ![CI](https://github.com/joho/godotenv/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv)
A Go (golang) port of the Ruby [dotenv](https://github.com/bkeepers/dotenv) project (which loads env vars from a .env file).
From the original Library:
> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments, such as resource handles for databases or credentials for external services, should be extracted from the code into environment variables.
>
> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv loads variables from a .env file into ENV when the environment is bootstrapped.
It can be used as a library (for loading in env for your own daemons etc.) or as a bin command.
There is test coverage and CI for both linuxish and Windows environments, but I make no guarantees about the bin version working on Windows.
## Installation
As a library
```shell
go get github.com/joho/godotenv
```
or if you want to use it as a bin command
go >= 1.17
```shell
go install github.com/joho/godotenv/cmd/godotenv@latest
```
go < 1.17
```shell
go get github.com/joho/godotenv/cmd/godotenv
```
## Usage
Add your application configuration to your `.env` file in the root of your project:
```shell
S3_BUCKET=YOURS3BUCKET
SECRET_KEY=YOURSECRETKEYGOESHERE
```
Then in your Go app you can do something like
```go
package main
import (
"log"
"os"
"github.com/joho/godotenv"
)
func main() {
err := godotenv.Load()
if err != nil {
log.Fatal("Error loading .env file")
}
s3Bucket := os.Getenv("S3_BUCKET")
secretKey := os.Getenv("SECRET_KEY")
// now do something with s3 or whatever
}
```
If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import
```go
import _ "github.com/joho/godotenv/autoload"
```
While `.env` in the project root is the default, you don't have to be constrained; both examples below are 100% legit
```go
godotenv.Load("somerandomfile")
godotenv.Load("filenumberone.env", "filenumbertwo.env")
```
If you want to be really fancy with your env file you can do comments and exports (below is a valid env file)
```shell
# I am a comment and that is OK
SOME_VAR=someval
FOO=BAR # comments at line end are OK too
export BAR=BAZ
```
Or finally you can do YAML(ish) style
```yaml
FOO: bar
BAR: baz
```
as a final aside, if you don't want godotenv munging your env you can just get a map back instead
```go
var myEnv map[string]string
myEnv, err := godotenv.Read()
s3Bucket := myEnv["S3_BUCKET"]
```
... or from an `io.Reader` instead of a local file
```go
reader := getRemoteFile()
myEnv, err := godotenv.Parse(reader)
```
... or from a `string` if you so desire
```go
content := getRemoteFileContent()
myEnv, err := godotenv.Unmarshal(content)
```
### Precedence & Conventions
Existing envs take precedence over envs that are loaded later.
The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use)
for managing multiple environments (i.e. development, test, production)
is to create an env named `{YOURAPP}_ENV` and load envs in this order:
```go
env := os.Getenv("FOO_ENV")
if "" == env {
env = "development"
}
godotenv.Load(".env." + env + ".local")
if "test" != env {
godotenv.Load(".env.local")
}
godotenv.Load(".env." + env)
godotenv.Load() // The Original .env
```
If you need to, you can also use `godotenv.Overload()` to defy this convention
and overwrite existing envs instead of only supplanting them. Use with caution.
### Command Mode
Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH`
```
godotenv -f /some/path/to/.env some_command with some args
```
If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD`
By default, it won't override existing environment variables; you can do that with the `-o` flag.
### Writing Env Files
Godotenv can also write a map representing the environment to a correctly-formatted and escaped file
```go
env, err := godotenv.Unmarshal("KEY=value")
err := godotenv.Write(env, "./.env")
```
... or to a string
```go
env, err := godotenv.Unmarshal("KEY=value")
content, err := godotenv.Marshal(env)
```
## Contributing
Contributions are welcome, but with some caveats.
This library has been declared feature complete (see [#182](https://github.com/joho/godotenv/issues/182) for background) and will not be accepting issues or pull requests adding new functionality or breaking the library API.
Contributions would be gladly accepted that:
* bring this library's parsing into closer compatibility with the mainline dotenv implementations, in particular [Ruby's dotenv](https://github.com/bkeepers/dotenv) and [Node.js' dotenv](https://github.com/motdotla/dotenv)
* keep the library up to date with the go ecosystem (ie CI bumps, documentation changes, changes in the core libraries)
* bug fixes for use cases that pertain to the library's purpose of easing development of codebases deployed into twelve factor environments
*code changes without tests and references to peer dotenv implementations will not be accepted*
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Added some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Releases
Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`.
Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1`
## Who?
The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library.

View File

@@ -1,228 +0,0 @@
// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
//
// Examples/readme can be found on the GitHub page at https://github.com/joho/godotenv
//
// The TL;DR is that you make a .env file that looks something like
//
// SOME_ENV_VAR=somevalue
//
// and then in your go code you can call
//
// godotenv.Load()
//
// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
package godotenv
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"sort"
"strconv"
"strings"
)
const doubleQuoteSpecialChars = "\\\n\r\"!$`"
// Parse reads an env file from io.Reader, returning a map of keys and values.
func Parse(r io.Reader) (map[string]string, error) {
var buf bytes.Buffer
_, err := io.Copy(&buf, r)
if err != nil {
return nil, err
}
return UnmarshalBytes(buf.Bytes())
}
// Load will read your env file(s) and load them into ENV for this process.
//
// Call this function as close as possible to the start of your program (ideally in main).
//
// If you call Load without any args it will default to loading .env in the current path.
//
// You can otherwise tell it which files to load (there can be more than one) like:
//
// godotenv.Load("fileone", "filetwo")
//
// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults.
func Load(filenames ...string) (err error) {
filenames = filenamesOrDefault(filenames)
for _, filename := range filenames {
err = loadFile(filename, false)
if err != nil {
return // return early on a spazout
}
}
return
}
// Overload will read your env file(s) and load them into ENV for this process.
//
// Call this function as close as possible to the start of your program (ideally in main).
//
// If you call Overload without any args it will default to loading .env in the current path.
//
// You can otherwise tell it which files to load (there can be more than one) like:
//
// godotenv.Overload("fileone", "filetwo")
//
// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
func Overload(filenames ...string) (err error) {
filenames = filenamesOrDefault(filenames)
for _, filename := range filenames {
err = loadFile(filename, true)
if err != nil {
return // return early on a spazout
}
}
return
}
// Read all env (with same file loading semantics as Load) but return values as
// a map rather than automatically writing values into env
func Read(filenames ...string) (envMap map[string]string, err error) {
filenames = filenamesOrDefault(filenames)
envMap = make(map[string]string)
for _, filename := range filenames {
individualEnvMap, individualErr := readFile(filename)
if individualErr != nil {
err = individualErr
return // return early on a spazout
}
for key, value := range individualEnvMap {
envMap[key] = value
}
}
return
}
// Unmarshal reads an env file from a string, returning a map of keys and values.
func Unmarshal(str string) (envMap map[string]string, err error) {
return UnmarshalBytes([]byte(str))
}
// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values.
func UnmarshalBytes(src []byte) (map[string]string, error) {
out := make(map[string]string)
err := parseBytes(src, out)
return out, err
}
// Exec loads env vars from the specified filenames (empty map falls back to default)
// then executes the cmd specified.
//
// Simply hooks up os.Stdin/err/out to the command and calls Run().
//
// If you want more fine grained control over your command it's recommended
// that you use `Load()`, `Overload()` or `Read()` and the `os/exec` package yourself.
func Exec(filenames []string, cmd string, cmdArgs []string, overload bool) error {
op := Load
if overload {
op = Overload
}
if err := op(filenames...); err != nil {
return err
}
command := exec.Command(cmd, cmdArgs...)
command.Stdin = os.Stdin
command.Stdout = os.Stdout
command.Stderr = os.Stderr
return command.Run()
}
// Write serializes the given environment and writes it to a file.
func Write(envMap map[string]string, filename string) error {
content, err := Marshal(envMap)
if err != nil {
return err
}
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
_, err = file.WriteString(content + "\n")
if err != nil {
return err
}
return file.Sync()
}
// Marshal outputs the given environment as a dotenv-formatted environment file.
// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped.
func Marshal(envMap map[string]string) (string, error) {
lines := make([]string, 0, len(envMap))
for k, v := range envMap {
if d, err := strconv.Atoi(v); err == nil {
lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
} else {
lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v)))
}
}
sort.Strings(lines)
return strings.Join(lines, "\n"), nil
}
func filenamesOrDefault(filenames []string) []string {
if len(filenames) == 0 {
return []string{".env"}
}
return filenames
}
func loadFile(filename string, overload bool) error {
envMap, err := readFile(filename)
if err != nil {
return err
}
currentEnv := map[string]bool{}
rawEnv := os.Environ()
for _, rawEnvLine := range rawEnv {
key := strings.Split(rawEnvLine, "=")[0]
currentEnv[key] = true
}
for key, value := range envMap {
if !currentEnv[key] || overload {
_ = os.Setenv(key, value)
}
}
return nil
}
func readFile(filename string) (envMap map[string]string, err error) {
file, err := os.Open(filename)
if err != nil {
return
}
defer file.Close()
return Parse(file)
}
func doubleQuoteEscape(line string) string {
for _, c := range doubleQuoteSpecialChars {
toReplace := "\\" + string(c)
if c == '\n' {
toReplace = `\n`
}
if c == '\r' {
toReplace = `\r`
}
line = strings.Replace(line, string(c), toReplace, -1)
}
return line
}

View File

@@ -1,271 +0,0 @@
package godotenv
import (
"bytes"
"errors"
"fmt"
"regexp"
"strings"
"unicode"
)
const (
charComment = '#'
prefixSingleQuote = '\''
prefixDoubleQuote = '"'
exportPrefix = "export"
)
func parseBytes(src []byte, out map[string]string) error {
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
cutset := src
for {
cutset = getStatementStart(cutset)
if cutset == nil {
// reached end of file
break
}
key, left, err := locateKeyName(cutset)
if err != nil {
return err
}
value, left, err := extractVarValue(left, out)
if err != nil {
return err
}
out[key] = value
cutset = left
}
return nil
}
// getStatementStart returns the position where the statement begins.
//
// It skips any leading whitespace and comment lines.
func getStatementStart(src []byte) []byte {
pos := indexOfNonSpaceChar(src)
if pos == -1 {
return nil
}
src = src[pos:]
if src[0] != charComment {
return src
}
// skip comment section
pos = bytes.IndexFunc(src, isCharFunc('\n'))
if pos == -1 {
return nil
}
return getStatementStart(src[pos:])
}
// locateKeyName locates and parses key name and returns rest of slice
func locateKeyName(src []byte) (key string, cutset []byte, err error) {
// trim "export" and space at beginning
src = bytes.TrimLeftFunc(src, isSpace)
if bytes.HasPrefix(src, []byte(exportPrefix)) {
trimmed := bytes.TrimPrefix(src, []byte(exportPrefix))
if bytes.IndexFunc(trimmed, isSpace) == 0 {
src = bytes.TrimLeftFunc(trimmed, isSpace)
}
}
// locate key name end and validate it in single loop
offset := 0
loop:
for i, char := range src {
rchar := rune(char)
if isSpace(rchar) {
continue
}
switch char {
case '=', ':':
// library also supports yaml-style value declaration
key = string(src[0:i])
offset = i + 1
break loop
case '_':
default:
// variable name should match [A-Za-z0-9_.]
if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) || rchar == '.' {
continue
}
return "", nil, fmt.Errorf(
`unexpected character %q in variable name near %q`,
string(char), string(src))
}
}
if len(src) == 0 {
return "", nil, errors.New("zero length string")
}
// trim whitespace
key = strings.TrimRightFunc(key, unicode.IsSpace)
cutset = bytes.TrimLeftFunc(src[offset:], isSpace)
return key, cutset, nil
}
// extractVarValue extracts variable value and returns rest of slice
func extractVarValue(src []byte, vars map[string]string) (value string, rest []byte, err error) {
quote, hasPrefix := hasQuotePrefix(src)
if !hasPrefix {
// unquoted value - read until end of line
endOfLine := bytes.IndexFunc(src, isLineEnd)
// Hit EOF without a trailing newline
if endOfLine == -1 {
endOfLine = len(src)
if endOfLine == 0 {
return "", nil, nil
}
}
// Convert line to a rune slice to allow an accurate countback of runes
line := []rune(string(src[0:endOfLine]))
// Assume end of line is end of var
endOfVar := len(line)
if endOfVar == 0 {
return "", src[endOfLine:], nil
}
// Work backwards to check if the line ends in whitespace then
// a comment (ie asdasd # some comment)
for i := endOfVar - 1; i >= 0; i-- {
if line[i] == charComment && i > 0 {
if isSpace(line[i-1]) {
endOfVar = i
break
}
}
}
trimmed := strings.TrimFunc(string(line[0:endOfVar]), isSpace)
return expandVariables(trimmed, vars), src[endOfLine:], nil
}
// lookup quoted string terminator
for i := 1; i < len(src); i++ {
if char := src[i]; char != quote {
continue
}
// skip escaped quote symbol (\" or \', depends on quote)
if prevChar := src[i-1]; prevChar == '\\' {
continue
}
// trim quotes
trimFunc := isCharFunc(rune(quote))
value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc))
if quote == prefixDoubleQuote {
// unescape newlines for double quote (this is compat feature)
// and expand environment variables
value = expandVariables(expandEscapes(value), vars)
}
return value, src[i+1:], nil
}
// return formatted error if quoted string is not terminated
valEndIndex := bytes.IndexFunc(src, isCharFunc('\n'))
if valEndIndex == -1 {
valEndIndex = len(src)
}
return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex])
}
func expandEscapes(str string) string {
out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string {
c := strings.TrimPrefix(match, `\`)
switch c {
case "n":
return "\n"
case "r":
return "\r"
default:
return match
}
})
return unescapeCharsRegex.ReplaceAllString(out, "$1")
}
func indexOfNonSpaceChar(src []byte) int {
return bytes.IndexFunc(src, func(r rune) bool {
return !unicode.IsSpace(r)
})
}
// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character
func hasQuotePrefix(src []byte) (prefix byte, isQuoted bool) {
if len(src) == 0 {
return 0, false
}
switch prefix := src[0]; prefix {
case prefixDoubleQuote, prefixSingleQuote:
return prefix, true
default:
return 0, false
}
}
func isCharFunc(char rune) func(rune) bool {
return func(v rune) bool {
return v == char
}
}
// isSpace reports whether the rune is a space character but not line break character
//
// this differs from unicode.IsSpace, which also applies line break as space
func isSpace(r rune) bool {
switch r {
case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0:
return true
}
return false
}
func isLineEnd(r rune) bool {
if r == '\n' || r == '\r' {
return true
}
return false
}
var (
escapeRegex = regexp.MustCompile(`\\.`)
expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
unescapeCharsRegex = regexp.MustCompile(`\\([^$])`)
)
func expandVariables(v string, m map[string]string) string {
return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string {
submatch := expandVarRegex.FindStringSubmatch(s)
if submatch == nil {
return s
}
if submatch[1] == "\\" || submatch[2] == "(" {
return submatch[0][1:]
} else if submatch[4] != "" {
return m[submatch[4]]
}
return s
})
}

View File

@@ -1,304 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2019 Klaus Post. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------------------
Files: gzhttp/*
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 The New York Times Company
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------
Files: s2/cmd/internal/readahead/*
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------------------
Files: snappy/*
Files: internal/snapref/*
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------
Files: s2/cmd/internal/filepathx/*
Copyright 2016 The filepathx Authors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

File diff suppressed because it is too large

View File

@@ -1,184 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// - Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// - Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
hist []byte // Sliding window history
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
wrPos int // Current output position in buffer
rdPos int // Have emitted hist[:rdPos] already
full bool // Has a full window length been written yet?
}
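The overlap rule described above (the copy length may exceed the distance) is what lets LZ77 express run-length repeats; a standalone illustration, separate from the deleted file:

```go
package main

import "fmt"

func main() {
	hist := []byte("ab") // previously emitted output
	dist, length := 2, 6 // copy 6 bytes starting 2 bytes back

	// A forward, byte-at-a-time copy re-reads bytes it just wrote,
	// which is the behaviour writeCopy implements in bulk.
	for i := 0; i < length; i++ {
		hist = append(hist, hist[len(hist)-dist])
	}
	fmt.Println(string(hist)) // "abababab": the pair repeated by the overlap
}
```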
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
*dd = dictDecoder{hist: dd.hist}
if cap(dd.hist) < size {
dd.hist = make([]byte, size)
}
dd.hist = dd.hist[:size]
if len(dict) > len(dd.hist) {
dict = dict[len(dict)-len(dd.hist):]
}
dd.wrPos = copy(dd.hist, dict)
if dd.wrPos == len(dd.hist) {
dd.wrPos = 0
dd.full = true
}
dd.rdPos = dd.wrPos
}
// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
if dd.full {
return len(dd.hist)
}
return dd.wrPos
}
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
return dd.wrPos - dd.rdPos
}
// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
return len(dd.hist) - dd.wrPos
}
// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
return dd.hist[dd.wrPos:]
}
// writeMark advances the writer pointer by cnt.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
dd.wrPos += cnt
}
// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
dd.hist[dd.wrPos] = c
dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
dstBase := dd.wrPos
dstPos := dstBase
srcPos := dstPos - dist
endPos := dstPos + length
if endPos > len(dd.hist) {
endPos = len(dd.hist)
}
// Copy non-overlapping section after destination position.
//
// This section is non-overlapping in that the copy length for this section
// is always less than or equal to the backwards distance. This can occur
// if a distance refers to data that wraps-around in the buffer.
// Thus, a backwards copy is performed here; that is, the exact bytes in
// the source prior to the copy is placed in the destination.
if srcPos < 0 {
srcPos += len(dd.hist)
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
srcPos = 0
}
// Copy possibly overlapping section before destination position.
//
// This section can overlap if the copy length for this section is larger
// than the backwards distance. This is allowed by LZ77 so that repeated
// strings can be succinctly represented using (dist, length) pairs.
// Thus, a forwards copy is performed here; that is, the bytes copied is
// possibly dependent on the resulting bytes in the destination as the copy
// progresses along. This is functionally equivalent to the following:
//
// for i := 0; i < endPos-dstPos; i++ {
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
// }
// dstPos = endPos
//
for dstPos < endPos {
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
dstPos := dd.wrPos
endPos := dstPos + length
if dstPos < dist || endPos > len(dd.hist) {
return 0
}
dstBase := dstPos
srcPos := dstPos - dist
// Copy possibly overlapping section before destination position.
loop:
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
if dstPos < endPos {
goto loop // Avoid for-loop so that this function can be inlined
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
toRead := dd.hist[dd.rdPos:dd.wrPos]
dd.rdPos = dd.wrPos
if dd.wrPos == len(dd.hist) {
dd.wrPos, dd.rdPos = 0, 0
dd.full = true
}
return toRead
}

View File

@@ -1,193 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"fmt"
)
type fastEnc interface {
Encode(dst *tokens, src []byte)
Reset()
}
func newFastEnc(level int) fastEnc {
switch level {
case 1:
return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
case 2:
return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
case 3:
return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
case 4:
return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
case 5:
return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
case 6:
return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
default:
panic("invalid level specified")
}
}
const (
tableBits = 15 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
baseMatchOffset = 1 // The smallest match offset
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
maxMatchOffset = 1 << 15 // The largest match offset
bTableBits = 17 // Bits used in the big tables
bTableSize = 1 << bTableBits // Size of the table
allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)
const (
prime3bytes = 506832829
prime4bytes = 2654435761
prime5bytes = 889523592379
prime6bytes = 227718039650203
prime7bytes = 58295818150454627
prime8bytes = 0xcf1bbcdcb7a56463
)
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
type tableEntry struct {
offset int32
}
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
hist []byte
cur int32
}
func (e *fastGen) addBlock(src []byte) int32 {
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
if cap(e.hist) == 0 {
e.hist = make([]byte, 0, allocHistory)
} else {
if cap(e.hist) < maxMatchOffset*2 {
panic("unexpected buffer size")
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
}
s := int32(len(e.hist))
e.hist = append(e.hist, src...)
return s
}
type tableEntryPrev struct {
Cur tableEntry
Prev tableEntry
}
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
// length should always be < 32.
// Preferably length and mls should be a constant for inlining.
func hashLen(u uint64, length, mls uint8) uint32 {
switch mls {
case 3:
return (uint32(u<<8) * prime3bytes) >> (32 - length)
case 5:
return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
case 6:
return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
case 7:
return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
case 8:
return uint32((u * prime8bytes) >> (64 - length))
default:
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
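As a standalone sketch of the contract documented above, here is the mls == 6 branch in isolation: two 8-byte windows that agree on their low six bytes hash to the same table slot (the prime constant is copied from the deleted file):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const prime6bytes = 227718039650203

// hash6 mirrors the mls == 6 branch of hashLen: shift off the top two
// bytes, multiply by a large prime, keep the top `length` bits.
func hash6(u uint64, length uint8) uint32 {
	return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
}

func main() {
	a := binary.LittleEndian.Uint64([]byte("gophers!"))
	b := binary.LittleEndian.Uint64([]byte("gophers?"))
	// The windows differ only in their top byte, which hash6 discards,
	// so both positions land in the same match-table slot.
	fmt.Println(hash6(a, 15) == hash6(b, 15)) // true
}
```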
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
if debugDecode {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > maxMatchOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > maxMatchOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:], src[t:]))
}
// Reset the encoding table.
func (e *fastGen) Reset() {
if cap(e.hist) < allocHistory {
e.hist = make([]byte, 0, allocHistory)
}
// We offset current position so everything will be out of reach.
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
if e.cur <= bufferReset {
e.cur += maxMatchOffset + int32(len(e.hist))
}
e.hist = e.hist[:0]
}

View File

@@ -1,417 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"math"
"math/bits"
)
const (
maxBitsLimit = 16
// number of valid literals
literalCount = 286
)
// hcode is a huffman code with a bit code and bit length.
type hcode uint32
func (h hcode) len() uint8 {
return uint8(h)
}
func (h hcode) code64() uint64 {
return uint64(h >> 8)
}
func (h hcode) zero() bool {
return h == 0
}
type huffmanEncoder struct {
codes []hcode
bitCount [17]int32
// Allocate a reusable buffer with the longest possible frequency table.
// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
// The largest of these is literalCount, so we allocate for that case.
freqcache [literalCount + 1]literalNode
}
type literalNode struct {
literal uint16
freq uint16
}
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
// Our level. for better printing
level int32
// The frequency of the last node at this level
lastFreq int32
// The frequency of the next character to add to this level
nextCharFreq int32
// The frequency of the next pair (from level below) to add to this level.
// Only valid if the "needed" value of the next lower level is 0.
nextPairFreq int32
// The number of chains remaining to generate for this level before moving
// up to the next level
needed int32
}
// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint8) {
*h = hcode(length) | (hcode(code) << 8)
}
func newhcode(code uint16, length uint8) hcode {
return hcode(length) | (hcode(code) << 8)
}
func reverseBits(number uint16, bitLength byte) uint16 {
return bits.Reverse16(number << ((16 - bitLength) & 15))
}
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
func newHuffmanEncoder(size int) *huffmanEncoder {
// Make capacity to next power of two.
c := uint(bits.Len32(uint32(size - 1)))
return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
}
// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
h := newHuffmanEncoder(literalCount)
codes := h.codes
var ch uint16
for ch = 0; ch < literalCount; ch++ {
var bits uint16
var size uint8
switch {
case ch < 144:
// size 8, 000110000 .. 10111111
bits = ch + 48
size = 8
case ch < 256:
// size 9, 110010000 .. 111111111
bits = ch + 400 - 144
size = 9
case ch < 280:
// size 7, 0000000 .. 0010111
bits = ch - 256
size = 7
default:
// size 8, 11000000 .. 11000111
bits = ch + 192 - 280
size = 8
}
codes[ch] = newhcode(reverseBits(bits, size), size)
}
return h
}
func generateFixedOffsetEncoding() *huffmanEncoder {
h := newHuffmanEncoder(30)
codes := h.codes
for ch := range codes {
codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
}
return h
}
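As a quick sanity check of the ch < 144 branch against RFC 1951 §3.2.6:

```go
package main

import "fmt"

func main() {
	// RFC 1951 §3.2.6: literals 0..143 get 8-bit codes 00110000..10111111.
	// Literal 'A' (65) therefore gets code 65+48 = 113 = 0b01110001,
	// exactly what the ch < 144 branch above computes.
	ch := uint16('A')
	fmt.Printf("%08b\n", ch+48) // 01110001
}
```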
var fixedLiteralEncoding = generateFixedLiteralEncoding()
var fixedOffsetEncoding = generateFixedOffsetEncoding()
func (h *huffmanEncoder) bitLength(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
total += int(f) * int(h.codes[i].len())
}
}
return total
}
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
total += int(h.codes[f].len())
}
return total
}
// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
code := h.codes[i]
if code.zero() {
return math.MaxInt32
}
total += int(f) * int(code.len())
}
}
return total
}
// bitCounts returns the number of literals assigned to each bit size in the
// Huffman encoding.
//
// This method is only called when list.length >= 3.
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list is an array of the literals with non-zero frequencies and their
// associated frequencies. The array is in order of increasing frequency and
// has as its last element a special element with frequency MaxInt32.
//
// maxBits is the maximum number of bits that should be used to encode any
// literal. It must be less than 16.
//
// The return value is a slice in which slice[i] indicates the number of
// literals that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
if maxBits >= maxBitsLimit {
panic("flate: maxBits too large")
}
n := int32(len(list))
list = list[0 : n+1]
list[n] = maxNode()
// The tree can't have greater depth than n - 1, no matter what. This
// saves a little bit of work in some small cases
if maxBits > n-1 {
maxBits = n - 1
}
// Create information about each of the levels.
// A bogus "Level 0" whose sole purpose is so that
// level1.prev.needed==0. This makes level1.nextPairFreq
// be a legitimate value that never gets chosen.
var levels [maxBitsLimit]levelInfo
// leafCounts[i] counts the number of literals at the left
// of ancestors of the rightmost node at level i.
// leafCounts[i][j] is the number of literals at the left
// of the level j ancestor.
var leafCounts [maxBitsLimit][maxBitsLimit]int32
// Descending to only have 1 bounds check.
l2f := int32(list[2].freq)
l1f := int32(list[1].freq)
l0f := int32(list[0].freq) + int32(list[1].freq)
for level := int32(1); level <= maxBits; level++ {
// For every level, the first two items are the first two characters.
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
lastFreq: l1f,
nextCharFreq: l2f,
nextPairFreq: l0f,
}
leafCounts[level][level] = 2
if level == 1 {
levels[level].nextPairFreq = math.MaxInt32
}
}
// We need a total of 2*n - 2 items at top level and have already generated 2.
levels[maxBits].needed = 2*n - 4
level := uint32(maxBits)
for level < 16 {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
// We've run out of both leafs and pairs.
// End all calculations for this level.
// To make sure we never come back to this level or any lower level,
// set nextPairFreq impossibly large.
l.needed = 0
levels[level+1].nextPairFreq = math.MaxInt32
level++
continue
}
prevFreq := l.lastFreq
if l.nextCharFreq < l.nextPairFreq {
// The next item on this row is a leaf node.
n := leafCounts[level][level] + 1
l.lastFreq = l.nextCharFreq
// Lower leafCounts are the same as for the previous node.
leafCounts[level][level] = n
e := list[n]
if e.literal < math.MaxUint16 {
l.nextCharFreq = int32(e.freq)
} else {
l.nextCharFreq = math.MaxInt32
}
} else {
// The next item on this row is a pair from the previous row.
// nextPairFreq isn't valid until we generate two
// more values in the level below
l.lastFreq = l.nextPairFreq
// Take leaf counts from the lower level, except counts[level] remains the same.
save := leafCounts[level][level]
leafCounts[level] = leafCounts[level-1]
leafCounts[level][level] = save
levels[l.level-1].needed = 2
}
if l.needed--; l.needed == 0 {
// We've done everything we need to do for this level.
// Continue calculating one level up. Fill in nextPairFreq
// of that level with the sum of the two nodes we've just calculated on
// this level.
if l.level == maxBits {
// All done!
break
}
levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
level++
} else {
// If we stole from below, move down temporarily to replenish it.
for levels[level-1].needed > 0 {
level--
}
}
}
// Something is wrong if, at the end, the top level is null or hasn't used
// all of the leaves.
if leafCounts[maxBits][maxBits] != n {
panic("leafCounts[maxBits][maxBits] != n")
}
bitCount := h.bitCount[:maxBits+1]
bits := 1
counts := &leafCounts[maxBits]
for level := maxBits; level > 0; level-- {
// chain.leafCount gives the number of literals requiring at least "bits"
// bits to encode.
bitCount[bits] = counts[level] - counts[level-1]
bits++
}
return bitCount
}
// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
code := uint16(0)
for n, bits := range bitCount {
code <<= 1
if n == 0 || bits == 0 {
continue
}
// The literals list[len(list)-bits] .. list[len(list)-1]
// are encoded using "bits" bits, and get the values
// code, code + 1, .... The code values are
// assigned in literal order (not frequency order).
chunk := list[len(list)-int(bits):]
sortByLiteral(chunk)
for _, node := range chunk {
h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
code++
}
list = list[0 : len(list)-int(bits)]
}
}
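The canonical-code rule from RFC 1951 §3.2.2 that assignEncodingAndSize implements can be demonstrated standalone; the bitCount values here are made up for illustration:

```go
package main

import "fmt"

func main() {
	// Canonical code assignment per RFC 1951 §3.2.2: codes of a given length
	// are consecutive integers, and moving to the next length doubles the
	// running code, exactly as the code <<= 1 step above does.
	bitCount := []int{0, 0, 1, 2, 3} // bitCount[n] = symbols using n bits (illustrative)
	code := 0
	for n, count := range bitCount {
		code <<= 1
		for i := 0; i < count; i++ {
			fmt.Printf("length %d: %0*b\n", n, n, code)
			code++
		}
	}
	// Output:
	// length 2: 00
	// length 3: 010
	// length 3: 011
	// length 4: 1000
	// length 4: 1001
	// length 4: 1010
}
```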
// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list := h.freqcache[:len(freq)+1]
codes := h.codes[:len(freq)]
// Number of non-zero literals
count := 0
// Set list to be the set of all non-zero literals and their frequencies
for i, f := range freq {
if f != 0 {
list[count] = literalNode{uint16(i), f}
count++
} else {
codes[i] = 0
}
}
list[count] = literalNode{}
list = list[:count]
if count <= 2 {
// Handle the small cases here, because they are awkward for the general case code. With
// two or fewer literals, everything has bit length 1.
for i, node := range list {
// "list" is in order of increasing literal value.
h.codes[node.literal].set(uint16(i), 1)
}
return
}
sortByFreq(list)
// Get the number of literals for each bit count
bitCount := h.bitCounts(list, maxBits)
// And do the assignment
h.assignEncodingAndSize(bitCount, list)
}
// atLeastOne clamps the result between 1 and 15.
func atLeastOne(v float32) float32 {
if v < 1 {
return 1
}
if v > 15 {
return 15
}
return v
}
func histogram(b []byte, h []uint16) {
if len(b) >= 8<<10 {
// Split for bigger inputs
histogramSplit(b, h)
} else {
h = h[:256]
for _, t := range b {
h[t]++
}
}
}
func histogramSplit(b []byte, h []uint16) {
// Tested, and slightly faster than 2-way.
// Writing to separate arrays and combining is also slightly slower.
h = h[:256]
for len(b)&3 != 0 {
h[b[0]]++
b = b[1:]
}
n := len(b) / 4
x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
for i, t := range x {
v0 := &h[t]
v1 := &h[y[i]]
v3 := &h[w[i]]
v2 := &h[z[i]]
*v0++
*v1++
*v2++
*v3++
}
}
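The same 4-way split as a self-contained program (count4 is a hypothetical name):

```go
package main

import "fmt"

// count4 mirrors histogramSplit: process four interleaved quarters per
// iteration so the counter increments can overlap in the CPU pipeline.
func count4(b []byte, h *[256]uint16) {
	// Peel off bytes until the length is a multiple of 4.
	for len(b)&3 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
	for i, t := range x {
		h[t]++
		h[y[i]]++
		h[z[i]]++
		h[w[i]]++
	}
}

func main() {
	var h [256]uint16
	count4([]byte("abracadabra"), &h)
	fmt.Println(h['a'], h['b'], h['r']) // 5 2 2
}
```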


@ -1,159 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// Sort sorts data.
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
func sortByFreq(data []literalNode) {
n := len(data)
quickSortByFreq(data, 0, n, maxDepth(n))
}
func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
for b-a > 12 { // Use ShellSort for slices <= 12 elements
if maxDepth == 0 {
heapSort(data, a, b)
return
}
maxDepth--
mlo, mhi := doPivotByFreq(data, a, b)
// Avoiding recursion on the larger subproblem guarantees
// a stack depth of at most lg(b-a).
if mlo-a < b-mhi {
quickSortByFreq(data, a, mlo, maxDepth)
a = mhi // i.e., quickSortByFreq(data, mhi, b)
} else {
quickSortByFreq(data, mhi, b, maxDepth)
b = mlo // i.e., quickSortByFreq(data, a, mlo)
}
}
if b-a > 1 {
// Do ShellSort pass with gap 6
// It could be written in this simplified form because b-a <= 12
for i := a + 6; i < b; i++ {
if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
data[i], data[i-6] = data[i-6], data[i]
}
}
insertionSortByFreq(data, a, b)
}
}
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {
// Tukey's ``Ninther,'' median of three medians of three.
s := (hi - lo) / 8
medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
medianOfThreeSortByFreq(data, m, m-s, m+s)
medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
}
medianOfThreeSortByFreq(data, lo, m, hi-1)
// Invariants are:
// data[lo] = pivot (set up by ChoosePivot)
// data[lo < i < a] < pivot
// data[a <= i < b] <= pivot
// data[b <= i < c] unexamined
// data[c <= i < hi-1] > pivot
// data[hi-1] >= pivot
pivot := lo
a, c := lo+1, hi-1
for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
}
b := a
for {
for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
}
for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
}
if b >= c {
break
}
// data[b] > pivot; data[c-1] <= pivot
data[b], data[c-1] = data[c-1], data[b]
b++
c--
}
// If hi-c<3 then there are duplicates (by property of median of nine).
// Let's be a bit more conservative, and set border to 5.
protect := hi-c < 5
if !protect && hi-c < (hi-lo)/4 {
// Let's test some points for equality to pivot
dups := 0
if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
data[c], data[hi-1] = data[hi-1], data[c]
c++
dups++
}
if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
b--
dups++
}
// m-lo = (hi-lo)/2 > 6
// b-lo > (hi-lo)*3/4-1 > 8
// ==> m < b ==> data[m] <= pivot
if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
data[m], data[b-1] = data[b-1], data[m]
b--
dups++
}
// if at least 2 points are equal to pivot, assume skewed distribution
protect = dups > 1
}
if protect {
// Protect against a lot of duplicates
// Add invariant:
// data[a <= i < b] unexamined
// data[b <= i < c] = pivot
for {
for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
}
for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
}
if a >= b {
break
}
// data[a] == pivot; data[b-1] < pivot
data[a], data[b-1] = data[b-1], data[a]
a++
b--
}
}
// Swap pivot into middle
data[pivot], data[b-1] = data[b-1], data[pivot]
return b - 1, c
}
// Insertion sort
func insertionSortByFreq(data []literalNode, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// quickSortByFreq, loosely following Bentley and McIlroy,
// ``Engineering a Sort Function,'' SP&E November 1993.
// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
// sort 3 elements
if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
data[m1], data[m0] = data[m0], data[m1]
}
// data[m0] <= data[m1]
if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
data[m2], data[m1] = data[m1], data[m2]
// data[m0] <= data[m2] && data[m1] < data[m2]
if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
data[m1], data[m0] = data[m0], data[m1]
}
}
// now data[m0] <= data[m1] <= data[m2]
}


@ -1,201 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// Sort sorts data.
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
// data.Less and data.Swap. The sort is not guaranteed to be stable.
func sortByLiteral(data []literalNode) {
n := len(data)
quickSort(data, 0, n, maxDepth(n))
}
func quickSort(data []literalNode, a, b, maxDepth int) {
for b-a > 12 { // Use ShellSort for slices <= 12 elements
if maxDepth == 0 {
heapSort(data, a, b)
return
}
maxDepth--
mlo, mhi := doPivot(data, a, b)
// Avoiding recursion on the larger subproblem guarantees
// a stack depth of at most lg(b-a).
if mlo-a < b-mhi {
quickSort(data, a, mlo, maxDepth)
a = mhi // i.e., quickSort(data, mhi, b)
} else {
quickSort(data, mhi, b, maxDepth)
b = mlo // i.e., quickSort(data, a, mlo)
}
}
if b-a > 1 {
// Do ShellSort pass with gap 6
// It could be written in this simplified form because b-a <= 12
for i := a + 6; i < b; i++ {
if data[i].literal < data[i-6].literal {
data[i], data[i-6] = data[i-6], data[i]
}
}
insertionSort(data, a, b)
}
}
func heapSort(data []literalNode, a, b int) {
first := a
lo := 0
hi := b - a
// Build heap with greatest element at top.
for i := (hi - 1) / 2; i >= 0; i-- {
siftDown(data, i, hi, first)
}
// Pop elements, largest first, into end of data.
for i := hi - 1; i >= 0; i-- {
data[first], data[first+i] = data[first+i], data[first]
siftDown(data, lo, i, first)
}
}
// siftDown implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
func siftDown(data []literalNode, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
child++
}
if data[first+root].literal > data[first+child].literal {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {
// Tukey's ``Ninther,'' median of three medians of three.
s := (hi - lo) / 8
medianOfThree(data, lo, lo+s, lo+2*s)
medianOfThree(data, m, m-s, m+s)
medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
}
medianOfThree(data, lo, m, hi-1)
// Invariants are:
// data[lo] = pivot (set up by ChoosePivot)
// data[lo < i < a] < pivot
// data[a <= i < b] <= pivot
// data[b <= i < c] unexamined
// data[c <= i < hi-1] > pivot
// data[hi-1] >= pivot
pivot := lo
a, c := lo+1, hi-1
for ; a < c && data[a].literal < data[pivot].literal; a++ {
}
b := a
for {
for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
}
for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
}
if b >= c {
break
}
// data[b] > pivot; data[c-1] <= pivot
data[b], data[c-1] = data[c-1], data[b]
b++
c--
}
// If hi-c<3 then there are duplicates (by property of median of nine).
// Let's be a bit more conservative, and set border to 5.
protect := hi-c < 5
if !protect && hi-c < (hi-lo)/4 {
// Let's test some points for equality to pivot
dups := 0
if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
data[c], data[hi-1] = data[hi-1], data[c]
c++
dups++
}
if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
b--
dups++
}
// m-lo = (hi-lo)/2 > 6
// b-lo > (hi-lo)*3/4-1 > 8
// ==> m < b ==> data[m] <= pivot
if data[m].literal > data[pivot].literal { // data[m] = pivot
data[m], data[b-1] = data[b-1], data[m]
b--
dups++
}
// if at least 2 points are equal to pivot, assume skewed distribution
protect = dups > 1
}
if protect {
// Protect against a lot of duplicates
// Add invariant:
// data[a <= i < b] unexamined
// data[b <= i < c] = pivot
for {
for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
}
for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
}
if a >= b {
break
}
// data[a] == pivot; data[b-1] < pivot
data[a], data[b-1] = data[b-1], data[a]
a++
b--
}
}
// Swap pivot into middle
data[pivot], data[b-1] = data[b-1], data[pivot]
return b - 1, c
}
// Insertion sort
func insertionSort(data []literalNode, a, b int) {
for i := a + 1; i < b; i++ {
for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
data[j], data[j-1] = data[j-1], data[j]
}
}
}
// maxDepth returns a threshold at which quicksort should switch
// to heapsort. It returns 2*ceil(lg(n+1)).
func maxDepth(n int) int {
var depth int
for i := n; i > 0; i >>= 1 {
depth++
}
return depth * 2
}
// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
func medianOfThree(data []literalNode, m1, m0, m2 int) {
// sort 3 elements
if data[m1].literal < data[m0].literal {
data[m1], data[m0] = data[m0], data[m1]
}
// data[m0] <= data[m1]
if data[m2].literal < data[m1].literal {
data[m2], data[m1] = data[m1], data[m2]
// data[m0] <= data[m2] && data[m1] < data[m2]
if data[m1].literal < data[m0].literal {
data[m1], data[m0] = data[m0], data[m1]
}
}
// now data[m0] <= data[m1] <= data[m2]
}


@ -1,829 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
// formats.
package flate
import (
"bufio"
"compress/flate"
"fmt"
"io"
"math/bits"
"sync"
)
const (
maxCodeLen = 16 // max length of Huffman code
maxCodeLenMask = 15 // mask for max length of Huffman code
// The next three numbers come from the RFC section 3.2.7, with the
// additional proviso in section 3.2.5 which implies that distance codes
// 30 and 31 should never occur in compressed data.
maxNumLit = 286
maxNumDist = 30
numCodes = 19 // number of codes in Huffman meta-code
debugDecode = false
)
// Value of length - 3 and extra bits.
type lengthExtra struct {
length, extra uint8
}
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
var bitMask32 = [32]uint32{
0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
} // up to 32 bits
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError = flate.CorruptInputError
// An InternalError reports an error in the flate code itself.
type InternalError string
func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError = flate.ReadError
// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError = flate.WriteError
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
// Reset discards any buffered data and resets the Resetter as if it was
// newly initialized with the given reader.
Reset(r io.Reader, dict []byte) error
}
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
// http://www.gzip.org/algorithm.txt
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
huffmanChunkBits = 9
huffmanNumChunks = 1 << huffmanChunkBits
huffmanCountMask = 15
huffmanValueShift = 4
)
type huffmanDecoder struct {
maxRead int // the maximum number of bits we can read and not overread
chunks *[huffmanNumChunks]uint16 // chunks as described above
links [][]uint16 // overflow links
linkMask uint32 // mask the width of the link table
}
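Unpacking a chunk entry looks like this; the symbol and width below are illustrative (symbol 256 is the end-of-block marker, which has a 7-bit fixed code):

```go
package main

import "fmt"

const (
	huffmanCountMask  = 15
	huffmanValueShift = 4
)

func main() {
	// A direct chunk packs value<<4 | nbits (nbits in 1..huffmanChunkBits).
	// Entries with nbits == huffmanChunkBits+1 instead point into the links
	// table, with value selecting the overflow sub-table.
	chunk := uint16(256<<huffmanValueShift | 7) // illustrative: symbol 256, 7-bit code
	n := chunk & huffmanCountMask
	v := chunk >> huffmanValueShift
	fmt.Println(n, v) // 7 256
}
```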
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(lengths []int) bool {
// Sanity enables additional runtime tests during Huffman
// table construction. It's intended to be used during
// development to supplement the currently ad-hoc unit tests.
const sanity = false
if h.chunks == nil {
h.chunks = new([huffmanNumChunks]uint16)
}
if h.maxRead != 0 {
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
}
// Count number of codes of each length,
// compute maxRead and max length.
var count [maxCodeLen]int
var min, max int
for _, n := range lengths {
if n == 0 {
continue
}
if min == 0 || n < min {
min = n
}
if n > max {
max = n
}
count[n&maxCodeLenMask]++
}
// Empty tree. The decompressor.huffSym function will fail later if the tree
// is used. Technically, an empty tree is only valid for the HDIST tree and
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
// is guaranteed to fail since it will attempt to use the tree to decode the
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
// guaranteed to fail later since the compressed data section must be
// composed of at least one symbol (the end-of-block marker).
if max == 0 {
return true
}
code := 0
var nextcode [maxCodeLen]int
for i := min; i <= max; i++ {
code <<= 1
nextcode[i&maxCodeLenMask] = code
code += count[i&maxCodeLenMask]
}
// Check that the coding is complete (i.e., that we've
// assigned all 2-to-the-max possible bit sequences).
// Exception: To be compatible with zlib, we also need to
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
if code != 1<<uint(max) && !(code == 1 && max == 1) {
if debugDecode {
fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
}
return false
}
h.maxRead = min
chunks := h.chunks[:]
for i := range chunks {
chunks[i] = 0
}
if max > huffmanChunkBits {
numLinks := 1 << (uint(max) - huffmanChunkBits)
h.linkMask = uint32(numLinks - 1)
// create link tables
link := nextcode[huffmanChunkBits+1] >> 1
if cap(h.links) < huffmanNumChunks-link {
h.links = make([][]uint16, huffmanNumChunks-link)
} else {
h.links = h.links[:huffmanNumChunks-link]
}
for j := uint(link); j < huffmanNumChunks; j++ {
reverse := int(bits.Reverse16(uint16(j)))
reverse >>= uint(16 - huffmanChunkBits)
off := j - uint(link)
if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
if cap(h.links[off]) < numLinks {
h.links[off] = make([]uint16, numLinks)
} else {
h.links[off] = h.links[off][:numLinks]
}
}
} else {
h.links = h.links[:0]
}
for i, n := range lengths {
if n == 0 {
continue
}
code := nextcode[n]
nextcode[n]++
chunk := uint16(i<<huffmanValueShift | n)
reverse := int(bits.Reverse16(uint16(code)))
reverse >>= uint(16 - n)
if n <= huffmanChunkBits {
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
// We should never need to overwrite
// an existing chunk. Also, 0 is
// never a valid chunk, because the
// lower 4 "count" bits should be
// between 1 and 15.
if sanity && h.chunks[off] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[off] = chunk
}
} else {
j := reverse & (huffmanNumChunks - 1)
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
// Longer codes should have been
// associated with a link table above.
panic("impossible: not an indirect chunk")
}
value := h.chunks[j] >> huffmanValueShift
linktab := h.links[value]
reverse >>= huffmanChunkBits
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
if sanity && linktab[off] != 0 {
panic("impossible: overwriting existing chunk")
}
linktab[off] = chunk
}
}
}
if sanity {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
for i, chunk := range h.chunks {
if chunk == 0 {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
if code == 1 && i%2 == 1 {
continue
}
panic("impossible: missing chunk")
}
}
for _, linktab := range h.links {
for _, chunk := range linktab {
if chunk == 0 {
panic("impossible: missing chunk")
}
}
}
}
return true
}
// Reader is the actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
type Reader interface {
io.Reader
io.ByteReader
}
type step uint8
const (
copyData step = iota + 1
nextBlock
huffmanBytesBuffer
huffmanBytesReader
huffmanBufioReader
huffmanStringsReader
huffmanGenericReader
)
// Decompress state.
type decompressor struct {
// Input source.
r Reader
roffset int64
// Huffman decoders for literal/length, distance.
h1, h2 huffmanDecoder
// Length arrays used to define Huffman codes.
bits *[maxNumLit + maxNumDist]int
codebits *[numCodes]int
// Output history, buffer.
dict dictDecoder
// Next step in the decompression,
// and decompression state.
step step
stepState int
err error
toRead []byte
hl, hd *huffmanDecoder
copyLen int
copyDist int
// Temporary buffer (avoids repeated allocation).
buf [4]byte
// Input bits, in top of b.
b uint32
nb uint
final bool
}
func (f *decompressor) nextBlock() {
for f.nb < 1+2 {
if f.err = f.moreBits(); f.err != nil {
return
}
}
f.final = f.b&1 == 1
f.b >>= 1
typ := f.b & 3
f.b >>= 2
f.nb -= 1 + 2
switch typ {
case 0:
f.dataBlock()
if debugDecode {
fmt.Println("stored block")
}
case 1:
// compressed, fixed Huffman tables
f.hl = &fixedHuffmanDecoder
f.hd = nil
f.huffmanBlockDecoder()
if debugDecode {
fmt.Println("predefinied huffman block")
}
case 2:
// compressed, dynamic Huffman tables
if f.err = f.readHuffman(); f.err != nil {
break
}
f.hl = &f.h1
f.hd = &f.h2
f.huffmanBlockDecoder()
if debugDecode {
fmt.Println("dynamic huffman block")
}
default:
// 3 is reserved.
if debugDecode {
fmt.Println("reserved data block encountered")
}
f.err = CorruptInputError(f.roffset)
}
}
func (f *decompressor) Read(b []byte) (int, error) {
for {
if len(f.toRead) > 0 {
n := copy(b, f.toRead)
f.toRead = f.toRead[n:]
if len(f.toRead) == 0 {
return n, f.err
}
return n, nil
}
if f.err != nil {
return 0, f.err
}
f.doStep()
if f.err != nil && len(f.toRead) == 0 {
f.toRead = f.dict.readFlush() // Flush what's left in case of error
}
}
}
// WriteTo implements the io.WriterTo interface for io.Copy and friends.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
total := int64(0)
flushed := false
for {
if len(f.toRead) > 0 {
n, err := w.Write(f.toRead)
total += int64(n)
if err != nil {
f.err = err
return total, err
}
if n != len(f.toRead) {
return total, io.ErrShortWrite
}
f.toRead = f.toRead[:0]
}
if f.err != nil && flushed {
if f.err == io.EOF {
return total, nil
}
return total, f.err
}
if f.err == nil {
f.doStep()
}
if len(f.toRead) == 0 && f.err != nil && !flushed {
f.toRead = f.dict.readFlush() // Flush what's left in case of error
flushed = true
}
}
}
func (f *decompressor) Close() error {
if f.err == io.EOF {
return nil
}
return f.err
}
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
func (f *decompressor) readHuffman() error {
// HLIT[5], HDIST[5], HCLEN[4].
for f.nb < 5+5+4 {
if err := f.moreBits(); err != nil {
return err
}
}
nlit := int(f.b&0x1F) + 257
if nlit > maxNumLit {
if debugDecode {
fmt.Println("nlit > maxNumLit", nlit)
}
return CorruptInputError(f.roffset)
}
f.b >>= 5
ndist := int(f.b&0x1F) + 1
if ndist > maxNumDist {
if debugDecode {
fmt.Println("ndist > maxNumDist", ndist)
}
return CorruptInputError(f.roffset)
}
f.b >>= 5
nclen := int(f.b&0xF) + 4
// numCodes is 19, so nclen is always valid.
f.b >>= 4
f.nb -= 5 + 5 + 4
// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
for i := 0; i < nclen; i++ {
for f.nb < 3 {
if err := f.moreBits(); err != nil {
return err
}
}
f.codebits[codeOrder[i]] = int(f.b & 0x7)
f.b >>= 3
f.nb -= 3
}
for i := nclen; i < len(codeOrder); i++ {
f.codebits[codeOrder[i]] = 0
}
if !f.h1.init(f.codebits[0:]) {
if debugDecode {
fmt.Println("init codebits failed")
}
return CorruptInputError(f.roffset)
}
// HLIT + 257 code lengths, HDIST + 1 code lengths,
// using the code length Huffman code.
for i, n := 0, nlit+ndist; i < n; {
x, err := f.huffSym(&f.h1)
if err != nil {
return err
}
if x < 16 {
// Actual length.
f.bits[i] = x
i++
continue
}
// Repeat previous length or zero.
var rep int
var nb uint
var b int
switch x {
default:
return InternalError("unexpected length code")
case 16:
rep = 3
nb = 2
if i == 0 {
if debugDecode {
fmt.Println("i==0")
}
return CorruptInputError(f.roffset)
}
b = f.bits[i-1]
case 17:
rep = 3
nb = 3
b = 0
case 18:
rep = 11
nb = 7
b = 0
}
for f.nb < nb {
if err := f.moreBits(); err != nil {
if debugDecode {
fmt.Println("morebits:", err)
}
return err
}
}
rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
f.b >>= nb & regSizeMaskUint32
f.nb -= nb
if i+rep > n {
if debugDecode {
fmt.Println("i+rep > n", i, rep, n)
}
return CorruptInputError(f.roffset)
}
for j := 0; j < rep; j++ {
f.bits[i] = b
i++
}
}
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
if debugDecode {
fmt.Println("init2 failed")
}
return CorruptInputError(f.roffset)
}
// As an optimization, we can initialize the maxRead bits to read at a time
// for the HLIT tree to the length of the EOB marker since we know that
// every block must terminate with one. This preserves the property that
// we never read any extra bytes after the end of the DEFLATE stream.
if f.h1.maxRead < f.bits[endBlockMarker] {
f.h1.maxRead = f.bits[endBlockMarker]
}
if !f.final {
// If not the final block, the smallest block possible is
// a predefined table, BTYPE=01, with a single EOB marker.
// This will take up 3 + 7 bits.
f.h1.maxRead += 10
}
return nil
}
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
// Uncompressed.
// Discard current half-byte.
left := (f.nb) & 7
f.nb -= left
f.b >>= left
offBytes := f.nb >> 3
// Unfilled values will be overwritten.
f.buf[0] = uint8(f.b)
f.buf[1] = uint8(f.b >> 8)
f.buf[2] = uint8(f.b >> 16)
f.buf[3] = uint8(f.b >> 24)
f.roffset += int64(offBytes)
f.nb, f.b = 0, 0
// Length then ones-complement of length.
nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
f.roffset += int64(nr)
if err != nil {
f.err = noEOF(err)
return
}
n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
if nn != ^n {
if debugDecode {
ncomp := ^n
fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
}
f.err = CorruptInputError(f.roffset)
return
}
if n == 0 {
f.toRead = f.dict.readFlush()
f.finishBlock()
return
}
f.copyLen = int(n)
f.copyData()
}
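For reference, a sketch of the stored-block length header that dataBlock validates, using the standard encoding/binary package:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A stored (BTYPE=00) block carries LEN and NLEN = ^LEN after the
	// header, little-endian, which is exactly what dataBlock checks above.
	payload := []byte("hello")
	var hdr [4]byte
	n := uint16(len(payload))
	binary.LittleEndian.PutUint16(hdr[0:2], n)
	binary.LittleEndian.PutUint16(hdr[2:4], ^n)

	// The decoder's check:
	ln := binary.LittleEndian.Uint16(hdr[0:2])
	nn := binary.LittleEndian.Uint16(hdr[2:4])
	fmt.Println(nn == ^ln) // true; anything else is a CorruptInputError
}
```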
// copyData copies f.copyLen bytes from the underlying reader into f.hist.
// It pauses for reads when f.hist is full.
func (f *decompressor) copyData() {
buf := f.dict.writeSlice()
if len(buf) > f.copyLen {
buf = buf[:f.copyLen]
}
cnt, err := io.ReadFull(f.r, buf)
f.roffset += int64(cnt)
f.copyLen -= cnt
f.dict.writeMark(cnt)
if err != nil {
f.err = noEOF(err)
return
}
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
f.step = copyData
return
}
f.finishBlock()
}
func (f *decompressor) finishBlock() {
if f.final {
if f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
f.err = io.EOF
}
f.step = nextBlock
}
func (f *decompressor) doStep() {
switch f.step {
case copyData:
f.copyData()
case nextBlock:
f.nextBlock()
case huffmanBytesBuffer:
f.huffmanBytesBuffer()
case huffmanBytesReader:
f.huffmanBytesReader()
case huffmanBufioReader:
f.huffmanBufioReader()
case huffmanStringsReader:
f.huffmanStringsReader()
case huffmanGenericReader:
f.huffmanGenericReader()
default:
panic("BUG: unexpected step state")
}
}
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
func noEOF(e error) error {
if e == io.EOF {
return io.ErrUnexpectedEOF
}
return e
}
func (f *decompressor) moreBits() error {
c, err := f.r.ReadByte()
if err != nil {
return noEOF(err)
}
f.roffset++
f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
f.nb += 8
return nil
}
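The LSB-first bit buffer is simple to demonstrate in isolation:

```go
package main

import "fmt"

func main() {
	// moreBits appends each new byte above the bits already buffered:
	// DEFLATE is LSB-first, so the oldest unread bit is always bit 0 of b.
	var b uint32
	var nb uint
	for _, c := range []byte{0b10110010, 0b00000001} {
		b |= uint32(c) << nb
		nb += 8
	}
	// Consume the 3 lowest bits (the first 3 bits of the stream).
	low3 := b & 7
	b >>= 3
	nb -= 3
	fmt.Println(low3, nb) // 2 13
}
```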
// Read the next Huffman-encoded symbol from f according to h.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with a single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it to
// satisfy the n == 0 check below.
n := uint(h.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
nb, b := f.nb, f.b
for {
for nb < n {
c, err := f.r.ReadByte()
if err != nil {
f.b = b
f.nb = nb
return 0, noEOF(err)
}
f.roffset++
b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := h.chunks[b&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= nb {
if n == 0 {
f.b = b
f.nb = nb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return 0, f.err
}
f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
return int(chunk >> huffmanValueShift), nil
}
}
}
func makeReader(r io.Reader) Reader {
if rr, ok := r.(Reader); ok {
return rr
}
return bufio.NewReader(r)
}
func fixedHuffmanDecoderInit() {
fixedOnce.Do(func() {
// These come from the RFC section 3.2.6.
var bits [288]int
for i := 0; i < 144; i++ {
bits[i] = 8
}
for i := 144; i < 256; i++ {
bits[i] = 9
}
for i := 256; i < 280; i++ {
bits[i] = 7
}
for i := 280; i < 288; i++ {
bits[i] = 8
}
fixedHuffmanDecoder.init(bits[:])
})
}
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
*f = decompressor{
r: makeReader(r),
bits: f.bits,
codebits: f.codebits,
h1: f.h1,
h2: f.h2,
dict: f.dict,
step: nextBlock,
}
f.dict.init(maxMatchOffset, dict)
return nil
}
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = nextBlock
f.dict.init(maxMatchOffset, nil)
return &f
}
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = nextBlock
f.dict.init(maxMatchOffset, dict)
return &f
}


@ -1,241 +0,0 @@
package flate
import (
"encoding/binary"
"fmt"
"math/bits"
)
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastEncL1 struct {
fastGen
table [tableSize]tableEntry
}
// Encode implements level 1 compression, using a single-table, Snappy-like algorithm.
func (e *fastEncL1) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 5
const doEvery = 2
nextS := s
var candidate tableEntry
for {
nextHash := hashLen(cv, tableBits, hashBytes)
candidate = e.table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hashLen(now, tableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
now >>= 8
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
cv = now
s = nextS
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
var l = int32(4)
if false {
l = e.matchlenLong(s+4, t+4, src) + 4
} else {
// inlined:
a := src[s+4:]
b := src[t+4:]
for len(a) >= 8 {
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
l += int32(bits.TrailingZeros64(diff) >> 3)
break
}
l += 8
a = a[8:]
b = b[8:]
}
if len(a) < 8 {
b = b[:len(a)]
for i := range a {
if a[i] != b[i] {
break
}
l++
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
// Save the match found
if false {
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
} else {
// Inlined...
xoffset := uint32(s - t - baseMatchOffset)
xlength := l
oc := offsetCode(xoffset)
xoffset |= oc << 16
for xlength > 0 {
xl := xlength
if xl > 258 {
if xl > 258+baseMatchLength {
xl = 258
} else {
xl = 258 - baseMatchLength
}
}
xlength -= xl
xl -= baseMatchLength
dst.extraHist[lengthCodes1[uint8(xl)]]++
dst.offHist[oc]++
dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
dst.n++
}
}
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index first pair after match end.
if int(s+l+8) < len(src) {
cv := load6432(src, s)
e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
x >>= 16
currHash := hashLen(x, tableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
cv = x >> 8
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
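The inlined match-length loop above (the binary.LittleEndian.Uint64 XOR followed by bits.TrailingZeros64) is worth seeing as a standalone function; matchLen8 is a hypothetical name for this sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen8 compares 8 bytes at a time and uses the trailing zero count of
// the XOR to locate the first differing byte, as inlined in Encode above.
func matchLen8(a, b []byte) int {
	var l int
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return l + bits.TrailingZeros64(diff)>>3
		}
		l += 8
		a, b = a[8:], b[8:]
	}
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		l++
	}
	return l
}

func main() {
	fmt.Println(matchLen8([]byte("gopher gopher!"), []byte("gopher gopherX"))) // 13
}
```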


@ -1,214 +0,0 @@
package flate
import "fmt"
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastEncL2 struct {
fastGen
table [bTableSize]tableEntry
}
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks, giving better compression at a small slowdown.
func (e *fastEncL2) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
// skipLog controls how quickly we start skipping ahead when no matches have been found in a long while.
const skipLog = 5
const doEvery = 2
nextS := s
var candidate tableEntry
for {
nextHash := hashLen(cv, bTableBits, hashBytes)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
candidate = e.table[nextHash]
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
nextHash = hashLen(now, bTableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
now >>= 8
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
cv = now
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
l := e.matchlenLong(s+4, t+4, src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index first pair after match end.
if int(s+l+8) < len(src) {
cv := load6432(src, s)
e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
// Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, i)
nextHash := hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one
x >>= 16
nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
// Skip one
x >>= 16
nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 to s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
prevHash := hashLen(x, bTableBits, hashBytes)
prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
e.table[prevHash2] = tableEntry{offset: o + 1}
currHash := hashLen(x>>16, bTableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
cv = x >> 24
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
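The stride formula nextS = s + doEvery + (s-nextEmit)>>skipLog deserves a worked example. This sketch prints where the stride grows while no match is found (nextEmit pinned at 0):

```go
package main

import "fmt"

func main() {
	// The longer we go without finding a match (s-nextEmit grows), the
	// larger the stride, so nearly incompressible regions scan quickly.
	const skipLog, doEvery = 5, 2
	prev := int32(0)
	for s := int32(0); s < 200; {
		step := doEvery + s>>skipLog // nextEmit stays at 0 while nothing matches
		if step != prev {
			fmt.Printf("at s=%d the stride becomes %d\n", s, step)
			prev = step
		}
		s += step
	}
	// Prints strides 2, 3, 4, 5, 6, 7, 8 at s = 0, 32, 65, 97, 132, 162, 197.
}
```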


@ -1,241 +0,0 @@
package flate
import "fmt"
// fastEncL3 is the level 3 encoder; each hash bucket keeps two candidates.
type fastEncL3 struct {
fastGen
table [1 << 16]tableEntryPrev
}
// Encode uses a similar algorithm to level 2, but will check up to two candidates.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
tableBits = 16
tableSize = 1 << tableBits
hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
}
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
e.table[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// Skip if too small.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 7
nextS := s
var candidate tableEntry
for {
nextHash := hashLen(cv, tableBits, hashBytes)
s = nextS
nextS = s + 1 + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
candidates := e.table[nextHash]
now := load6432(src, nextS)
// Safe offset distance until s + 4...
minOffset := e.cur + s - (maxMatchOffset - 4)
e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
// Check both candidates
candidate = candidates.Cur
if candidate.offset < minOffset {
cv = now
// Previous will also be invalid, we have nothing.
continue
}
if uint32(cv) == load3232(src, candidate.offset-e.cur) {
if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
break
}
// Both match and are valid, pick longest.
offset := s - (candidate.offset - e.cur)
o2 := s - (candidates.Prev.offset - e.cur)
l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
if l2 > l1 {
candidate = candidates.Prev
}
break
} else {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
}
cv = now
}
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
//
t := candidate.offset - e.cur
l := e.matchlenLong(s+4, t+4, src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
t += l
// Index first pair after match end.
if int(t+8) < len(src) && t > 0 {
cv = load6432(src, t)
nextHash := hashLen(cv, tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + t},
}
}
goto emitRemainder
}
// Store every 5th hash in-between.
for i := s - l + 2; i < s-5; i += 6 {
nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + i}}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 to s.
x := load6432(src, s-2)
prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 2},
}
x >>= 8
prevHash = hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 1},
}
x >>= 8
currHash := hashLen(x, tableBits, hashBytes)
candidates := e.table[currHash]
cv = x
e.table[currHash] = tableEntryPrev{
Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur},
}
// Check both candidates
candidate = candidates.Cur
minOffset := e.cur + s - (maxMatchOffset - 4)
if candidate.offset > minOffset {
if uint32(cv) == load3232(src, candidate.offset-e.cur) {
// Found a match...
continue
}
candidate = candidates.Prev
if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
// Match at prev...
continue
}
}
cv = x >> 8
s++
break
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
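A minimal sketch of the two-entry bucket update behind tableEntryPrev; the types mirror the ones above, but the code is illustrative:

```go
package main

import "fmt"

type tableEntry struct{ offset int32 }

// twoWay mirrors tableEntryPrev: each hash bucket remembers the two most
// recent positions, trading a second comparison for better match coverage.
type twoWay struct{ Cur, Prev tableEntry }

func main() {
	var bucket twoWay
	for _, pos := range []int32{10, 50, 90} {
		// The previous Cur slides into Prev, exactly as in Encode above.
		bucket = twoWay{Prev: bucket.Cur, Cur: tableEntry{offset: pos}}
	}
	fmt.Println(bucket.Cur.offset, bucket.Prev.offset) // 90 50
}
```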


@ -1,221 +0,0 @@
package flate
import "fmt"
type fastEncL4 struct {
fastGen
table [tableSize]tableEntry
bTable [tableSize]tableEntry
}
func (e *fastEncL4) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntry{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.bTable[i].offset = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
e.bTable[nextHashL] = entry
t = lCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
// We got a long match. Use that.
break
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
lCandidate = e.bTable[hash7(next, tableBits)]
// If the next long is a candidate, check if we should use that instead...
lOff := nextS - (lCandidate.offset - e.cur)
if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
if l2 > l1 {
s = nextS
t = lCandidate.offset - e.cur
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
// Extend the 4-byte match as long as possible.
l := e.matchlenLong(s+4, t+4, src) + 4
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic("s-t")
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index first pair after match end.
if int(s+8) < len(src) {
cv := load6432(src, s)
e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
// Store every 3rd hash in-between
if true {
i := nextS
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
i += 3
for ; i < s-1; i += 3 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
e.bTable[prevHashL] = tableEntry{offset: o}
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}


@ -1,708 +0,0 @@
package flate
import "fmt"
type fastEncL5 struct {
fastGen
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL5) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
l = e.matchlen(s+4, t+4, src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
t2 := eLong - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// Store every 3rd hash in-between.
if true {
const hashEvery = 3
i := s - l + 1
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// Do a long entry at i+1
cv >>= 8
t = tableEntry{offset: t.offset + 1}
eLong = &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
for ; i < s-1; i += hashEvery {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
// fastEncL5Window is a level 5 encoder,
// but with a custom window size.
type fastEncL5Window struct {
hist []byte
cur int32
maxOffset int32
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
maxMatchOffset := e.maxOffset
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
for {
const skipLog = 6
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
l = e.matchlen(s+4, t+4, src) + 4
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// If the next long is a candidate, use that...
t2 := lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
if l == 0 {
// Extend the 4-byte match as long as possible.
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
t2 := eLong - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if debugDeflate {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// Store every 3rd hash in-between.
if true {
const hashEvery = 3
i := s - l + 1
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// Do a long entry at i+1
cv >>= 8
t = tableEntry{offset: t.offset + 1}
eLong = &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
for ; i < s-1; i += hashEvery {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
cv = x >> 8
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}
// Reset the encoding table.
func (e *fastEncL5Window) Reset() {
// We keep the same allocs, since we are compressing the same block sizes.
if cap(e.hist) < allocHistory {
e.hist = make([]byte, 0, allocHistory)
}
// We offset current position so everything will be out of reach.
// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
if e.cur <= int32(bufferReset) {
e.cur += e.maxOffset + int32(len(e.hist))
}
e.hist = e.hist[:0]
}
func (e *fastEncL5Window) addBlock(src []byte) int32 {
// check if we have space already
maxMatchOffset := e.maxOffset
if len(e.hist)+len(src) > cap(e.hist) {
if cap(e.hist) == 0 {
e.hist = make([]byte, 0, allocHistory)
} else {
if cap(e.hist) < int(maxMatchOffset*2) {
panic("unexpected buffer size")
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
copy(e.hist[0:maxMatchOffset], e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
}
s := int32(len(e.hist))
e.hist = append(e.hist, src...)
return s
}
// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0, and that s < len(src).
func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
if debugDecode {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > e.maxOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:s1], src[t:]))
}
// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >= 0, and that s < len(src).
func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
if int(s) >= len(src) {
panic(fmt.Sprint("s >= len(src):", s, len(src)))
}
if t < 0 {
panic(fmt.Sprint("t < 0:", t))
}
if s-t > e.maxOffset {
panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
}
}
// Extend the match to be as long as possible.
return int32(matchLen(src[s:], src[t:]))
}


@ -1,325 +0,0 @@
package flate
import "fmt"
type fastEncL6 struct {
fastGen
table [tableSize]tableEntry
bTable [tableSize]tableEntryPrev
}
func (e *fastEncL6) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
}
// Protect against e.cur wraparound.
for e.cur >= bufferReset {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
for i := range e.bTable[:] {
e.bTable[i] = tableEntryPrev{}
}
e.cur = maxMatchOffset
break
}
// Shift down everything in the table that isn't already too far away.
minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
for i := range e.table[:] {
v := e.table[i].offset
if v <= minOff {
v = 0
} else {
v = v - e.cur + maxMatchOffset
}
e.table[i].offset = v
}
for i := range e.bTable[:] {
v := e.bTable[i]
if v.Cur.offset <= minOff {
v.Cur.offset = 0
v.Prev.offset = 0
} else {
v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
if v.Prev.offset <= minOff {
v.Prev.offset = 0
} else {
v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
}
}
e.bTable[i] = v
}
e.cur = maxMatchOffset
}
s := e.addBlock(src)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Override src
src = e.hist
nextEmit := s
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load6432(src, s)
// Repeat MUST be > 1 and within range
repeat := int32(1)
for {
const skipLog = 7
const doEvery = 1
nextS := s
var l int32
var t int32
for {
nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
// Fetch a short+long candidate
sCandidate := e.table[nextHashS]
lCandidate := e.bTable[nextHashL]
next := load6432(src, nextS)
entry := tableEntry{offset: s + e.cur}
e.table[nextHashS] = entry
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
// Calculate hashes of 'next'
nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
if s-t < maxMatchOffset {
if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
// Long candidate matches at least 4 bytes.
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// Check the previous long candidate as well.
t2 := lCandidate.Prev.offset - e.cur
if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
l = e.matchlen(s+4, t+4, src) + 4
ml1 := e.matchlen(s+4, t2+4, src) + 4
if ml1 > l {
t = t2
l = ml1
break
}
}
break
}
// Current value did not match, but check if previous long value does.
t = lCandidate.Prev.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
break
}
}
t = sCandidate.offset - e.cur
if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
// Found a 4-byte match...
l = e.matchlen(s+4, t+4, src) + 4
// Look up next long candidate (at nextS)
lCandidate = e.bTable[nextHashL]
// Store the next match
e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
// Check repeat at s + repOff
const repOff = 1
t2 := s - repeat + repOff
if load3232(src, t2) == uint32(cv>>(8*repOff)) {
ml := e.matchlen(s+4+repOff, t2+4, src) + 4
if ml > l {
t = t2
l = ml
s += repOff
// Not worth checking more.
break
}
}
// If the next long is a candidate, use that...
t2 = lCandidate.Cur.offset - e.cur
if nextS-t2 < maxMatchOffset {
if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
// This is ok, but check previous as well.
}
}
// If the previous long is a candidate, use that...
t2 = lCandidate.Prev.offset - e.cur
if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
ml := e.matchlen(nextS+4, t2+4, src) + 4
if ml > l {
t = t2
s = nextS
l = ml
break
}
}
}
break
}
cv = next
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
// Extend the 4-byte match as long as possible.
if l == 0 {
l = e.matchlenLong(s+4, t+4, src) + 4
} else if l == maxMatchLength {
l += e.matchlenLong(s+l, t+l, src)
}
// Try to locate a better match by checking the end-of-match...
if sAt := s + l; sAt < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is 2/3 bytes depending on input.
// 3 is only a little better when it is but sometimes a lot worse.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 2
eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
// Test current
t2 := eLong.Cur.offset - e.cur - l + skipBeginning
s2 := s + skipBeginning
off := s2 - t2
if off < maxMatchOffset {
if off > 0 && t2 >= 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
// Test the previous (older) long candidate as well:
t2 = eLong.Prev.offset - e.cur - l + skipBeginning
off := s2 - t2
if off > 0 && off < maxMatchOffset && t2 >= 0 {
if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
s = s2
}
}
}
}
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
if false {
if t >= s {
panic(fmt.Sprintln("s-t", s, t))
}
if (s - t) > maxMatchOffset {
panic(fmt.Sprintln("mmo", s-t))
}
if l < baseMatchLength {
panic("bml")
}
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
repeat = s - t
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
// Index after match end.
for i := nextS + 1; i < int32(len(src))-8; i += 2 {
cv := load6432(src, i)
e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
}
goto emitRemainder
}
// Store every long hash in-between and every second short.
if true {
for i := nextS + 1; i < s-1; i += 2 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong.Cur, eLong.Prev = t, eLong.Cur
eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
}
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s.
cv = load6432(src, s)
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}

View File

@ -1,16 +0,0 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
// matchLen returns how many bytes match in a and b
//
// It assumes that:
//
// len(a) <= len(b) and len(a) > 0
//
//go:noescape
func matchLen(a []byte, b []byte) int


@ -1,68 +0,0 @@
// Copied from S2 implementation.
//go:build !appengine && !noasm && gc
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
MOVQ a_len+8(FP), DX
// matchLen
XORL SI, SI
CMPL DX, $0x08
JB matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
SARQ $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
matchlen_match4_standalone:
CMPL DX, $0x04
JB matchlen_match2_standalone
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
CMPL DX, $0x02
JB matchlen_match1_standalone
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
LEAL -2(DX), DX
LEAL 2(SI), SI
matchlen_match1_standalone:
CMPL DX, $0x01
JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
INCL SI
gen_match_len_end:
MOVQ SI, ret+48(FP)
RET


@ -1,33 +0,0 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package flate
import (
"encoding/binary"
"math/bits"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shorter of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
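// Example (illustrative): the count stops at the first mismatching byte, so
// matchLen([]byte("gopher!"), []byte("gophers")) == 6; any fully equal 8-byte
// chunks are consumed by a single uint64 comparison before the byte loop runs.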


@ -1,37 +0,0 @@
package flate
const (
// Masks for shifts with register sizes of the shift value.
// This can be used to work around the x86 design of shifting by mod register size.
// It can be used when a variable shift is always smaller than the register size.
// reg8SizeMaskX - shift value is 8 bits, shifted is X
reg8SizeMask8 = 7
reg8SizeMask16 = 15
reg8SizeMask32 = 31
reg8SizeMask64 = 63
// reg16SizeMaskX - shift value is 16 bits, shifted is X
reg16SizeMask8 = reg8SizeMask8
reg16SizeMask16 = reg8SizeMask16
reg16SizeMask32 = reg8SizeMask32
reg16SizeMask64 = reg8SizeMask64
// reg32SizeMaskX - shift value is 32 bits, shifted is X
reg32SizeMask8 = reg8SizeMask8
reg32SizeMask16 = reg8SizeMask16
reg32SizeMask32 = reg8SizeMask32
reg32SizeMask64 = reg8SizeMask64
// reg64SizeMaskX - shift value is 64 bits, shifted is X
reg64SizeMask8 = reg8SizeMask8
reg64SizeMask16 = reg8SizeMask16
reg64SizeMask32 = reg8SizeMask32
reg64SizeMask64 = reg8SizeMask64
// regSizeMaskUintX - shift value is uint, shifted is X
regSizeMaskUint8 = reg8SizeMask8
regSizeMaskUint16 = reg8SizeMask16
regSizeMaskUint32 = reg8SizeMask32
regSizeMaskUint64 = reg8SizeMask64
)
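// Usage sketch (an assumption, not code from this file): masking a variable
// shift count with the matching mask proves to the compiler that the count is
// within register range, so amd64 can emit a single shift instruction with no
// extra branch:
//
//	v >>= s & regSizeMaskUint64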


@ -1,40 +0,0 @@
//go:build !amd64
// +build !amd64
package flate
const (
// Masks for shifts with register sizes of the shift value.
// This can be used to work around the x86 design of shifting by mod register size.
// It can be used when a variable shift is always smaller than the register size.
// reg8SizeMaskX - shift value is 8 bits, shifted is X
reg8SizeMask8 = 0xff
reg8SizeMask16 = 0xff
reg8SizeMask32 = 0xff
reg8SizeMask64 = 0xff
// reg16SizeMaskX - shift value is 16 bits, shifted is X
reg16SizeMask8 = 0xffff
reg16SizeMask16 = 0xffff
reg16SizeMask32 = 0xffff
reg16SizeMask64 = 0xffff
// reg32SizeMaskX - shift value is 32 bits, shifted is X
reg32SizeMask8 = 0xffffffff
reg32SizeMask16 = 0xffffffff
reg32SizeMask32 = 0xffffffff
reg32SizeMask64 = 0xffffffff
// reg64SizeMaskX - shift value is 64 bits, shifted is X
reg64SizeMask8 = 0xffffffffffffffff
reg64SizeMask16 = 0xffffffffffffffff
reg64SizeMask32 = 0xffffffffffffffff
reg64SizeMask64 = 0xffffffffffffffff
// regSizeMaskUintX - shift value is uint, shifted is X
regSizeMaskUint8 = ^uint(0)
regSizeMaskUint16 = ^uint(0)
regSizeMaskUint32 = ^uint(0)
regSizeMaskUint64 = ^uint(0)
)


@ -1,318 +0,0 @@
package flate
import (
"io"
"math"
"sync"
)
const (
maxStatelessBlock = math.MaxInt16
// dictionary will be taken from maxStatelessBlock, so limit it.
maxStatelessDict = 8 << 10
slTableBits = 13
slTableSize = 1 << slTableBits
slTableShift = 32 - slTableBits
)
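// Note (illustrative): maxStatelessBlock equals math.MaxInt16 (32767), which
// is why statelessEnc below can keep its table offsets in int16 entries.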
type statelessWriter struct {
dst io.Writer
closed bool
}
func (s *statelessWriter) Close() error {
if s.closed {
return nil
}
s.closed = true
// Emit EOF block
return StatelessDeflate(s.dst, nil, true, nil)
}
func (s *statelessWriter) Write(p []byte) (n int, err error) {
err = StatelessDeflate(s.dst, p, false, nil)
if err != nil {
return 0, err
}
return len(p), nil
}
func (s *statelessWriter) Reset(w io.Writer) {
s.dst = w
s.closed = false
}
// NewStatelessWriter will do compression but without maintaining any state
// between Write calls.
// There will be no memory kept between Write calls,
// but compression and speed will be suboptimal.
// Because of this, the size of actual Write calls will affect output size.
func NewStatelessWriter(dst io.Writer) io.WriteCloser {
return &statelessWriter{dst: dst}
}
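// Example usage (illustrative, assuming a bytes.Buffer destination):
//
//	var buf bytes.Buffer
//	w := NewStatelessWriter(&buf)
//	_, _ = w.Write(data)
//	_ = w.Close() // emits the final EOF block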
// bitWriterPool contains bit writers that can be reused.
var bitWriterPool = sync.Pool{
New: func() interface{} {
return newHuffmanBitWriter(nil)
},
}
// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
var dst tokens
bw := bitWriterPool.Get().(*huffmanBitWriter)
bw.reset(out)
defer func() {
// don't keep a reference to our output
bw.reset(nil)
bitWriterPool.Put(bw)
}()
if eof && len(in) == 0 {
// Just write an EOF block.
// Could be faster...
bw.writeStoredHeader(0, true)
bw.flush()
return bw.err
}
// Truncate dict
if len(dict) > maxStatelessDict {
dict = dict[len(dict)-maxStatelessDict:]
}
// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
var inDict []byte
for len(in) > 0 {
todo := in
if len(inDict) > 0 {
if len(todo) > maxStatelessBlock-maxStatelessDict {
todo = todo[:maxStatelessBlock-maxStatelessDict]
}
} else if len(todo) > maxStatelessBlock-len(dict) {
todo = todo[:maxStatelessBlock-len(dict)]
}
inOrg := in
in = in[len(todo):]
uncompressed := todo
if len(dict) > 0 {
// combine dict and source
bufLen := len(todo) + len(dict)
combined := make([]byte, bufLen)
copy(combined, dict)
copy(combined[len(dict):], todo)
todo = combined
}
// Compress
if len(inDict) == 0 {
statelessEnc(&dst, todo, int16(len(dict)))
} else {
statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
}
isEof := eof && len(in) == 0
if dst.n == 0 {
bw.writeStoredHeader(len(uncompressed), isEof)
if bw.err != nil {
return bw.err
}
bw.writeBytes(uncompressed)
} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
// If we removed less than 1/16th, Huffman compress the block.
bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
} else {
bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
}
if len(in) > 0 {
// Retain a dict if we have more
inDict = inOrg[len(uncompressed)-maxStatelessDict:]
dict = nil
dst.Reset()
}
if bw.err != nil {
return bw.err
}
}
if !eof {
// Align, only a stored block can do that.
bw.writeStoredHeader(0, false)
}
bw.flush()
return bw.err
}
func hashSL(u uint32) uint32 {
return (u * 0x1e35a7bd) >> slTableShift
}
func load3216(b []byte, i int16) uint32 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:4]
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load6416(b []byte, i int16) uint64 {
// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
b = b[i:]
b = b[:8]
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func statelessEnc(dst *tokens, src []byte, startAt int16) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
type tableEntry struct {
offset int16
}
var table [slTableSize]tableEntry
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src)-int(startAt) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = 0
return
}
// Index until startAt
if startAt > 0 {
cv := load3232(src, 0)
for i := int16(0); i < startAt; i++ {
table[hashSL(cv)] = tableEntry{offset: i}
cv = (cv >> 8) | (uint32(src[i+4]) << 24)
}
}
s := startAt + 1
nextEmit := startAt
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int16(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
cv := load3216(src, s)
for {
const skipLog = 5
const doEvery = 2
nextS := s
var candidate tableEntry
for {
nextHash := hashSL(cv)
candidate = table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit || nextS <= 0 {
goto emitRemainder
}
now := load6416(src, nextS)
table[nextHash] = tableEntry{offset: s}
nextHash = hashSL(uint32(now))
if cv == load3216(src, candidate.offset) {
table[nextHash] = tableEntry{offset: nextS}
break
}
// Do one right away...
cv = uint32(now)
s = nextS
nextS++
candidate = table[nextHash]
now >>= 8
table[nextHash] = tableEntry{offset: s}
if cv == load3216(src, candidate.offset) {
table[nextHash] = tableEntry{offset: nextS}
break
}
cv = uint32(now)
s = nextS
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
t := candidate.offset
l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
s--
t--
l++
}
if nextEmit < s {
if false {
emitLiteral(dst, src[nextEmit:s])
} else {
for _, v := range src[nextEmit:s] {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
}
// Save the match found
dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
s += l
nextEmit = s
if nextS >= s {
s = nextS + 1
}
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-2 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6416(src, s-2)
o := s - 2
prevHash := hashSL(uint32(x))
table[prevHash] = tableEntry{offset: o}
x >>= 16
currHash := hashSL(uint32(x))
candidate = table[currHash]
table[currHash] = tableEntry{offset: o + 2}
if uint32(x) != load3216(src, candidate.offset) {
cv = uint32(x >> 8)
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
// If nothing was added, don't encode literals.
if dst.n == 0 {
return
}
emitLiteral(dst, src[nextEmit:])
}
}


@ -1,379 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
)
const (
// bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
// bits 16-22 offsetcode - 5 bits
// bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
// bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
lengthShift = 22
offsetMask = 1<<lengthShift - 1
typeMask = 3 << 30
literalType = 0 << 30
matchType = 1 << 30
matchOffsetOnlyMask = 0xffff
)
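// Packing sketch (illustrative): AddMatch below effectively stores
//
//	token(matchType | xlength<<lengthShift | offsetCode(xoffset)<<16 | xoffset)
//
// so the low 16 bits carry the base-adjusted offset, the bits above them the
// offset code, bits 22-29 the base-adjusted length, and the top two bits the
// token type.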
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH]
var lengthCodes = [256]uint8{
0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 28,
}
// lengthCodes1 is length codes, but starting at 1.
var lengthCodes1 = [256]uint8{
1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 29,
}
var offsetCodes = [256]uint32{
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
// offsetCodes14 are offsetCodes, but with 14 added.
var offsetCodes14 = [256]uint32{
14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
}
type token uint32
type tokens struct {
extraHist [32]uint16 // codes 256->maxnumlit
offHist [32]uint16 // offset codes
litHist [256]uint16 // codes 0->255
nFilled int
n uint16 // Must be able to contain maxStoreBlockSize
tokens [maxStoreBlockSize + 1]token
}
func (t *tokens) Reset() {
if t.n == 0 {
return
}
t.n = 0
t.nFilled = 0
for i := range t.litHist[:] {
t.litHist[i] = 0
}
for i := range t.extraHist[:] {
t.extraHist[i] = 0
}
for i := range t.offHist[:] {
t.offHist[i] = 0
}
}
func (t *tokens) Fill() {
if t.n == 0 {
return
}
for i, v := range t.litHist[:] {
if v == 0 {
t.litHist[i] = 1
t.nFilled++
}
}
for i, v := range t.extraHist[:literalCount-256] {
if v == 0 {
t.nFilled++
t.extraHist[i] = 1
}
}
for i, v := range t.offHist[:offsetCodeCount] {
if v == 0 {
t.offHist[i] = 1
}
}
}
func indexTokens(in []token) tokens {
var t tokens
t.indexTokens(in)
return t
}
func (t *tokens) indexTokens(in []token) {
t.Reset()
for _, tok := range in {
if tok < matchType {
t.AddLiteral(tok.literal())
continue
}
t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
}
}
// emitLiteral writes a literal chunk to dst.
func emitLiteral(dst *tokens, lit []byte) {
for _, v := range lit {
dst.tokens[dst.n] = token(v)
dst.litHist[v]++
dst.n++
}
}
func (t *tokens) AddLiteral(lit byte) {
t.tokens[t.n] = token(lit)
t.litHist[lit]++
t.n++
}
// from https://stackoverflow.com/a/28730362
func mFastLog2(val float32) float32 {
ux := int32(math.Float32bits(val))
log2 := (float32)(((ux >> 23) & 255) - 128)
ux &= -0x7f800001
ux += 127 << 23
uval := math.Float32frombits(uint32(ux))
log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
return log2
}
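// Sanity check (illustrative): mFastLog2(8) is close to 3 and mFastLog2(1) is
// close to 0; the polynomial keeps the error small enough for the entropy
// estimate in EstimatedBits below.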
// EstimatedBits will return a minimum size, in bits, estimated by an *optimal*
// compression of the block.
func (t *tokens) EstimatedBits() int {
shannon := float32(0)
bits := int(0)
nMatches := 0
total := int(t.n) + t.nFilled
if total > 0 {
invTotal := 1.0 / float32(total)
for _, v := range t.litHist[:] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
}
}
// Just add 15 for EOB
shannon += 15
for i, v := range t.extraHist[1 : literalCount-256] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
bits += int(lengthExtraBits[i&31]) * int(v)
nMatches += int(v)
}
}
}
if nMatches > 0 {
invTotal := 1.0 / float32(nMatches)
for i, v := range t.offHist[:offsetCodeCount] {
if v > 0 {
n := float32(v)
shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
bits += int(offsetExtraBits[i&31]) * int(v)
}
}
}
return int(shannon) + bits
}
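// Example (illustrative): a block containing each of the 256 literal values
// exactly once has a flat histogram, so the Shannon term alone is roughly
// 256*8 bits, plus the 15 bits added for the end-of-block marker.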
// AddMatch adds a match to the tokens.
// This function is very sensitive to inlining and right on the border.
func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
if debugDeflate {
if xlength >= maxMatchLength+baseMatchLength {
panic(fmt.Errorf("invalid length: %v", xlength))
}
if xoffset >= maxMatchOffset+baseMatchOffset {
panic(fmt.Errorf("invalid offset: %v", xoffset))
}
}
oCode := offsetCode(xoffset)
xoffset |= oCode << 16
t.extraHist[lengthCodes1[uint8(xlength)]]++
t.offHist[oCode&31]++
t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
t.n++
}
// AddMatchLong adds a match to the tokens, potentially longer than max match length.
// Length should NOT have the base subtracted, only offset should.
func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
if debugDeflate {
if xoffset >= maxMatchOffset+baseMatchOffset {
panic(fmt.Errorf("invalid offset: %v", xoffset))
}
}
oc := offsetCode(xoffset)
xoffset |= oc << 16
for xlength > 0 {
xl := xlength
if xl > 258 {
// We need to have at least baseMatchLength left over for the next iteration.
if xl > 258+baseMatchLength {
xl = 258
} else {
xl = 258 - baseMatchLength
}
}
xlength -= xl
xl -= baseMatchLength
t.extraHist[lengthCodes1[uint8(xl)]]++
t.offHist[oc&31]++
t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
t.n++
}
}
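// Example (illustrative, assuming baseMatchLength == 3): AddMatchLong(600, off)
// splits the run into chunks of 258, 258 and 84 bytes, emitting one match token
// per chunk with the same offset code.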
func (t *tokens) AddEOB() {
t.tokens[t.n] = token(endBlockMarker)
t.extraHist[0]++
t.n++
}
func (t *tokens) Slice() []token {
return t.tokens[:t.n]
}
// VarInt returns the tokens as varint encoded bytes.
func (t *tokens) VarInt() []byte {
var b = make([]byte, binary.MaxVarintLen32*int(t.n))
var off int
for _, v := range t.tokens[:t.n] {
off += binary.PutUvarint(b[off:], uint64(v))
}
return b[:off]
}
// FromVarInt restores t to the varint encoded tokens provided.
// Any data in t is removed.
func (t *tokens) FromVarInt(b []byte) error {
var buf = bytes.NewReader(b)
var toks []token
for {
r, err := binary.ReadUvarint(buf)
if err == io.EOF {
break
}
if err != nil {
return err
}
toks = append(toks, token(r))
}
t.indexTokens(toks)
return nil
}
// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }
// Returns the literal of a literal token
func (t token) literal() uint8 { return uint8(t) }
// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }
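// Returns the stored (base-adjusted) length of a match token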
func (t token) length() uint8 { return uint8(t >> lengthShift) }
// Convert length to code.
func lengthCode(len uint8) uint8 { return lengthCodes[len] }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
if false {
if off < uint32(len(offsetCodes)) {
return offsetCodes[off&255]
} else if off>>7 < uint32(len(offsetCodes)) {
return offsetCodes[(off>>7)&255] + 14
} else {
return offsetCodes[(off>>14)&255] + 28
}
}
if off < uint32(len(offsetCodes)) {
return offsetCodes[uint8(off)]
}
return offsetCodes14[uint8(off>>7)]
}
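// Example (illustrative): with baseMatchOffset already subtracted, off=0 maps
// to code 0, while off=300 takes the fallback offsetCodes14[300>>7] == 16,
// matching the DEFLATE distance code for distances 257-384.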


@ -1,45 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
# Emacs
*~
\#*\#
.\#*
# vi/vim
.??*.swp
# Mac
.DS_Store
# Eclipse
.project
.settings/
# bin
# Goland
.idea
# VS Code
.vscode


@ -1,16 +0,0 @@
issues:
max-issues-per-linter: 0
max-same-issues: 0
exclude-rules:
- linters:
- errcheck
text: "Unsubscribe"
- linters:
- errcheck
text: "Drain"
- linters:
- errcheck
text: "msg.Ack"
- linters:
- errcheck
text: "watcher.Stop"


@ -1,36 +0,0 @@
language: go
go:
- "1.22.x"
- "1.21.x"
go_import_path: github.com/nats-io/nats.go
install:
- go get -t ./...
- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin
- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then
go install github.com/mattn/goveralls@latest;
go install github.com/wadey/gocovmerge@latest;
go install honnef.co/go/tools/cmd/staticcheck@latest;
go install github.com/client9/misspell/cmd/misspell@latest;
fi
before_script:
- $(exit $(go fmt ./... | wc -l))
- go vet -modfile=go_test.mod ./...
- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then
find . -type f -name "*.go" | xargs misspell -error -locale US;
GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...;
fi
- golangci-lint run ./jetstream/...
script:
- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off
- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... --failfast -vet=off -tags=internal_testing; fi
after_success:
- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi
jobs:
include:
- name: "Go: 1.22.x (nats-server@main)"
go: "1.22.x"
before_script:
- go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main
allow_failures:
- name: "Go: 1.22.x (nats-server@main)"


@ -1,106 +0,0 @@
1
derek
dlc
ivan
acknowledgement/SM
arity
deduplication/S
demarshal/SDG
durables
iff
observable/S
redelivery/S
retransmitting
retry/SB
SlowConsumer
AppendInt
ReadMIMEHeader
clientProtoZero
jetstream
v1
v2
ack/SGD
auth
authToken
chans
creds
config/S
cseq
impl
msgh
msgId
mux/S
nack
ptr
puback
scanf
stderr
stdout
structs
tm
todo
unsub/S
permessage
permessage-deflate
urlA
urlB
websocket
ws
wss
NKey
pList
backend/S
backoff/S
decompressor/CGS
inflight
inlined
lookups
reconnection/MS
redeliver/ADGS
responder/S
rewrap/S
rollup/S
unreceive/DRSZGB
variadic
wakeup/S
whitespace
wrap/AS
omitempty
apache
html
ietf
www
sum256
32bit/S
64bit/S
64k
128k
512k
hacky
handroll/D
rfc6455
rfc7692
0x00
0xff
20x
40x
50x
ErrXXX
atlanta
eu

View File

@ -1,25 +0,0 @@
The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries
but populates the dictionary with identifiers from the Go source.
<https://github.com/kortschak/gospel>
Alas, no comments are allowed in the .words file and newer versions of gospel
error out on seeing them. This is really a hunspell restriction.
We assume en_US hunspell dictionaries are installed and used.
The /AFFIXRULES are defined in en_US.aff (e.g. /usr/share/hunspell/en_US.aff)
Invoke `hunspell -D` to see the actual locations.
Words which are in the base dictionary can't have extra affix rules added to
them, so we have to start with the affixed variant we want to add.
Thus `creds` rather than `cred/S` and so on.
So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants;
we have to use unreceive as the stem.
We can't define our own affix or compound rules,
to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2}
The spelling tokenizer doesn't take "permessage-deflate" as allowing for ...
"permessage-deflate", which is an RFC7692 registered extension for websockets.
We have to explicitly list "permessage".


@ -1,3 +0,0 @@
## Community Code of Conduct
NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).


@ -1,80 +0,0 @@
# Contributing
Thanks for your interest in contributing! This document contains `nats-io/nats.go`-specific contributing details. If you
are a first-time contributor, please refer to the general [NATS Contributor Guide](https://nats.io/contributing/) to get
a comprehensive overview of contributing to the NATS project.
## Getting started
There are three general ways you can contribute to this repo:
- Proposing an enhancement or new feature
- Reporting a bug or regression
- Contributing changes to the source code
For the first two, refer to the [GitHub Issues](https://github.com/nats-io/nats.go/issues/new/choose) which guides you
through the available options along with the needed information to collect.
## Contributing changes
_Prior to opening a pull request, it is recommended to open an issue first to ensure the maintainers can review intended
changes. Exceptions to this rule include fixing non-functional source such as code comments, documentation or other
supporting files._
Proposing source code changes is done through GitHub's standard pull request workflow.
If your branch is a work-in-progress then please start by creating your pull requests as draft, by clicking the
down-arrow next to the `Create pull request` button and instead selecting `Create draft pull request`.
This will defer the automatic process of requesting a review from the NATS team and significantly reduces noise until
you are ready. Once you are happy, you can click the `Ready for review` button.
### Guidelines
A good pull request includes:
- A high-level description of the changes, including links to any issues that are related by adding comments
like `Resolves #NNN` to your description.
See [Linking a Pull Request to an Issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)
for more information.
- An up-to-date parent commit. Please make sure you are pulling in the latest `main` branch and rebasing your work on
top of it, i.e. `git rebase main`.
- Unit tests where appropriate. Bug fixes will benefit from the addition of regression tests. New features will not be
accepted without suitable test coverage!
- No more commits than necessary. Sometimes having multiple commits is useful for telling a story or isolating changes
from one another, but please squash down any unnecessary commits that may just be for clean-up, comments or small
changes.
- No additional external dependencies that aren't absolutely essential. Please do everything you can to avoid pulling in
additional libraries/dependencies into `go.mod` as we will be very critical of these.
### Sign-off
In order to accept a contribution, you will first need to certify that the contribution is your original work and that
you license the work to the project under
the [Apache-2.0 license](https://github.com/nats-io/nats.go/blob/main/LICENSE).
This is done by using `Signed-off-by` statements, which should appear in **both** your commit messages and your PR
description. Please note that we can only accept sign-offs under a legal name. Nicknames and aliases are not permitted.
To perform a sign-off with `git`, use `git commit -s` (or `--signoff`).
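For example, with a hypothetical commit (the trailer is generated from your git `user.name` and `user.email`):

```shell
git commit -s -m "nats: fix reconnect jitter handling"
# The resulting commit message ends with a trailer such as:
# Signed-off-by: Jane Doe <jane@example.com>
```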
## Get help
If you have questions about the contribution process, please start
a [GitHub discussion](https://github.com/nats-io/nats.go/discussions), join the [NATS Slack](https://slack.nats.io/), or
send your question to the [NATS Google Group](https://groups.google.com/forum/#!forum/natsio).
## Testing
You should use `go_test.mod` to manage your testing dependencies. Please use the following command to update your
dependencies and avoid changing the main `go.mod` in a PR:
```shell
go mod tidy -modfile=go_test.mod
```
To run the tests, you can pass the `-modfile=go_test.mod` flag to `go test`, or instead set `GOFLAGS="-modfile=go_test.mod"` as an environment variable:
```shell
go test ./... -modfile=go_test.mod
```

View File

@ -1,3 +0,0 @@
# NATS Go Client Governance
NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md).

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,8 +0,0 @@
# Maintainers
Maintainership is on a per project basis.
### Maintainers
- Derek Collison <derek@nats.io> [@derekcollison](https://github.com/derekcollison)
- Ivan Kozlovic <ivan@nats.io> [@kozlovic](https://github.com/kozlovic)
- Waldemar Quevedo <wally@nats.io> [@wallyqs](https://github.com/wallyqs)

View File

@ -1,495 +0,0 @@
# NATS - Go Client
A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io).
[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url]
[License-Url]: https://www.apache.org/licenses/LICENSE-2.0
[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg
[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go
[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go
[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go
[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main
[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go
[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c
[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main
[Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main
**Check out [NATS by example](https://natsbyexample.com) - An evolving collection of runnable, cross-client reference examples for NATS.**
## Installation
```bash
# Go client
go get github.com/nats-io/nats.go/
# Server
go get github.com/nats-io/nats-server
```
When using or transitioning to Go modules support:
```bash
# Go client latest or explicit version
go get github.com/nats-io/nats.go/@latest
go get github.com/nats-io/nats.go/@v1.35.0
# For latest NATS Server, add /v2 at the end
go get github.com/nats-io/nats-server/v2
# NATS Server v1 is installed otherwise
# go get github.com/nats-io/nats-server
```
## Basic Usage
```go
import "github.com/nats-io/nats.go"
// Connect to a server
nc, _ := nats.Connect(nats.DefaultURL)
// Simple Publisher
nc.Publish("foo", []byte("Hello World"))
// Simple Async Subscriber
nc.Subscribe("foo", func(m *nats.Msg) {
fmt.Printf("Received a message: %s\n", string(m.Data))
})
// Responding to a request message
nc.Subscribe("request", func(m *nats.Msg) {
m.Respond([]byte("answer is 42"))
})
// Simple Sync Subscriber
sub, err := nc.SubscribeSync("foo")
m, err := sub.NextMsg(timeout)
// Channel Subscriber
ch := make(chan *nats.Msg, 64)
sub, err := nc.ChanSubscribe("foo", ch)
msg := <- ch
// Unsubscribe
sub.Unsubscribe()
// Drain
sub.Drain()
// Requests
msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond)
// Replies
nc.Subscribe("help", func(m *nats.Msg) {
nc.Publish(m.Reply, []byte("I can help!"))
})
// Drain connection (Preferred for responders)
// Close() not needed if this is called.
nc.Drain()
// Close connection
nc.Close()
```
## JetStream
JetStream is the built-in NATS persistence system. `nats.go` provides a built-in
API for both managing JetStream assets and publishing/consuming
persistent messages.
### Basic usage
```go
// connect to nats server
nc, _ := nats.Connect(nats.DefaultURL)
// create jetstream context from nats connection
js, _ := jetstream.New(nc)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// get existing stream handle
stream, _ := js.Stream(ctx, "foo")
// retrieve consumer handle from a stream
cons, _ := stream.Consumer(ctx, "cons")
// consume messages from the consumer in callback
cc, _ := cons.Consume(func(msg jetstream.Msg) {
fmt.Println("Received jetstream message: ", string(msg.Data()))
msg.Ack()
})
defer cc.Stop()
```
To find more information on `nats.go` JetStream API, visit
[`jetstream/README.md`](jetstream/README.md)
> The current JetStream API replaces the [legacy JetStream API](legacy_jetstream.md)
## Service API
The service API (`micro`) allows you to [easily build NATS services](micro/README.md). The
service API is currently in beta release.
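As a minimal sketch of what a service might look like (the service name, endpoint name, and subject below are illustrative, not part of the API):

```go
import "github.com/nats-io/nats.go/micro"

nc, _ := nats.Connect(nats.DefaultURL)

// Register a service with one endpoint that echoes the request payload back.
srv, _ := micro.AddService(nc, micro.Config{Name: "EchoService", Version: "1.0.0"})
srv.AddEndpoint("echo", micro.HandlerFunc(func(req micro.Request) {
	req.Respond(req.Data())
}))

// Clients reach the endpoint like any other request/reply subject.
resp, _ := nc.Request("echo", []byte("hello"), time.Second)
fmt.Println(string(resp.Data)) // "hello"
```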
## Encoded Connections
```go
nc, _ := nats.Connect(nats.DefaultURL)
c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
defer c.Close()
// Simple Publisher
c.Publish("foo", "Hello World")
// Simple Async Subscriber
c.Subscribe("foo", func(s string) {
fmt.Printf("Received a message: %s\n", s)
})
// EncodedConn can Publish any raw Go type using the registered Encoder
type person struct {
Name string
Address string
Age int
}
// Go type Subscriber
c.Subscribe("hello", func(p *person) {
fmt.Printf("Received a person: %+v\n", p)
})
me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"}
// Go type Publisher
c.Publish("hello", me)
// Unsubscribe
sub, err := c.Subscribe("foo", func(s string) {})
// ...
sub.Unsubscribe()
// Requests
var response string
err = c.Request("help", "help me", &response, 10*time.Millisecond)
if err != nil {
fmt.Printf("Request failed: %v\n", err)
}
// Replying
c.Subscribe("help", func(subj, reply string, msg string) {
c.Publish(reply, "I can help!")
})
// Close connection
c.Close()
```
## New Authentication (Nkeys and User Credentials)
This requires a server with version >= 2.0.0.
NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys.
The simplest form is to use the helper method UserCredentials(credsFilepath).
```go
nc, err := nats.Connect(url, nats.UserCredentials("user.creds"))
```
The helper method creates two callback handlers to present the user JWT and sign the nonce challenge from the server.
The core client library never has direct access to your private key and simply performs the callback for signing the server challenge.
The helper loads the credentials, then wipes and erases the memory it used, on each connect or reconnect.
The helper can also take two entries, one for the JWT and one for the NKey seed file.
```go
nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk"))
```
You can also set the callback handlers directly and manage challenge signing directly.
```go
nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB))
```
Bare Nkeys are also supported. The nkey seed should be in a read-only file, e.g. seed.txt
```bash
> cat seed.txt
# This is my seed nkey!
SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM
```
`NkeyOptionFromSeed` is a helper function that loads the seed, decodes it, and performs the proper signing of the server nonce.
It clears the memory it used between invocations.
You can also choose the low-level option and provide the public key and a signature callback yourself.
```go
opt, err := nats.NkeyOptionFromSeed("seed.txt")
nc, err := nats.Connect(serverUrl, opt)
// Direct
nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB))
```
## TLS
```go
// tls as a scheme will enable secure connections by default. This will also verify the server name.
nc, err := nats.Connect("tls://nats.demo.io:4443")
// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup.
// We provide a helper method to make this case easier.
nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem"))
// If the server requires a client certificate, there is a helper function for that too:
cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")
nc, err = nats.Connect("tls://localhost:4443", cert)
// You can also supply a complete tls.Config
certFile := "./configs/certs/client-cert.pem"
keyFile := "./configs/certs/client-key.pem"
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
t.Fatalf("error parsing X509 certificate/key pair: %v", err)
}
config := &tls.Config{
ServerName: opts.Host,
Certificates: []tls.Certificate{cert},
RootCAs: pool,
MinVersion: tls.VersionTLS12,
}
nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config))
if err != nil {
t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err)
}
```
## Using Go Channels (netchan)
```go
nc, _ := nats.Connect(nats.DefaultURL)
ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
defer ec.Close()
type person struct {
Name string
Address string
Age int
}
recvCh := make(chan *person)
ec.BindRecvChan("hello", recvCh)
sendCh := make(chan *person)
ec.BindSendChan("hello", sendCh)
me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"}
// Send via Go channels
sendCh <- me
// Receive via Go channels
who := <- recvCh
```
## Wildcard Subscriptions
```go
// "*" matches any token, at any level of the subject.
nc.Subscribe("foo.*.baz", func(m *Msg) {
fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data));
})
nc.Subscribe("foo.bar.*", func(m *Msg) {
fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data));
})
// ">" matches any length of the tail of a subject, and can only be the last token
// E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'
nc.Subscribe("foo.>", func(m *Msg) {
fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data));
})
// Matches all of the above
nc.Publish("foo.bar.baz", []byte("Hello World"))
```
## Queue Groups
```go
// All subscriptions with the same queue name will form a queue group.
// Each message will be delivered to only one subscriber per queue group,
// using queuing semantics. You can have as many queue groups as you wish.
// Normal subscribers will continue to work as expected.
nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) {
received++
})
```
## Advanced Usage
```go
// Normally, the library will return an error when trying to connect and
// there is no server running. The RetryOnFailedConnect option will set
// the connection in reconnecting state if it failed to connect right away.
nc, err := nats.Connect(nats.DefaultURL,
nats.RetryOnFailedConnect(true),
nats.MaxReconnects(10),
nats.ReconnectWait(time.Second),
nats.ReconnectHandler(func(_ *nats.Conn) {
// Note that this will be invoked for the first asynchronous connect.
}))
if err != nil {
// Should not return an error even if it can't connect, but you still
// need to check in case there are some configuration errors.
}
// Flush connection to server, returns when all messages have been processed.
nc.Flush()
fmt.Println("All clear!")
// FlushTimeout specifies a timeout value as well.
err := nc.FlushTimeout(1*time.Second)
if err == nil {
fmt.Println("All clear!")
} else {
fmt.Println("Flush timed out!")
}
// Auto-unsubscribe after MAX_WANTED messages received
const MAX_WANTED = 10
sub, err := nc.Subscribe("foo")
sub.AutoUnsubscribe(MAX_WANTED)
// Multiple connections
nc1, _ := nats.Connect("nats://host1:4222")
nc2, _ := nats.Connect("nats://host2:4222")
nc1.Subscribe("foo", func(m *Msg) {
fmt.Printf("Received a message: %s\n", string(m.Data))
})
nc2.Publish("foo", []byte("Hello World!"))
```
## Clustered Usage
```go
var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224"
nc, err := nats.Connect(servers)
// Optionally set ReconnectWait and MaxReconnect attempts.
// This example means 10 seconds total per backend.
nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second))
// You can also add some jitter for the reconnection.
// This call will add up to 500 milliseconds for non-TLS connections and 2 seconds for TLS connections.
// If not specified, the library defaults to 100 milliseconds and 1 second, respectively.
nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second))
// You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried
// all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter.
// The library will pass the number of times it went through the whole list.
nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration {
return someBackoffFunction(attempts)
}))
// Optionally disable randomization of the server pool
nc, err = nats.Connect(servers, nats.DontRandomize())
// Setup callbacks to be notified on disconnects, reconnects and connection closed.
nc, err = nats.Connect(servers,
nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
fmt.Printf("Got disconnected! Reason: %q\n", err)
}),
nats.ReconnectHandler(func(nc *nats.Conn) {
fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
}),
nats.ClosedHandler(func(nc *nats.Conn) {
fmt.Printf("Connection closed. Reason: %q\n", nc.LastError())
})
)
// When connecting to a mesh of servers with auto-discovery capabilities,
// you may need to provide a username/password or token in order to connect
// to any server in that mesh when authentication is required.
// Instead of providing the credentials in the initial URL, you will use
// new option setters:
nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar"))
// For token based authentication:
nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken"))
// You can even pass the two at the same time in case one of the servers
// in the mesh requires a token instead of a user name and password.
nc, err = nats.Connect("nats://localhost:4222",
nats.UserInfo("foo", "bar"),
nats.Token("S3cretT0ken"))
// Note that if credentials are specified in the initial URLs, they take
// precedence over the credentials specified through the options.
// For instance, in the connect call below, the client library will use
// the user "my" and password "pwd" to connect to localhost:4222, however,
// it will use username "foo" and password "bar" when (re)connecting to
// a different server URL that it got as part of the auto-discovery.
nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar"))
```
## Context support (+Go 1.7)
```go
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
nc, err := nats.Connect(nats.DefaultURL)
// Request with context
msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar"))
// Synchronous subscriber with context
sub, err := nc.SubscribeSync("foo")
msg, err := sub.NextMsgWithContext(ctx)
// Encoded Request with context
c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
type request struct {
Message string `json:"message"`
}
type response struct {
Code int `json:"code"`
}
req := &request{Message: "Hello"}
resp := &response{}
err := c.RequestWithContext(ctx, "foo", req, resp)
```
## Backwards compatibility
In the development of nats.go, we are committed to maintaining backward compatibility and ensuring a stable and reliable experience for all users. In general, we follow the standard Go compatibility guidelines.
However, it's important to clarify our stance on certain types of changes:
- **Expanding structures:**
Adding new fields to structs is not considered a breaking change.
- **Adding methods to exported interfaces:**
Extending public interfaces with new methods is also not viewed as a breaking change within the context of this project. It is important to note that no unexported methods will be added to interfaces, allowing users to implement them.
Additionally, this library always supports at least the two latest minor Go versions. For example, if the latest Go version is 1.22, the library will support Go 1.21 and 1.22.
## License
Unless otherwise noted, the NATS source files are distributed
under the Apache Version 2.0 license found in the LICENSE file.
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large)

View File

@ -1,244 +0,0 @@
// Copyright 2016-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nats
import (
"context"
"reflect"
)
// RequestMsgWithContext takes a context and a Msg, and makes a request
// expecting a single response.
func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) {
if msg == nil {
return nil, ErrInvalidMsg
}
hdr, err := msg.headerBytes()
if err != nil {
return nil, err
}
return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data)
}
// RequestWithContext takes a context, a subject and a payload in bytes,
// and makes a request expecting a single response.
func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
return nc.requestWithContext(ctx, subj, nil, data)
}
func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
if ctx == nil {
return nil, ErrInvalidContext
}
if nc == nil {
return nil, ErrInvalidConnection
}
// Check whether the context is done already before making
// the request.
if ctx.Err() != nil {
return nil, ctx.Err()
}
var m *Msg
var err error
// If user wants the old style.
if nc.useOldRequestStyle() {
m, err = nc.oldRequestWithContext(ctx, subj, hdr, data)
} else {
mch, token, err := nc.createNewRequestAndSend(subj, hdr, data)
if err != nil {
return nil, err
}
var ok bool
select {
case m, ok = <-mch:
if !ok {
return nil, ErrConnectionClosed
}
case <-ctx.Done():
nc.mu.Lock()
delete(nc.respMap, token)
nc.mu.Unlock()
return nil, ctx.Err()
}
}
// Check for no responder status.
if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
m, err = nil, ErrNoResponders
}
return m, err
}
// oldRequestWithContext utilizes inbox and subscription per request.
func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
inbox := nc.NewInbox()
ch := make(chan *Msg, RequestChanLen)
s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil)
if err != nil {
return nil, err
}
s.AutoUnsubscribe(1)
defer s.Unsubscribe()
err = nc.publish(subj, inbox, hdr, data)
if err != nil {
return nil, err
}
return s.NextMsgWithContext(ctx)
}
func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) {
if ctx == nil {
return nil, ErrInvalidContext
}
if s == nil {
return nil, ErrBadSubscription
}
if ctx.Err() != nil {
return nil, ctx.Err()
}
s.mu.Lock()
err := s.validateNextMsgState(pullSubInternal)
if err != nil {
s.mu.Unlock()
return nil, err
}
// snapshot
mch := s.mch
s.mu.Unlock()
var ok bool
var msg *Msg
// If something is available right away, let's optimize that case.
select {
case msg, ok = <-mch:
if !ok {
return nil, s.getNextMsgErr()
}
if err := s.processNextMsgDelivered(msg); err != nil {
return nil, err
}
return msg, nil
default:
// If internal and we don't want to wait, signal that there is no
// message in the internal queue.
if pullSubInternal && !waitIfNoMsg {
return nil, errNoMessages
}
}
select {
case msg, ok = <-mch:
if !ok {
return nil, s.getNextMsgErr()
}
if err := s.processNextMsgDelivered(msg); err != nil {
return nil, err
}
case <-ctx.Done():
return nil, ctx.Err()
}
return msg, nil
}
// NextMsgWithContext takes a context and returns the next message
// available to a synchronous subscriber, blocking until it is delivered
// or context gets canceled.
func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) {
return s.nextMsgWithContext(ctx, false, true)
}
// FlushWithContext will allow a context to control the duration
// of a Flush() call. This context should be non-nil and should
// have a deadline set. We will return an error if none is present.
func (nc *Conn) FlushWithContext(ctx context.Context) error {
if nc == nil {
return ErrInvalidConnection
}
if ctx == nil {
return ErrInvalidContext
}
_, ok := ctx.Deadline()
if !ok {
return ErrNoDeadlineContext
}
nc.mu.Lock()
if nc.isClosed() {
nc.mu.Unlock()
return ErrConnectionClosed
}
// Create a buffered channel to prevent chan send to block
// in processPong()
ch := make(chan struct{}, 1)
nc.sendPing(ch)
nc.mu.Unlock()
var err error
select {
case _, ok := <-ch:
if !ok {
err = ErrConnectionClosed
} else {
close(ch)
}
case <-ctx.Done():
err = ctx.Err()
}
if err != nil {
nc.removeFlushEntry(ch)
}
return err
}
// RequestWithContext will create an Inbox and perform a Request
// using the provided cancellation context with the Inbox reply
// for the data v. A response will be decoded into the vPtr last parameter.
func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error {
if ctx == nil {
return ErrInvalidContext
}
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
m, err := c.Conn.RequestWithContext(ctx, subject, b)
if err != nil {
return err
}
if reflect.TypeOf(vPtr) == emptyMsgType {
mPtr := vPtr.(*Msg)
*mPtr = *m
} else {
err := c.Enc.Decode(m.Subject, m.Data, vPtr)
if err != nil {
return err
}
}
return nil
}

View File

@ -1,15 +0,0 @@
# External Dependencies
This file lists the dependencies used in this repository.
| Dependency | License |
|-----------------------------------|--------------|
| Go | BSD 3-Clause |
| github.com/golang/protobuf/proto | BSD-3-Clause |
| github.com/klauspost/compress | BSD-3-Clause |
| github.com/nats-io/nats-server/v2 | Apache-2.0 |
| github.com/nats-io/nkeys | Apache-2.0 |
| github.com/nats-io/nuid | Apache-2.0 |
| go.uber.org/goleak | MIT |
| golang.org/x/text | BSD-3-Clause |
| google.golang.org/protobuf | BSD-3-Clause |

View File

@ -1,269 +0,0 @@
// Copyright 2012-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nats
import (
"errors"
"fmt"
"reflect"
"sync"
"time"
// Default Encoders
"github.com/nats-io/nats.go/encoders/builtin"
)
// Encoder interface is for all registered encoders
type Encoder interface {
Encode(subject string, v any) ([]byte, error)
Decode(subject string, data []byte, vPtr any) error
}
var encMap map[string]Encoder
var encLock sync.Mutex
// Indexed names into the Registered Encoders.
const (
JSON_ENCODER = "json"
GOB_ENCODER = "gob"
DEFAULT_ENCODER = "default"
)
func init() {
encMap = make(map[string]Encoder)
// Register json, gob and default encoder
RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{})
RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{})
RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{})
}
// EncodedConn is the preferred way to interface with NATS. It wraps a bare connection to
// a nats server and has an extendable encoder system that will encode and decode messages
// from raw Go types.
type EncodedConn struct {
Conn *Conn
Enc Encoder
}
// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered
// encoder.
func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) {
if c == nil {
return nil, errors.New("nats: Nil Connection")
}
if c.IsClosed() {
return nil, ErrConnectionClosed
}
ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)}
if ec.Enc == nil {
return nil, fmt.Errorf("no encoder registered for '%s'", encType)
}
return ec, nil
}
// RegisterEncoder will register the encType with the given Encoder. Useful for customization.
func RegisterEncoder(encType string, enc Encoder) {
encLock.Lock()
defer encLock.Unlock()
encMap[encType] = enc
}
// EncoderForType will return the registered Encoder for the encType.
func EncoderForType(encType string) Encoder {
encLock.Lock()
defer encLock.Unlock()
return encMap[encType]
}
// Publish publishes the data argument to the given subject. The data argument
// will be encoded using the associated encoder.
func (c *EncodedConn) Publish(subject string, v any) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
return c.Conn.publish(subject, _EMPTY_, nil, b)
}
// PublishRequest will perform a Publish() expecting a response on the
// reply subject. Use Request() for automatically waiting for a response
// inline.
func (c *EncodedConn) PublishRequest(subject, reply string, v any) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
return c.Conn.publish(subject, reply, nil, b)
}
// Request will create an Inbox and perform a Request() call
// with the Inbox reply for the data v. A response will be
// decoded into the vPtr Response.
func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
m, err := c.Conn.Request(subject, b, timeout)
if err != nil {
return err
}
if reflect.TypeOf(vPtr) == emptyMsgType {
mPtr := vPtr.(*Msg)
*mPtr = *m
} else {
err = c.Enc.Decode(m.Subject, m.Data, vPtr)
}
return err
}
// Handler is a specific callback used for Subscribe. It is generalized to
// an any, but we will discover its format and arguments at runtime
// and perform the correct callback, including demarshaling encoded data
// back into the appropriate struct based on the signature of the Handler.
//
// Handlers are expected to have one of four signatures.
//
// type person struct {
// Name string `json:"name,omitempty"`
// Age uint `json:"age,omitempty"`
// }
//
// handler := func(m *Msg)
// handler := func(p *person)
// handler := func(subject string, o *obj)
// handler := func(subject, reply string, o *obj)
//
// These forms allow a callback to request a raw Msg ptr, where the processing
// of the message from the wire is untouched, or to have the library process a JSON
// representation and demarshal it into the given struct, e.g. person.
// There are also variants where the callback wants either the subject, or the
// subject and the reply subject.
type Handler any
// Dissect the cb Handler's signature
func argInfo(cb Handler) (reflect.Type, int) {
cbType := reflect.TypeOf(cb)
if cbType.Kind() != reflect.Func {
panic("nats: Handler needs to be a func")
}
numArgs := cbType.NumIn()
if numArgs == 0 {
return nil, numArgs
}
return cbType.In(numArgs - 1), numArgs
}
var emptyMsgType = reflect.TypeOf(&Msg{})
// Subscribe will create a subscription on the given subject and process incoming
// messages using the specified Handler. The Handler should be a func that matches
// a signature from the description of Handler from above.
func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) {
return c.subscribe(subject, _EMPTY_, cb)
}
// QueueSubscribe will create a queue subscription on the given subject and process
// incoming messages using the specified Handler. The Handler should be a func that
// matches a signature from the description of Handler from above.
func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) {
return c.subscribe(subject, queue, cb)
}
// Internal implementation that all public functions will use.
func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) {
if cb == nil {
return nil, errors.New("nats: Handler required for EncodedConn Subscription")
}
argType, numArgs := argInfo(cb)
if argType == nil {
return nil, errors.New("nats: Handler requires at least one argument")
}
cbValue := reflect.ValueOf(cb)
wantsRaw := (argType == emptyMsgType)
natsCB := func(m *Msg) {
var oV []reflect.Value
if wantsRaw {
oV = []reflect.Value{reflect.ValueOf(m)}
} else {
var oPtr reflect.Value
if argType.Kind() != reflect.Ptr {
oPtr = reflect.New(argType)
} else {
oPtr = reflect.New(argType.Elem())
}
if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
if c.Conn.Opts.AsyncErrorCB != nil {
c.Conn.ach.push(func() {
c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error()))
})
}
return
}
if argType.Kind() != reflect.Ptr {
oPtr = reflect.Indirect(oPtr)
}
// Callback Arity
switch numArgs {
case 1:
oV = []reflect.Value{oPtr}
case 2:
subV := reflect.ValueOf(m.Subject)
oV = []reflect.Value{subV, oPtr}
case 3:
subV := reflect.ValueOf(m.Subject)
replyV := reflect.ValueOf(m.Reply)
oV = []reflect.Value{subV, replyV, oPtr}
}
}
cbValue.Call(oV)
}
return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil)
}
// FlushTimeout allows a Flush operation to have an associated timeout.
func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) {
return c.Conn.FlushTimeout(timeout)
}
// Flush will perform a round trip to the server and return when it
// receives the internal reply.
func (c *EncodedConn) Flush() error {
return c.Conn.Flush()
}
// Close will close the connection to the server. This call will release
// all blocking calls, such as Flush(), etc.
func (c *EncodedConn) Close() {
c.Conn.Close()
}
// Drain will put a connection into a drain state. All subscriptions will
// immediately be put into a drain state. Upon completion, the publishers
// will be drained and can not publish any additional messages. Upon draining
// of the publishers, the connection will be closed. Use the ClosedCB()
// option to know when the connection has moved from draining to closed.
func (c *EncodedConn) Drain() error {
return c.Conn.Drain()
}
// LastError reports the last error encountered via the Connection.
func (c *EncodedConn) LastError() error {
return c.Conn.LastError()
}

View File

@ -1,117 +0,0 @@
// Copyright 2012-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builtin
import (
"bytes"
"fmt"
"reflect"
"strconv"
"unsafe"
)
// DefaultEncoder implementation for EncodedConn.
// This encoder will leave []byte and string untouched, but will attempt to
// turn numbers into appropriate strings that can be decoded. It will also
properly encode and decode bools. It will encode a struct, but if you want
// to properly handle structures you should use JsonEncoder.
type DefaultEncoder struct {
// Empty
}
var trueB = []byte("true")
var falseB = []byte("false")
var nilB = []byte("")
// Encode
func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) {
switch arg := v.(type) {
case string:
bytes := *(*[]byte)(unsafe.Pointer(&arg))
return bytes, nil
case []byte:
return arg, nil
case bool:
if arg {
return trueB, nil
} else {
return falseB, nil
}
case nil:
return nilB, nil
default:
var buf bytes.Buffer
fmt.Fprintf(&buf, "%+v", arg)
return buf.Bytes(), nil
}
}
// Decode
func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error {
// Figure out what it's pointing to...
sData := *(*string)(unsafe.Pointer(&data))
switch arg := vPtr.(type) {
case *string:
*arg = sData
return nil
case *[]byte:
*arg = data
return nil
case *int:
n, err := strconv.ParseInt(sData, 10, 64)
if err != nil {
return err
}
*arg = int(n)
return nil
case *int32:
n, err := strconv.ParseInt(sData, 10, 64)
if err != nil {
return err
}
*arg = int32(n)
return nil
case *int64:
n, err := strconv.ParseInt(sData, 10, 64)
if err != nil {
return err
}
*arg = int64(n)
return nil
case *float32:
n, err := strconv.ParseFloat(sData, 32)
if err != nil {
return err
}
*arg = float32(n)
return nil
case *float64:
n, err := strconv.ParseFloat(sData, 64)
if err != nil {
return err
}
*arg = float64(n)
return nil
case *bool:
b, err := strconv.ParseBool(sData)
if err != nil {
return err
}
*arg = b
return nil
default:
vt := reflect.TypeOf(arg).Elem()
return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt)
}
}

View File

@ -1,45 +0,0 @@
// Copyright 2013-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builtin
import (
"bytes"
"encoding/gob"
)
// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn.
// This encoder will use the builtin encoding/gob to Marshal
// and Unmarshal most types, including structs.
type GobEncoder struct {
// Empty
}
// FIXME(dlc) - This could probably be more efficient.
// Encode
func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) {
b := new(bytes.Buffer)
enc := gob.NewEncoder(b)
if err := enc.Encode(v); err != nil {
return nil, err
}
return b.Bytes(), nil
}
// Decode
func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) {
dec := gob.NewDecoder(bytes.NewBuffer(data))
err = dec.Decode(vPtr)
return
}

View File

@ -1,56 +0,0 @@
// Copyright 2012-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builtin
import (
"encoding/json"
"strings"
)
// JsonEncoder is a JSON Encoder implementation for EncodedConn.
// This encoder will use the builtin encoding/json to Marshal
// and Unmarshal most types, including structs.
type JsonEncoder struct {
// Empty
}
// Encode
func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) {
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
return b, nil
}
// Decode
func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) {
switch arg := vPtr.(type) {
case *string:
// If they want a string and it is a JSON string, strip quotes
// This allows someone to send a struct but receive as a plain string
// This cast should be efficient for Go 1.3 and beyond.
str := string(data)
if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) {
*arg = str[1 : len(str)-1]
} else {
*arg = str
}
case *[]byte:
*arg = data
default:
err = json.Unmarshal(data, arg)
}
return
}

View File

@ -1,23 +0,0 @@
module github.com/nats-io/nats.go
go 1.19
require (
github.com/golang/protobuf v1.4.2
github.com/klauspost/compress v1.17.6
github.com/nats-io/jwt v1.2.2
github.com/nats-io/nats-server/v2 v2.10.11
github.com/nats-io/nkeys v0.4.7
github.com/nats-io/nuid v1.0.1
go.uber.org/goleak v1.3.0
golang.org/x/text v0.14.0
google.golang.org/protobuf v1.23.0
)
require (
github.com/minio/highwayhash v1.0.2 // indirect
github.com/nats-io/jwt/v2 v2.5.3 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
)

View File

@ -1,56 +0,0 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
github.com/nats-io/jwt/v2 v2.5.3 h1:/9SWvzc6hTfamcgXJ3uYRpgj+QuY2aLNqRiqrKcrpEo=
github.com/nats-io/jwt/v2 v2.5.3/go.mod h1:iysuPemFcc7p4IoYots3IuELSI4EDe9Y0bQMe+I3Bf4=
github.com/nats-io/nats-server/v2 v2.10.11 h1:yKUiLVincZISpo3A4YljJQ+HfLltGAgoNNJl99KL8I0=
github.com/nats-io/nats-server/v2 v2.10.11/go.mod h1:dXtOqVWzbMTEj+tUyC/itXjJhW37xh0tUBrTAlqAfx8=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View File

@ -1,104 +0,0 @@
// Copyright 2020-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"errors"
"fmt"
)
const (
AckDomainTokenPos = iota + 2
AckAccHashTokenPos
AckStreamTokenPos
AckConsumerTokenPos
AckNumDeliveredTokenPos
AckStreamSeqTokenPos
AckConsumerSeqTokenPos
AckTimestampSeqTokenPos
AckNumPendingTokenPos
)
var ErrInvalidSubjectFormat = errors.New("invalid format of ACK subject")
// Quick parser for positive numbers in ack reply encoding.
// NOTE: This parser does not detect uint64 overflow
func ParseNum(d string) (n uint64) {
if len(d) == 0 {
return 0
}
// ASCII numbers 0-9
const (
asciiZero = 48
asciiNine = 57
)
for _, dec := range d {
if dec < asciiZero || dec > asciiNine {
return 0
}
n = n*10 + uint64(dec) - asciiZero
}
return
}
func GetMetadataFields(subject string) ([]string, error) {
v1TokenCounts, v2TokenCounts := 9, 12
var start int
tokens := make([]string, 0, v2TokenCounts)
for i := 0; i < len(subject); i++ {
if subject[i] == '.' {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
//
// Newer servers will include the domain name and account hash in the subject,
// and a token at the end.
//
// Old subject was:
// $JS.ACK.<stream>.<consumer>.<delivered>.<sseq>.<cseq>.<tm>.<pending>
//
// New subject would be:
// $JS.ACK.<domain>.<account hash>.<stream>.<consumer>.<delivered>.<sseq>.<cseq>.<tm>.<pending>.<a token with a random value>
//
// v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since
// it may be removed in the future. Also, the library has no use for it.
// The point is that a v2 ACK subject is valid if it has at least 11 tokens.
//
tokensLen := len(tokens)
// If fewer than 9 tokens, or more than 9 but fewer than 11, report an error
if tokensLen < v1TokenCounts || (tokensLen > v1TokenCounts && tokensLen < v2TokenCounts-1) {
return nil, ErrInvalidSubjectFormat
}
if tokens[0] != "$JS" || tokens[1] != "ACK" {
return nil, fmt.Errorf("%w: subject should start with $JS.ACK", ErrInvalidSubjectFormat)
}
// For v1 style, we insert 2 empty tokens (domain and hash) so that the
// rest of the library references known fields at a constant location.
if tokensLen == v1TokenCounts {
// Extend the array (we know the backend is big enough)
tokens = append(tokens[:AckDomainTokenPos+2], tokens[AckDomainTokenPos:]...)
// Clear the domain and hash tokens
tokens[AckDomainTokenPos], tokens[AckAccHashTokenPos] = "", ""
} else if tokens[AckDomainTokenPos] == "_" {
// If domain is "_", replace with empty value.
tokens[AckDomainTokenPos] = ""
}
return tokens, nil
}

File diff suppressed because it is too large

View File

@ -1,151 +0,0 @@
// Copyright 2022-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"encoding/json"
"strings"
)
type (
apiResponse struct {
Type string `json:"type"`
Error *APIError `json:"error,omitempty"`
}
// apiPaged includes variables used to create paged responses from the JSON API
apiPaged struct {
Total int `json:"total"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
)
// Request API subjects for JetStream.
const (
// DefaultAPIPrefix is the default prefix for the JetStream API.
DefaultAPIPrefix = "$JS.API."
// jsDomainT is used to create JetStream API prefix by specifying only Domain
jsDomainT = "$JS.%s.API."
// jsExtDomainT is used to create a StreamSource External APIPrefix
jsExtDomainT = "$JS.%s.API"
// apiAccountInfo is for obtaining general information about JetStream.
apiAccountInfo = "INFO"
// apiConsumerCreateT is used to create consumers.
apiConsumerCreateT = "CONSUMER.CREATE.%s.%s"
// apiConsumerCreateWithFilterSubjectT is used to create consumers.
// It accepts the stream name, consumer name and filter subject.
apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s"
// apiConsumerInfoT is used to get consumer information.
apiConsumerInfoT = "CONSUMER.INFO.%s.%s"
// apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode.
apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s"
// apiConsumerDeleteT is used to delete consumers.
apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s"
// apiConsumerListT is used to return all detailed consumer information
apiConsumerListT = "CONSUMER.LIST.%s"
// apiConsumerNamesT is used to return a list with all consumer names for the stream.
apiConsumerNamesT = "CONSUMER.NAMES.%s"
// apiStreams can lookup a stream by subject.
apiStreams = "STREAM.NAMES"
// apiStreamCreateT is the endpoint to create new streams.
apiStreamCreateT = "STREAM.CREATE.%s"
// apiStreamInfoT is the endpoint to get information on a stream.
apiStreamInfoT = "STREAM.INFO.%s"
// apiStreamUpdateT is the endpoint to update existing streams.
apiStreamUpdateT = "STREAM.UPDATE.%s"
// apiStreamDeleteT is the endpoint to delete streams.
apiStreamDeleteT = "STREAM.DELETE.%s"
// apiStreamPurgeT is the endpoint to purge streams.
apiStreamPurgeT = "STREAM.PURGE.%s"
// apiStreamListT is the endpoint that will return all detailed stream information
apiStreamListT = "STREAM.LIST"
// apiMsgGetT is the endpoint to get a message.
apiMsgGetT = "STREAM.MSG.GET.%s"
// apiDirectMsgGetT is the endpoint to perform a direct get of a message.
apiDirectMsgGetT = "DIRECT.GET.%s"
// apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject.
apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s"
// apiMsgDeleteT is the endpoint to remove a message.
apiMsgDeleteT = "STREAM.MSG.DELETE.%s"
)
func (js *jetStream) apiRequestJSON(ctx context.Context, subject string, resp any, data ...[]byte) (*jetStreamMsg, error) {
jsMsg, err := js.apiRequest(ctx, subject, data...)
if err != nil {
return nil, err
}
if err := json.Unmarshal(jsMsg.Data(), resp); err != nil {
return nil, err
}
return jsMsg, nil
}
// apiRequest performs a RequestWithContext, with optional tracing via the
// ClientTrace callbacks.
func (js *jetStream) apiRequest(ctx context.Context, subj string, data ...[]byte) (*jetStreamMsg, error) {
var req []byte
if len(data) > 0 {
req = data[0]
}
if js.clientTrace != nil {
ctrace := js.clientTrace
if ctrace.RequestSent != nil {
ctrace.RequestSent(subj, req)
}
}
resp, err := js.conn.RequestWithContext(ctx, subj, req)
if err != nil {
return nil, err
}
if js.clientTrace != nil {
ctrace := js.clientTrace
if ctrace.ResponseReceived != nil {
ctrace.ResponseReceived(subj, resp.Data, resp.Header)
}
}
return js.toJSMsg(resp), nil
}
func apiSubj(prefix, subject string) string {
if prefix == "" {
return subject
}
var b strings.Builder
b.WriteString(prefix)
b.WriteString(subject)
return b.String()
}
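
As a sketch (not part of the diff), this is how the subject templates above compose with the prefix via apiSubj; stream and consumer names are illustrative, and the snippet assumes the same package as the constants:

```go
// Hypothetical usage sketch, inside a function in this package.
subject := apiSubj(DefaultAPIPrefix, fmt.Sprintf(apiConsumerInfoT, "ORDERS", "worker"))
// subject == "$JS.API.CONSUMER.INFO.ORDERS.worker"
_ = subject
```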


@@ -1,331 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"strings"
"github.com/nats-io/nuid"
)
type (
// Consumer contains methods for fetching/processing messages from a stream,
// as well as fetching consumer info.
//
// This package provides two implementations of Consumer interface:
//
// - Standard named/ephemeral pull consumers. These consumers are created using
// CreateConsumer method on Stream or JetStream interface. They can be
// explicitly configured (using [ConsumerConfig]) and managed by the user,
// either from this package or externally.
//
// - Ordered consumers. These consumers are created using OrderedConsumer
// method on Stream or JetStream interface. They are managed by the library
// and provide a simple way to consume messages from a stream. Ordered
// consumers are ephemeral in-memory pull consumers and are resilient to
// deletes and restarts. They provide limited configuration options
// using [OrderedConsumerConfig].
//
// Consumer provides method for optimized continuous consumption of messages
// using Consume and Messages methods, as well as simple one-off messages
// retrieval using Fetch and Next methods.
Consumer interface {
// Fetch is used to retrieve up to a provided number of messages from a
// stream. This method sends a single request and delivers all requested
// messages unless the timeout is reached first. Fetch timeout defaults to
// 30 seconds and can be configured using the FetchMaxWait option.
//
// By default, Fetch uses a 5s idle heartbeat for requests longer than
// 10 seconds. For shorter requests, the idle heartbeat is disabled.
// This can be configured using FetchHeartbeat option. If a client does
// not receive a heartbeat message from a stream for more than 2 times
// the idle heartbeat setting, Fetch will return [ErrNoHeartbeat].
//
// Fetch is non-blocking and returns MessageBatch, exposing a channel
// for delivered messages.
//
// Messages channel is always closed, thus it is safe to range over it
// without additional checks.
Fetch(batch int, opts ...FetchOpt) (MessageBatch, error)
// FetchBytes is used to retrieve up to a provided number of bytes from the
// stream. This method sends a single request and delivers up to the
// provided number of bytes unless the timeout is reached first. FetchBytes
// timeout defaults to 30 seconds and can be configured using the
// FetchMaxWait option.
//
// By default, FetchBytes uses a 5s idle heartbeat for requests longer than
// 10 seconds. For shorter requests, the idle heartbeat is disabled.
// This can be configured using FetchHeartbeat option. If a client does
// not receive a heartbeat message from a stream for more than 2 times
// the idle heartbeat setting, FetchBytes will return ErrNoHeartbeat.
//
// FetchBytes is non-blocking and returns MessageBatch, exposing a channel
// for delivered messages.
//
// Messages channel is always closed, thus it is safe to range over it
// without additional checks.
FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error)
// FetchNoWait is used to retrieve up to a provided number of messages
// from a stream. Unlike Fetch, FetchNoWait will only deliver messages
// that are currently available in the stream and will not wait for new
// messages to arrive, even if batch size is not met.
//
// FetchNoWait is non-blocking and returns MessageBatch, exposing a
// channel for delivered messages.
//
// Messages channel is always closed, thus it is safe to range over it
// without additional checks.
FetchNoWait(batch int) (MessageBatch, error)
// Consume will continuously receive messages and handle them
// with the provided callback function. Consume can be configured using
// PullConsumeOpt options:
//
// - Error handling and monitoring can be configured using ConsumeErrHandler
// option, which provides information about errors encountered during
// consumption (both transient and terminal)
// - Consume can be configured to stop after a certain number of
// messages is received using StopAfter option.
// - Consume can be optimized for throughput or memory usage using
// PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options.
// Unless there is a specific use case, these options should not be used.
//
// Consume returns a ConsumeContext, which can be used to stop or drain
// the consumer.
Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error)
// Messages returns MessagesContext, allowing continuously iterating
// over messages on a stream. Messages can be configured using
// PullMessagesOpt options:
//
// - Messages can be optimized for throughput or memory usage using
// PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options.
// Unless there is a specific use case, these options should not be used.
// - WithMessagesErrOnMissingHeartbeat can be used to enable/disable
// erroring out on MessagesContext.Next when a heartbeat is missing.
// This option is enabled by default.
Messages(opts ...PullMessagesOpt) (MessagesContext, error)
// Next is used to retrieve the next message from the consumer. This
// method will block until the message is retrieved or timeout is
// reached.
Next(opts ...FetchOpt) (Msg, error)
// Info fetches current ConsumerInfo from the server.
Info(context.Context) (*ConsumerInfo, error)
// CachedInfo returns ConsumerInfo currently cached on this consumer.
// This method does not perform any network requests. The cached
// ConsumerInfo is updated on every call to Info and Update.
CachedInfo() *ConsumerInfo
}
createConsumerRequest struct {
Stream string `json:"stream_name"`
Config *ConsumerConfig `json:"config"`
Action string `json:"action"`
}
)
// Info fetches current ConsumerInfo from the server.
func (p *pullConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
infoSubject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiConsumerInfoT, p.stream, p.name))
var resp consumerInfoResponse
if _, err := p.jetStream.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
return nil, err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
return nil, ErrConsumerNotFound
}
return nil, resp.Error
}
if resp.Error == nil && resp.ConsumerInfo == nil {
return nil, ErrConsumerNotFound
}
p.info = resp.ConsumerInfo
return resp.ConsumerInfo, nil
}
// CachedInfo returns ConsumerInfo currently cached on this consumer.
// This method does not perform any network requests. The cached
// ConsumerInfo is updated on every call to Info and Update.
func (p *pullConsumer) CachedInfo() *ConsumerInfo {
return p.info
}
func upsertConsumer(ctx context.Context, js *jetStream, stream string, cfg ConsumerConfig, action string) (Consumer, error) {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
req := createConsumerRequest{
Stream: stream,
Config: &cfg,
Action: action,
}
reqJSON, err := json.Marshal(req)
if err != nil {
return nil, err
}
consumerName := cfg.Name
if consumerName == "" {
if cfg.Durable != "" {
consumerName = cfg.Durable
} else {
consumerName = generateConsName()
}
}
if err := validateConsumerName(consumerName); err != nil {
return nil, err
}
var ccSubj string
if cfg.FilterSubject != "" && len(cfg.FilterSubjects) == 0 {
if err := validateSubject(cfg.FilterSubject); err != nil {
return nil, err
}
ccSubj = apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject))
} else {
ccSubj = apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerCreateT, stream, consumerName))
}
var resp consumerInfoResponse
if _, err := js.apiRequestJSON(ctx, ccSubj, &resp, reqJSON); err != nil {
return nil, err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
return nil, ErrStreamNotFound
}
return nil, resp.Error
}
// check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo
if len(cfg.FilterSubjects) != 0 && len(resp.Config.FilterSubjects) == 0 {
return nil, ErrConsumerMultipleFilterSubjectsNotSupported
}
return &pullConsumer{
jetStream: js,
stream: stream,
name: resp.Name,
durable: cfg.Durable != "",
info: resp.ConsumerInfo,
subscriptions: make(map[string]*pullSubscription),
}, nil
}
const (
consumerActionCreate = "create"
consumerActionUpdate = "update"
consumerActionCreateOrUpdate = ""
)
func generateConsName() string {
name := nuid.Next()
sha := sha256.New()
sha.Write([]byte(name))
b := sha.Sum(nil)
for i := 0; i < 8; i++ {
b[i] = rdigits[int(b[i]%base)]
}
return string(b[:8])
}
func getConsumer(ctx context.Context, js *jetStream, stream, name string) (Consumer, error) {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
if err := validateConsumerName(name); err != nil {
return nil, err
}
infoSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerInfoT, stream, name))
var resp consumerInfoResponse
if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
return nil, err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
return nil, ErrConsumerNotFound
}
return nil, resp.Error
}
if resp.Error == nil && resp.ConsumerInfo == nil {
return nil, ErrConsumerNotFound
}
cons := &pullConsumer{
jetStream: js,
stream: stream,
name: name,
durable: resp.Config.Durable != "",
info: resp.ConsumerInfo,
subscriptions: make(map[string]*pullSubscription, 0),
}
return cons, nil
}
func deleteConsumer(ctx context.Context, js *jetStream, stream, consumer string) error {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
if err := validateConsumerName(consumer); err != nil {
return err
}
deleteSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerDeleteT, stream, consumer))
var resp consumerDeleteResponse
if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil {
return err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
return ErrConsumerNotFound
}
return resp.Error
}
return nil
}
func validateConsumerName(dur string) error {
if dur == "" {
return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, "name is required")
}
if strings.ContainsAny(dur, ">*. /\\") {
return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, dur)
}
return nil
}
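
To make the Consumer contract above concrete, here is a minimal, hedged sketch against the public github.com/nats-io/nats.go/jetstream API (which this vendored copy mirrors). Stream and consumer names are illustrative and error handling is elided:

```go
package main

import (
	"context"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/jetstream"
)

func main() {
	nc, _ := nats.Connect(nats.DefaultURL)
	defer nc.Drain()
	js, _ := jetstream.New(nc)
	ctx := context.Background()

	// Create (or update) a durable pull consumer on stream "ORDERS".
	cons, _ := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
		Durable:   "worker",
		AckPolicy: jetstream.AckExplicitPolicy,
	})

	// One-off retrieval: Fetch is non-blocking and its channel is always
	// closed, so it is safe to range over without additional checks.
	batch, _ := cons.Fetch(10)
	for msg := range batch.Messages() {
		_ = msg.Ack()
	}
}
```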


@@ -1,460 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"encoding/json"
"fmt"
"time"
)
type (
// ConsumerInfo is the detailed information about a JetStream consumer.
ConsumerInfo struct {
// Stream specifies the name of the stream that the consumer is bound
// to.
Stream string `json:"stream_name"`
// Name represents the unique identifier for the consumer. This can be
// either set explicitly by the client or generated automatically if not
// set.
Name string `json:"name"`
// Created is the timestamp when the consumer was created.
Created time.Time `json:"created"`
// Config contains the configuration settings of the consumer, set when
// creating or updating the consumer.
Config ConsumerConfig `json:"config"`
// Delivered holds information about the most recently delivered
// message, including its sequence numbers and timestamp.
Delivered SequenceInfo `json:"delivered"`
// AckFloor indicates the message before the first unacknowledged
// message.
AckFloor SequenceInfo `json:"ack_floor"`
// NumAckPending is the number of messages that have been delivered but
// not yet acknowledged.
NumAckPending int `json:"num_ack_pending"`
// NumRedelivered counts the number of messages that have been
// redelivered and not yet acknowledged. Each message is counted only
// once, even if it has been redelivered multiple times. This count is
// reset when the message is eventually acknowledged.
NumRedelivered int `json:"num_redelivered"`
// NumWaiting is the count of active pull requests. It is only relevant
// for pull-based consumers.
NumWaiting int `json:"num_waiting"`
// NumPending is the number of messages that match the consumer's
// filter, but have not been delivered yet.
NumPending uint64 `json:"num_pending"`
// Cluster contains information about the cluster to which this consumer
// belongs (if applicable).
Cluster *ClusterInfo `json:"cluster,omitempty"`
// PushBound indicates whether at least one subscription exists for the
// delivery subject of this consumer. This is only applicable to
// push-based consumers.
PushBound bool `json:"push_bound,omitempty"`
// TimeStamp indicates when the info was gathered by the server.
TimeStamp time.Time `json:"ts"`
}
// ConsumerConfig is the configuration of a JetStream consumer.
ConsumerConfig struct {
// Name is an optional name for the consumer. If not set, one is
// generated automatically.
//
// Name cannot contain whitespace, ., *, >, path separators (forward or
// backwards slash), and non-printable characters.
Name string `json:"name,omitempty"`
// Durable is an optional durable name for the consumer. If both Durable
// and Name are set, they have to be equal. Unless InactiveThreshold is set, a
// durable consumer will not be cleaned up automatically.
//
// Durable cannot contain whitespace, ., *, >, path separators (forward or
// backwards slash), and non-printable characters.
Durable string `json:"durable_name,omitempty"`
// Description provides an optional description of the consumer.
Description string `json:"description,omitempty"`
// DeliverPolicy defines from which point to start delivering messages
// from the stream. Defaults to DeliverAllPolicy.
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
// OptStartSeq is an optional sequence number from which to start
// message delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartSequencePolicy.
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
// OptStartTime is an optional time from which to start message
// delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartTimePolicy.
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
// AckPolicy defines the acknowledgement policy for the consumer.
// Defaults to AckExplicitPolicy.
AckPolicy AckPolicy `json:"ack_policy"`
// AckWait defines how long the server will wait for an acknowledgement
// before resending a message. If not set, server default is 30 seconds.
AckWait time.Duration `json:"ack_wait,omitempty"`
// MaxDeliver defines the maximum number of delivery attempts for a
// message. Applies to any message that is re-sent due to ack policy.
// If not set, server default is -1 (unlimited).
MaxDeliver int `json:"max_deliver,omitempty"`
// BackOff specifies the optional back-off intervals for retrying
// message delivery after a failed acknowledgement. It overrides
// AckWait.
//
// BackOff only applies to messages not acknowledged in specified time,
// not messages that were nack'ed.
//
// The number of intervals specified must be lower or equal to
// MaxDeliver. If the number of intervals is lower, the last interval is
// used for all remaining attempts.
BackOff []time.Duration `json:"backoff,omitempty"`
// FilterSubject can be used to filter messages delivered from the
// stream. FilterSubject is exclusive with FilterSubjects.
FilterSubject string `json:"filter_subject,omitempty"`
// ReplayPolicy defines the rate at which messages are sent to the
// consumer. If ReplayOriginalPolicy is set, messages are sent in the
// same intervals in which they were stored on stream. This can be used
// e.g. to simulate production traffic in development environments. If
// ReplayInstantPolicy is set, messages are sent as fast as possible.
// Defaults to ReplayInstantPolicy.
ReplayPolicy ReplayPolicy `json:"replay_policy"`
// RateLimit specifies an optional maximum rate of message delivery in
// bits per second.
RateLimit uint64 `json:"rate_limit_bps,omitempty"`
// SampleFrequency is an optional frequency for sampling how often
// acknowledgements are sampled for observability. See
// https://docs.nats.io/running-a-nats-service/nats_admin/monitoring/monitoring_jetstream
SampleFrequency string `json:"sample_freq,omitempty"`
// MaxWaiting is a maximum number of pull requests waiting to be
// fulfilled. If not set, this will inherit settings from stream's
// ConsumerLimits or (if those are not set) from account settings. If
// neither are set, server default is 512.
MaxWaiting int `json:"max_waiting,omitempty"`
// MaxAckPending is a maximum number of outstanding unacknowledged
// messages. Once this limit is reached, the server will suspend sending
// messages to the consumer. If not set, the server default is 1000
// messages. Set to -1 for unlimited.
MaxAckPending int `json:"max_ack_pending,omitempty"`
// HeadersOnly indicates whether only headers of messages should be sent
// (and no payload). Defaults to false.
HeadersOnly bool `json:"headers_only,omitempty"`
// MaxRequestBatch is the optional maximum batch size a single pull
// request can make. When set with MaxRequestMaxBytes, the batch size
// will be constrained by whichever limit is hit first.
MaxRequestBatch int `json:"max_batch,omitempty"`
// MaxRequestExpires is the maximum duration a single pull request will
// wait for messages to be available to pull.
MaxRequestExpires time.Duration `json:"max_expires,omitempty"`
// MaxRequestMaxBytes is the optional maximum total bytes that can be
// requested in a given batch. When set with MaxRequestBatch, the batch
// size will be constrained by whichever limit is hit first.
MaxRequestMaxBytes int `json:"max_bytes,omitempty"`
// InactiveThreshold is a duration which instructs the server to clean
// up the consumer if it has been inactive for the specified duration.
// Durable consumers will not be cleaned up by default, but if
// InactiveThreshold is set, they will be. If not set, this will inherit
// settings from stream's ConsumerLimits. If neither are set, server
// default is 5 seconds.
//
// A consumer is considered inactive when no pull requests are received by
// the server (for pull consumers), or when no interest is detected on the
// deliver subject (for push consumers), not when there are no messages to
// be delivered.
InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
// Replicas is the number of replicas for the consumer's state. By default,
// consumers inherit the number of replicas from the stream.
Replicas int `json:"num_replicas"`
// MemoryStorage is a flag to force the consumer to use memory storage
// rather than inherit the storage type from the stream.
MemoryStorage bool `json:"mem_storage,omitempty"`
// FilterSubjects allows filtering messages from a stream by subject.
// This field is exclusive with FilterSubject. Requires nats-server
// v2.10.0 or later.
FilterSubjects []string `json:"filter_subjects,omitempty"`
// Metadata is a set of application-defined key-value pairs for
// associating metadata on the consumer. This feature requires
// nats-server v2.10.0 or later.
Metadata map[string]string `json:"metadata,omitempty"`
}
// OrderedConsumerConfig is the configuration of an ordered JetStream
// consumer. For more information, see [Ordered Consumers] in README
//
// [Ordered Consumers]: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md#ordered-consumers
OrderedConsumerConfig struct {
// FilterSubjects allows filtering messages from a stream by subject.
// This field is exclusive with FilterSubject. Requires nats-server
// v2.10.0 or later.
FilterSubjects []string `json:"filter_subjects,omitempty"`
// DeliverPolicy defines from which point to start delivering messages
// from the stream. Defaults to DeliverAllPolicy.
DeliverPolicy DeliverPolicy `json:"deliver_policy"`
// OptStartSeq is an optional sequence number from which to start
// message delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartSequencePolicy.
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
// OptStartTime is an optional time from which to start message
// delivery. Only applicable when DeliverPolicy is set to
// DeliverByStartTimePolicy.
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
// ReplayPolicy defines the rate at which messages are sent to the
// consumer. If ReplayOriginalPolicy is set, messages are sent in the
// same intervals in which they were stored on stream. This can be used
// e.g. to simulate production traffic in development environments. If
// ReplayInstantPolicy is set, messages are sent as fast as possible.
// Defaults to ReplayInstantPolicy.
ReplayPolicy ReplayPolicy `json:"replay_policy"`
// InactiveThreshold is a duration which instructs the server to clean
// up the consumer if it has been inactive for the specified duration.
// Defaults to 5s.
InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
// HeadersOnly indicates whether only headers of messages should be sent
// (and no payload). Defaults to false.
HeadersOnly bool `json:"headers_only,omitempty"`
// Maximum number of attempts for the consumer to be recreated in a
// single recreation cycle. Defaults to unlimited.
MaxResetAttempts int
}
// DeliverPolicy determines from which point to start delivering messages.
DeliverPolicy int
// AckPolicy determines how the consumer should acknowledge delivered
// messages.
AckPolicy int
// ReplayPolicy determines how the consumer should replay messages it
// already has queued in the stream.
ReplayPolicy int
// SequenceInfo has both the consumer and the stream sequence and last
// activity.
SequenceInfo struct {
Consumer uint64 `json:"consumer_seq"`
Stream uint64 `json:"stream_seq"`
Last *time.Time `json:"last_active,omitempty"`
}
)
const (
// DeliverAllPolicy starts delivering messages from the very beginning of a
// stream. This is the default.
DeliverAllPolicy DeliverPolicy = iota
// DeliverLastPolicy will start the consumer with the last sequence
// received.
DeliverLastPolicy
// DeliverNewPolicy will only deliver new messages that are sent after the
// consumer is created.
DeliverNewPolicy
// DeliverByStartSequencePolicy will deliver messages starting from a given
// sequence configured with OptStartSeq in ConsumerConfig.
DeliverByStartSequencePolicy
// DeliverByStartTimePolicy will deliver messages starting from a given time
// configured with OptStartTime in ConsumerConfig.
DeliverByStartTimePolicy
// DeliverLastPerSubjectPolicy will start the consumer with the last message
// for all subjects received.
DeliverLastPerSubjectPolicy
)
func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
switch string(data) {
case jsonString("all"), jsonString("undefined"):
*p = DeliverAllPolicy
case jsonString("last"):
*p = DeliverLastPolicy
case jsonString("new"):
*p = DeliverNewPolicy
case jsonString("by_start_sequence"):
*p = DeliverByStartSequencePolicy
case jsonString("by_start_time"):
*p = DeliverByStartTimePolicy
case jsonString("last_per_subject"):
*p = DeliverLastPerSubjectPolicy
default:
return fmt.Errorf("nats: can not unmarshal %q", data)
}
return nil
}
func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
switch p {
case DeliverAllPolicy:
return json.Marshal("all")
case DeliverLastPolicy:
return json.Marshal("last")
case DeliverNewPolicy:
return json.Marshal("new")
case DeliverByStartSequencePolicy:
return json.Marshal("by_start_sequence")
case DeliverByStartTimePolicy:
return json.Marshal("by_start_time")
case DeliverLastPerSubjectPolicy:
return json.Marshal("last_per_subject")
}
return nil, fmt.Errorf("nats: unknown deliver policy %v", p)
}
func (p DeliverPolicy) String() string {
switch p {
case DeliverAllPolicy:
return "all"
case DeliverLastPolicy:
return "last"
case DeliverNewPolicy:
return "new"
case DeliverByStartSequencePolicy:
return "by_start_sequence"
case DeliverByStartTimePolicy:
return "by_start_time"
case DeliverLastPerSubjectPolicy:
return "last_per_subject"
}
return ""
}
const (
// AckExplicitPolicy requires ack or nack for all messages.
AckExplicitPolicy AckPolicy = iota
// AckAllPolicy when acking a sequence number, this implicitly acks all
// sequences below this one as well.
AckAllPolicy
// AckNonePolicy requires no acks for delivered messages.
AckNonePolicy
)
func (p *AckPolicy) UnmarshalJSON(data []byte) error {
switch string(data) {
case jsonString("none"):
*p = AckNonePolicy
case jsonString("all"):
*p = AckAllPolicy
case jsonString("explicit"):
*p = AckExplicitPolicy
default:
return fmt.Errorf("nats: can not unmarshal %q", data)
}
return nil
}
func (p AckPolicy) MarshalJSON() ([]byte, error) {
switch p {
case AckNonePolicy:
return json.Marshal("none")
case AckAllPolicy:
return json.Marshal("all")
case AckExplicitPolicy:
return json.Marshal("explicit")
}
return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p)
}
func (p AckPolicy) String() string {
switch p {
case AckNonePolicy:
return "AckNone"
case AckAllPolicy:
return "AckAll"
case AckExplicitPolicy:
return "AckExplicit"
}
return "Unknown AckPolicy"
}
const (
// ReplayInstantPolicy will replay messages as fast as possible.
ReplayInstantPolicy ReplayPolicy = iota
// ReplayOriginalPolicy will maintain the same timing as the messages were
// received.
ReplayOriginalPolicy
)
func (p *ReplayPolicy) UnmarshalJSON(data []byte) error {
switch string(data) {
case jsonString("instant"):
*p = ReplayInstantPolicy
case jsonString("original"):
*p = ReplayOriginalPolicy
default:
return fmt.Errorf("nats: can not unmarshal %q", data)
}
return nil
}
func (p ReplayPolicy) MarshalJSON() ([]byte, error) {
switch p {
case ReplayOriginalPolicy:
return json.Marshal("original")
case ReplayInstantPolicy:
return json.Marshal("instant")
}
return nil, fmt.Errorf("nats: unknown replay policy %v", p)
}
func (p ReplayPolicy) String() string {
switch p {
case ReplayOriginalPolicy:
return "original"
case ReplayInstantPolicy:
return "instant"
}
return ""
}
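
A short sketch (not part of the diff) of the JSON round-trip these marshal/unmarshal methods implement, assuming the same package as the policy types:

```go
// Hypothetical usage sketch, inside a function in this package.
b, _ := json.Marshal(DeliverByStartSequencePolicy)
// string(b) == `"by_start_sequence"`

var p DeliverPolicy
_ = json.Unmarshal([]byte(`"last_per_subject"`), &p)
// p == DeliverLastPerSubjectPolicy
_ = b
```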


@@ -1,421 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"errors"
"fmt"
)
type (
// JetStreamError is an error result that happens when using JetStream.
// In case of client-side error, [APIError] returns nil.
JetStreamError interface {
APIError() *APIError
error
}
jsError struct {
apiErr *APIError
message string
}
// APIError is included in all API responses if there was an error.
APIError struct {
Code int `json:"code"`
ErrorCode ErrorCode `json:"err_code"`
Description string `json:"description,omitempty"`
}
// ErrorCode represents error_code returned in response from JetStream API.
ErrorCode uint16
)
const (
JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
JSErrCodeJetStreamNotEnabled ErrorCode = 10076
JSErrCodeStreamNotFound ErrorCode = 10059
JSErrCodeStreamNameInUse ErrorCode = 10058
JSErrCodeConsumerCreate ErrorCode = 10012
JSErrCodeConsumerNotFound ErrorCode = 10014
JSErrCodeConsumerNameExists ErrorCode = 10013
JSErrCodeConsumerAlreadyExists ErrorCode = 10105
JSErrCodeConsumerExists ErrorCode = 10148
JSErrCodeDuplicateFilterSubjects ErrorCode = 10136
JSErrCodeOverlappingFilterSubjects ErrorCode = 10138
JSErrCodeConsumerEmptyFilter ErrorCode = 10139
JSErrCodeConsumerDoesNotExist ErrorCode = 10149
JSErrCodeMessageNotFound ErrorCode = 10037
JSErrCodeBadRequest ErrorCode = 10003
JSErrCodeStreamWrongLastSequence ErrorCode = 10071
)
var (
// JetStream API errors
// ErrJetStreamNotEnabled is an error returned when JetStream is not
// enabled.
//
// Note: This error will not be returned in clustered mode, even if each
// server in the cluster does not have JetStream enabled. In clustered mode,
// requests will time out instead.
ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}}
// ErrJetStreamNotEnabledForAccount is an error returned when JetStream is
// not enabled for an account.
ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
// ErrStreamNotFound is an error returned when stream with given name does
// not exist.
ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
// ErrStreamNameAlreadyInUse is returned when a stream with given name
// already exists and has a different configuration.
ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
// ErrStreamSubjectTransformNotSupported is returned when the connected
// nats-server version does not support setting the stream subject
// transform. If this error is returned when executing CreateStream(), the
// stream with invalid configuration was already created in the server.
ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
// ErrStreamSourceSubjectTransformNotSupported is returned when the
// connected nats-server version does not support setting the stream source
// subject transform. If this error is returned when executing
// CreateStream(), the stream with invalid configuration was already created
// in the server.
ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
// ErrStreamSourceNotSupported is returned when the connected nats-server
// version does not support setting the stream sources. If this error is
// returned when executing CreateStream(), the stream with invalid
// configuration was already created in the server.
ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
// ErrStreamSourceMultipleFilterSubjectsNotSupported is returned when the
// connected nats-server version does not support setting the stream
// sources. If this error is returned when executing CreateStream(), the
// stream with invalid configuration was already created in the server.
ErrStreamSourceMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject filters not supported by nats-server"}
// ErrConsumerNotFound is an error returned when consumer with given name
// does not exist.
ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
// ErrConsumerExists is returned when attempting to create a consumer with
// CreateConsumer but a consumer with given name already exists.
ErrConsumerExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerExists, Description: "consumer already exists", Code: 400}}
// ErrConsumerDoesNotExist is returned when attempting to update a consumer
// with UpdateConsumer but a consumer with given name does not exist.
ErrConsumerDoesNotExist JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerDoesNotExist, Description: "consumer does not exist", Code: 400}}
// ErrMsgNotFound is returned when message with provided sequence number
// does not exist.
ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
// ErrBadRequest is returned when invalid request is sent to JetStream API.
ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
// ErrConsumerCreate is returned when nats-server reports error when
// creating consumer (e.g. illegal update).
ErrConsumerCreate JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerCreate, Description: "could not create consumer", Code: 500}}
// ErrDuplicateFilterSubjects is returned when both FilterSubject and
// FilterSubjects are specified when creating consumer.
ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
// ErrOverlappingFilterSubjects is returned when filter subjects overlap when
// creating consumer.
ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
// ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
// Client errors
// ErrConsumerMultipleFilterSubjectsNotSupported is returned when the
// connected nats-server version does not support setting multiple filter
// subjects with filter_subjects field. If this error is returned when
// executing AddConsumer(), the consumer with invalid configuration was
// already created in the server.
ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
// ErrConsumerNameAlreadyInUse is returned when attempting to create a
// consumer with a name that is already in use.
ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
// ErrInvalidJSAck is returned when JetStream ack from message publish is
// invalid.
ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
// ErrStreamNameRequired is returned when the provided stream name is empty.
ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
// ErrMsgAlreadyAckd is returned when attempting to acknowledge message more
// than once.
ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
// ErrNoStreamResponse is returned when there is no response from stream
// (e.g. no responders error).
ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
// ErrNotJSMessage is returned when attempting to get metadata from non
// JetStream message.
ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
// ErrInvalidStreamName is returned when the provided stream name is invalid
// (contains '.').
ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
// ErrInvalidSubject is returned when the provided subject name is invalid.
ErrInvalidSubject JetStreamError = &jsError{message: "invalid subject name"}
// ErrInvalidConsumerName is returned when the provided consumer name is
// invalid (contains '.').
ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
// ErrNoMessages is returned when no messages are currently available for a
// consumer.
ErrNoMessages JetStreamError = &jsError{message: "no messages"}
// ErrMaxBytesExceeded is returned when a message would exceed MaxBytes set
// on a pull request.
ErrMaxBytesExceeded JetStreamError = &jsError{message: "message size exceeds max bytes"}
// ErrConsumerDeleted is returned when attempting to send pull request to a
// consumer which does not exist.
ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
// ErrConsumerLeadershipChanged is returned when pending requests are no
// longer valid after leadership has changed.
ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "leadership change"}
// ErrHandlerRequired is returned when no handler func is provided in
// Stream().
ErrHandlerRequired JetStreamError = &jsError{message: "handler cannot be empty"}
// ErrEndOfData is returned when iterating over paged API from JetStream
// reaches end of data.
ErrEndOfData JetStreamError = &jsError{message: "end of data reached"}
// ErrNoHeartbeat is received when no message is received in IdleHeartbeat
// time (if set).
ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"}
// ErrConsumerHasActiveSubscription is returned when a consumer is already
// subscribed to a stream.
ErrConsumerHasActiveSubscription JetStreamError = &jsError{message: "consumer has active subscription"}
// ErrMsgNotBound is returned when given message is not bound to any
// subscription.
ErrMsgNotBound JetStreamError = &jsError{message: "message is not bound to subscription/connection"}
// ErrMsgNoReply is returned when attempting to reply to a message without a
// reply subject.
ErrMsgNoReply JetStreamError = &jsError{message: "message does not have a reply"}
// ErrMsgDeleteUnsuccessful is returned when an attempt to delete a message
// is unsuccessful.
ErrMsgDeleteUnsuccessful JetStreamError = &jsError{message: "message deletion unsuccessful"}
// ErrAsyncPublishReplySubjectSet is returned when reply subject is set on
// async message publish.
ErrAsyncPublishReplySubjectSet JetStreamError = &jsError{message: "reply subject should be empty"}
// ErrTooManyStalledMsgs is returned when too many outstanding async
// messages are waiting for ack.
ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"}
// ErrInvalidOption is returned when there is a collision between options.
ErrInvalidOption JetStreamError = &jsError{message: "invalid jetstream option"}
// ErrMsgIteratorClosed is returned when attempting to get message from a
// closed iterator.
ErrMsgIteratorClosed JetStreamError = &jsError{message: "messages iterator closed"}
// ErrOrderedConsumerReset is returned when resetting ordered consumer fails
// due to too many attempts.
ErrOrderedConsumerReset JetStreamError = &jsError{message: "recreating ordered consumer"}
// ErrOrderConsumerUsedAsFetch is returned when ordered consumer was already
// used to process messages using Fetch (or FetchBytes).
ErrOrderConsumerUsedAsFetch JetStreamError = &jsError{message: "ordered consumer initialized as fetch"}
// ErrOrderConsumerUsedAsConsume is returned when ordered consumer was
// already used to process messages using Consume or Messages.
ErrOrderConsumerUsedAsConsume JetStreamError = &jsError{message: "ordered consumer initialized as consume"}
// ErrOrderedConsumerConcurrentRequests is returned when attempting to run
// concurrent operations on ordered consumers.
ErrOrderedConsumerConcurrentRequests JetStreamError = &jsError{message: "cannot run concurrent processing using ordered consumer"}
// ErrOrderedConsumerNotCreated is returned when trying to get consumer info
// of an ordered consumer which was not yet created.
ErrOrderedConsumerNotCreated JetStreamError = &jsError{message: "consumer instance not yet created"}
// KeyValue Errors
// ErrKeyExists is returned when attempting to create a key that already
// exists.
ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"}
// ErrKeyValueConfigRequired is returned when attempting to create a bucket
// without a config.
ErrKeyValueConfigRequired JetStreamError = &jsError{message: "config required"}
// ErrInvalidBucketName is returned when attempting to create a bucket with
// an invalid name.
ErrInvalidBucketName JetStreamError = &jsError{message: "invalid bucket name"}
// ErrInvalidKey is returned when attempting to create a key with an invalid
// name.
ErrInvalidKey JetStreamError = &jsError{message: "invalid key"}
// ErrBucketExists is returned when attempting to create a bucket that
// already exists and has a different configuration.
ErrBucketExists JetStreamError = &jsError{message: "bucket name already in use"}
// ErrBucketNotFound is returned when attempting to access a bucket that
// does not exist.
ErrBucketNotFound JetStreamError = &jsError{message: "bucket not found"}
// ErrBadBucket is returned when attempting to access a bucket that is not a
// key-value store.
ErrBadBucket JetStreamError = &jsError{message: "bucket not valid key-value store"}
// ErrKeyNotFound is returned when attempting to access a key that does not
// exist.
ErrKeyNotFound JetStreamError = &jsError{message: "key not found"}
// ErrKeyDeleted is returned when attempting to access a key that was
// deleted.
ErrKeyDeleted JetStreamError = &jsError{message: "key was deleted"}
// ErrHistoryTooLarge is returned when the provided history limit is larger than
// 64.
ErrHistoryTooLarge JetStreamError = &jsError{message: "history limited to a max of 64"}
// ErrNoKeysFound is returned when no keys are found.
ErrNoKeysFound JetStreamError = &jsError{message: "no keys found"}
// ErrObjectConfigRequired is returned when attempting to create an object
// without a config.
ErrObjectConfigRequired JetStreamError = &jsError{message: "object-store config required"}
// ErrBadObjectMeta is returned when the meta information of an object is
// invalid.
ErrBadObjectMeta JetStreamError = &jsError{message: "object-store meta information invalid"}
// ErrObjectNotFound is returned when an object is not found.
ErrObjectNotFound JetStreamError = &jsError{message: "object not found"}
// ErrInvalidStoreName is returned when the name of an object-store is
// invalid.
ErrInvalidStoreName JetStreamError = &jsError{message: "invalid object-store name"}
// ErrDigestMismatch is returned when the digests of an object do not match.
ErrDigestMismatch JetStreamError = &jsError{message: "received a corrupt object, digests do not match"}
// ErrInvalidDigestFormat is returned when the digest hash of an object has
// an invalid format.
ErrInvalidDigestFormat JetStreamError = &jsError{message: "object digest hash has invalid format"}
// ErrNoObjectsFound is returned when no objects are found.
ErrNoObjectsFound JetStreamError = &jsError{message: "no objects found"}
// ErrObjectAlreadyExists is returned when an object with the same name
// already exists.
ErrObjectAlreadyExists JetStreamError = &jsError{message: "an object already exists with that name"}
// ErrNameRequired is returned when a name is required.
ErrNameRequired JetStreamError = &jsError{message: "name is required"}
// ErrLinkNotAllowed is returned when a link cannot be set when putting the
// object in a bucket.
ErrLinkNotAllowed JetStreamError = &jsError{message: "link cannot be set when putting the object in bucket"}
// ErrObjectRequired is returned when an object is required.
ErrObjectRequired JetStreamError = &jsError{message: "object required"}
// ErrNoLinkToDeleted is returned when it is not allowed to link to a
// deleted object.
ErrNoLinkToDeleted JetStreamError = &jsError{message: "not allowed to link to a deleted object"}
// ErrNoLinkToLink is returned when it is not allowed to link to another
// link.
ErrNoLinkToLink JetStreamError = &jsError{message: "not allowed to link to another link"}
// ErrCantGetBucket is returned when an invalid Get is attempted on an
// object that is a link to a bucket.
ErrCantGetBucket JetStreamError = &jsError{message: "invalid Get, object is a link to a bucket"}
// ErrBucketRequired is returned when a bucket is required.
ErrBucketRequired JetStreamError = &jsError{message: "bucket required"}
// ErrBucketMalformed is returned when a bucket is malformed.
ErrBucketMalformed JetStreamError = &jsError{message: "bucket malformed"}
// ErrUpdateMetaDeleted is returned when the meta information of a deleted
// object cannot be updated.
ErrUpdateMetaDeleted JetStreamError = &jsError{message: "cannot update meta for a deleted object"}
)
// Error prints the JetStream API error code and description.
func (e *APIError) Error() string {
return fmt.Sprintf("nats: API error: code=%d err_code=%d description=%s", e.Code, e.ErrorCode, e.Description)
}
// APIError implements the JetStreamError interface.
func (e *APIError) APIError() *APIError {
return e
}
// Is matches against an APIError.
func (e *APIError) Is(err error) bool {
if e == nil {
return false
}
// Extract internal APIError to match against.
var aerr *APIError
ok := errors.As(err, &aerr)
if !ok {
return ok
}
return e.ErrorCode == aerr.ErrorCode
}
func (err *jsError) APIError() *APIError {
return err.apiErr
}
func (err *jsError) Error() string {
if err.apiErr != nil && err.apiErr.Description != "" {
return err.apiErr.Error()
}
return fmt.Sprintf("nats: %s", err.message)
}
func (err *jsError) Unwrap() error {
// Allow matching to embedded APIError in case there is one.
if err.apiErr == nil {
return nil
}
return err.apiErr
}
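
Because APIError.Is matches on ErrorCode and jsError unwraps to its embedded APIError, server responses can be tested with errors.Is against the sentinels above. A minimal sketch using the public jetstream API, with illustrative names and the usual imports assumed:

```go
_, err := js.Stream(ctx, "NO_SUCH_STREAM")
if errors.Is(err, jetstream.ErrStreamNotFound) {
	// The API response (code 404, err_code 10059) matched the sentinel.
}
```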

File diff suppressed because it is too large


@@ -1,408 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"fmt"
"time"
)
type pullOptFunc func(*consumeOpts) error
func (fn pullOptFunc) configureConsume(opts *consumeOpts) error {
return fn(opts)
}
func (fn pullOptFunc) configureMessages(opts *consumeOpts) error {
return fn(opts)
}
// WithClientTrace enables tracing of request/response API calls.
func WithClientTrace(ct *ClientTrace) JetStreamOpt {
return func(opts *jsOpts) error {
opts.clientTrace = ct
return nil
}
}
// WithPublishAsyncErrHandler sets error handler for async message publish.
func WithPublishAsyncErrHandler(cb MsgErrHandler) JetStreamOpt {
return func(opts *jsOpts) error {
opts.publisherOpts.aecb = cb
return nil
}
}
// WithPublishAsyncMaxPending sets the maximum outstanding async publishes that
// can be inflight at one time.
func WithPublishAsyncMaxPending(max int) JetStreamOpt {
return func(opts *jsOpts) error {
if max < 1 {
return fmt.Errorf("%w: max ack pending should be >= 1", ErrInvalidOption)
}
opts.publisherOpts.maxpa = max
return nil
}
}
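
A sketch (illustrative, error handling elided, usual imports assumed) of wiring the two async-publish options above into jetstream.New:

```go
js, _ := jetstream.New(nc,
	jetstream.WithPublishAsyncMaxPending(256),
	jetstream.WithPublishAsyncErrHandler(func(js jetstream.JetStream, m *nats.Msg, err error) {
		log.Printf("async publish to %s failed: %v", m.Subject, err)
	}),
)
```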
// WithPurgeSubject sets a specific subject for which messages on a stream will
// be purged
func WithPurgeSubject(subject string) StreamPurgeOpt {
return func(req *StreamPurgeRequest) error {
req.Subject = subject
return nil
}
}
// WithPurgeSequence is used to set a specific sequence number up to which (but
// not including) messages will be purged from a stream. Can be combined with
// the [WithPurgeSubject] option, but not with [WithPurgeKeep].
func WithPurgeSequence(sequence uint64) StreamPurgeOpt {
return func(req *StreamPurgeRequest) error {
if req.Keep != 0 {
return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption)
}
req.Sequence = sequence
return nil
}
}
// WithPurgeKeep sets the number of messages to be kept in the stream after
// purge. Can be combined with [WithPurgeSubject] option, but not with
// [WithPurgeSequence].
func WithPurgeKeep(keep uint64) StreamPurgeOpt {
return func(req *StreamPurgeRequest) error {
if req.Sequence != 0 {
return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption)
}
req.Keep = keep
return nil
}
}
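
For illustration, a purge sketch combining the compatible options above (WithPurgeSequence and WithPurgeKeep cannot be mixed); names and values are illustrative:

```go
// Keep only the last 100 messages on the matching subject.
err := stream.Purge(ctx,
	jetstream.WithPurgeSubject("orders.eu.*"),
	jetstream.WithPurgeKeep(100),
)
```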
// WithGetMsgSubject sets the stream subject from which the message should be
// retrieved. The server will return the first message with a seq >= the input seq
// that has the specified subject.
func WithGetMsgSubject(subject string) GetMsgOpt {
return func(req *apiMsgGetRequest) error {
req.NextFor = subject
return nil
}
}
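
A one-line sketch of the option above, with illustrative values:

```go
// First message with sequence >= 1000 on a subject matching the filter.
msg, err := stream.GetMsg(ctx, 1000, jetstream.WithGetMsgSubject("orders.eu.*"))
```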
// PullMaxMessages limits the number of messages to be buffered in the client.
// If not provided, a default of 500 messages will be used.
// This option is exclusive with PullMaxBytes.
type PullMaxMessages int
func (max PullMaxMessages) configureConsume(opts *consumeOpts) error {
if max <= 0 {
return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
}
opts.MaxMessages = int(max)
return nil
}
func (max PullMaxMessages) configureMessages(opts *consumeOpts) error {
if max <= 0 {
return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
}
opts.MaxMessages = int(max)
return nil
}
// PullExpiry sets timeout on a single pull request, waiting until at least one
// message is available.
// If not provided, a default of 30 seconds will be used.
type PullExpiry time.Duration
func (exp PullExpiry) configureConsume(opts *consumeOpts) error {
expiry := time.Duration(exp)
if expiry < time.Second {
return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
}
opts.Expires = expiry
return nil
}
func (exp PullExpiry) configureMessages(opts *consumeOpts) error {
expiry := time.Duration(exp)
if expiry < time.Second {
return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
}
opts.Expires = expiry
return nil
}
// PullMaxBytes limits the number of bytes to be buffered in the client.
// If not provided, the limit is not set (max messages will be used instead).
// This option is exclusive with PullMaxMessages.
type PullMaxBytes int
func (max PullMaxBytes) configureConsume(opts *consumeOpts) error {
if max <= 0 {
return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption)
}
opts.MaxBytes = int(max)
return nil
}
func (max PullMaxBytes) configureMessages(opts *consumeOpts) error {
if max <= 0 {
return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption)
}
opts.MaxBytes = int(max)
return nil
}
// PullThresholdMessages sets the message count on which Consume will trigger
// a new pull request to the server. Defaults to 50% of MaxMessages.
type PullThresholdMessages int
func (t PullThresholdMessages) configureConsume(opts *consumeOpts) error {
opts.ThresholdMessages = int(t)
return nil
}
func (t PullThresholdMessages) configureMessages(opts *consumeOpts) error {
opts.ThresholdMessages = int(t)
return nil
}
// PullThresholdBytes sets the byte count on which Consume will trigger
// a new pull request to the server. Defaults to 50% of MaxBytes (if set).
type PullThresholdBytes int
func (t PullThresholdBytes) configureConsume(opts *consumeOpts) error {
opts.ThresholdBytes = int(t)
return nil
}
func (t PullThresholdBytes) configureMessages(opts *consumeOpts) error {
opts.ThresholdBytes = int(t)
return nil
}
// PullHeartbeat sets the idle heartbeat duration for a pull subscription.
// If a client does not receive a heartbeat message from a stream for more
// than the idle heartbeat setting, the subscription will be removed
// and an error will be passed to the message handler.
// If not provided, a default of PullExpiry / 2 will be used (capped at 30 seconds).
type PullHeartbeat time.Duration
func (hb PullHeartbeat) configureConsume(opts *consumeOpts) error {
hbTime := time.Duration(hb)
if hbTime < 500*time.Millisecond || hbTime > 30*time.Second {
return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption)
}
opts.Heartbeat = hbTime
return nil
}
func (hb PullHeartbeat) configureMessages(opts *consumeOpts) error {
hbTime := time.Duration(hb)
if hbTime < 500*time.Millisecond || hbTime > 30*time.Second {
return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption)
}
opts.Heartbeat = hbTime
return nil
}
// StopAfter sets the number of messages after which the consumer is
// automatically stopped and no more messages are pulled from the server.
type StopAfter int
func (nMsgs StopAfter) configureConsume(opts *consumeOpts) error {
if nMsgs <= 0 {
return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption)
}
opts.StopAfter = int(nMsgs)
return nil
}
func (nMsgs StopAfter) configureMessages(opts *consumeOpts) error {
if nMsgs <= 0 {
return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption)
}
opts.StopAfter = int(nMsgs)
return nil
}
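
A sketch (illustrative, error handling elided, usual imports assumed) combining the pull options above with Consume:

```go
cc, _ := cons.Consume(func(msg jetstream.Msg) {
	_ = msg.Ack()
},
	jetstream.PullMaxMessages(200),         // client-side buffer size
	jetstream.PullHeartbeat(5*time.Second), // within the 500ms-30s range
	jetstream.StopAfter(1000),              // stop after 1000 messages
)
defer cc.Stop()
```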
// ConsumeErrHandler sets a custom error handler invoked when an error is
// encountered while consuming messages. It will be invoked for both terminal
// (Consumer Deleted, invalid request body) and non-terminal (e.g. missing
// heartbeats) errors.
func ConsumeErrHandler(cb ConsumeErrHandlerFunc) PullConsumeOpt {
return pullOptFunc(func(cfg *consumeOpts) error {
cfg.ErrHandler = cb
return nil
})
}
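// exampleErrHandler is a sketch (hypothetical helper, not part of the original
// source): it wires a custom callback into Consume to observe both terminal
// and non-terminal errors.
func exampleErrHandler(cons Consumer, handler MessageHandler) (ConsumeContext, error) {
	return cons.Consume(handler, ConsumeErrHandler(func(cc ConsumeContext, err error) {
		fmt.Printf("consume error: %v\n", err)
	}))
}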
// WithMessagesErrOnMissingHeartbeat sets whether a missing heartbeat error
// should be reported when calling [MessagesContext.Next] (Default: true).
func WithMessagesErrOnMissingHeartbeat(hbErr bool) PullMessagesOpt {
return pullOptFunc(func(cfg *consumeOpts) error {
cfg.ReportMissingHeartbeats = hbErr
return nil
})
}
// FetchMaxWait sets custom timeout for fetching predefined batch of messages.
//
// If not provided, a default of 30 seconds will be used.
func FetchMaxWait(timeout time.Duration) FetchOpt {
return func(req *pullRequest) error {
if timeout <= 0 {
return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption)
}
req.Expires = timeout
return nil
}
}
// FetchHeartbeat sets custom heartbeat for individual fetch request. If a
// client does not receive a heartbeat message from a stream for more than 2
// times the idle heartbeat setting, Fetch will return [ErrNoHeartbeat].
//
// Heartbeat value has to be lower than FetchMaxWait / 2.
//
// If not provided, heartbeat is set to 5s for requests with FetchMaxWait > 10s
// and disabled otherwise.
func FetchHeartbeat(hb time.Duration) FetchOpt {
return func(req *pullRequest) error {
if hb <= 0 {
return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption)
}
req.Heartbeat = hb
return nil
}
}
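// exampleFetch is a sketch (hypothetical helper, not part of the original
// source): a single Fetch of up to 10 messages with a custom timeout and
// heartbeat; the heartbeat stays below half the max wait, per the rule above.
func exampleFetch(cons Consumer) error {
	msgs, err := cons.Fetch(10, FetchMaxWait(2*time.Second), FetchHeartbeat(500*time.Millisecond))
	if err != nil {
		return err
	}
	for msg := range msgs.Messages() {
		_ = msg.Ack()
	}
	return msgs.Error()
}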
// WithDeletedDetails can be used to display the information about messages
// deleted from a stream on a stream info request.
func WithDeletedDetails(deletedDetails bool) StreamInfoOpt {
return func(req *streamInfoRequest) error {
req.DeletedDetails = deletedDetails
return nil
}
}
// WithSubjectFilter can be used to display the information about messages
// stored on given subjects.
// NOTE: if the subject filter matches over 100k
// subjects, this will result in multiple requests to the server to retrieve all
// the information, and all of the returned subjects will be kept in memory.
func WithSubjectFilter(subject string) StreamInfoOpt {
return func(req *streamInfoRequest) error {
req.SubjectFilter = subject
return nil
}
}
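// exampleStreamInfo is a sketch (hypothetical helper and subject, not part of
// the original source): it requests stream info with deleted-message details
// and a subject filter applied.
func exampleStreamInfo(ctx context.Context, s Stream) (*StreamInfo, error) {
	return s.Info(ctx, WithDeletedDetails(true), WithSubjectFilter("ORDERS.>"))
}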
// WithStreamListSubject can be used to filter results of ListStreams and
// StreamNames requests to only streams that have given subject in their
// configuration.
func WithStreamListSubject(subject string) StreamListOpt {
return func(req *streamsRequest) error {
req.Subject = subject
return nil
}
}
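// exampleListStreams is a sketch (hypothetical helper and subject, not part of
// the original source): it lists only streams whose configuration includes the
// given subject.
func exampleListStreams(ctx context.Context, js JetStream) error {
	lister := js.ListStreams(ctx, WithStreamListSubject("ORDERS.>"))
	for info := range lister.Info() {
		fmt.Println(info.Config.Name)
	}
	return lister.Err()
}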
// WithMsgID sets the message ID used for deduplication.
func WithMsgID(id string) PublishOpt {
return func(opts *pubOpts) error {
opts.id = id
return nil
}
}
// WithExpectStream sets the expected stream the message should be published to.
// If the message is published to a different stream server will reject the
// message and publish will fail.
func WithExpectStream(stream string) PublishOpt {
return func(opts *pubOpts) error {
opts.stream = stream
return nil
}
}
// WithExpectLastSequence sets the expected sequence number the last message
// on a stream should have. If the last message has a different sequence number
// server will reject the message and publish will fail.
func WithExpectLastSequence(seq uint64) PublishOpt {
return func(opts *pubOpts) error {
opts.lastSeq = &seq
return nil
}
}
// WithExpectLastSequencePerSubject sets the expected sequence number the last
// message on a subject the message is published to. If the last message on a
// subject has a different sequence number server will reject the message and
// publish will fail.
func WithExpectLastSequencePerSubject(seq uint64) PublishOpt {
return func(opts *pubOpts) error {
opts.lastSubjectSeq = &seq
return nil
}
}
// WithExpectLastMsgID sets the expected message ID the last message on a stream
// should have. If the last message has a different message ID server will
// reject the message and publish will fail.
func WithExpectLastMsgID(id string) PublishOpt {
return func(opts *pubOpts) error {
opts.lastMsgID = id
return nil
}
}
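// examplePublishGuarded is a sketch (hypothetical helper, subject, and values,
// not part of the original source): it publishes with a deduplication ID and
// optimistic-concurrency expectations; the server rejects the message if any
// expectation does not hold.
func examplePublishGuarded(ctx context.Context, js JetStream) (*PubAck, error) {
	return js.Publish(ctx, "ORDERS.new", []byte("order"),
		WithMsgID("order-123"),
		WithExpectStream("ORDERS"),
		WithExpectLastSequencePerSubject(41),
	)
}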
// WithRetryWait sets the retry wait time when ErrNoResponders is encountered.
// Defaults to 250ms.
func WithRetryWait(dur time.Duration) PublishOpt {
return func(opts *pubOpts) error {
if dur <= 0 {
return fmt.Errorf("%w: retry wait should be more than 0", ErrInvalidOption)
}
opts.retryWait = dur
return nil
}
}
// WithRetryAttempts sets the retry number of attempts when ErrNoResponders is
// encountered. Defaults to 2.
func WithRetryAttempts(num int) PublishOpt {
return func(opts *pubOpts) error {
if num < 0 {
return fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidOption)
}
opts.retryAttempts = num
return nil
}
}
// WithStallWait sets the max wait when the producer is stalled producing
// messages. If a publish call is blocked for this long, ErrTooManyStalledMsgs
// is returned.
func WithStallWait(ttl time.Duration) PublishOpt {
return func(opts *pubOpts) error {
if ttl <= 0 {
return fmt.Errorf("%w: stall wait should be more than 0", ErrInvalidOption)
}
opts.stallWait = ttl
return nil
}
}
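// examplePublishAsyncTuned is a sketch (hypothetical helper and subject, not
// part of the original source): an async publish with custom retry behavior
// for ErrNoResponders and a custom stall wait for back pressure.
func examplePublishAsyncTuned(js JetStream) (PubAckFuture, error) {
	return js.PublishAsync("ORDERS.new", []byte("order"),
		WithRetryAttempts(5),
		WithRetryWait(500*time.Millisecond),
		WithStallWait(time.Second),
	)
}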

File diff suppressed because it is too large

View File

@@ -1,103 +0,0 @@
// Copyright 2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"fmt"
"time"
)
type watchOptFn func(opts *watchOpts) error
func (opt watchOptFn) configureWatcher(opts *watchOpts) error {
return opt(opts)
}
// IncludeHistory instructs the key watcher to include historical values as
// well (up to KeyValueMaxHistory).
func IncludeHistory() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
if opts.updatesOnly {
return fmt.Errorf("%w: include history can not be used with updates only", ErrInvalidOption)
}
opts.includeHistory = true
return nil
})
}
// UpdatesOnly instructs the key watcher to only include updates on values
// (without latest values when started).
func UpdatesOnly() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
if opts.includeHistory {
return fmt.Errorf("%w: updates only can not be used with include history", ErrInvalidOption)
}
opts.updatesOnly = true
return nil
})
}
// IgnoreDeletes will have the key watcher not pass any deleted keys.
func IgnoreDeletes() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
opts.ignoreDeletes = true
return nil
})
}
// MetaOnly instructs the key watcher to retrieve only the entry meta data, not
// the entry value.
func MetaOnly() WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
opts.metaOnly = true
return nil
})
}
// ResumeFromRevision instructs the key watcher to resume from a specific
// revision number.
func ResumeFromRevision(revision uint64) WatchOpt {
return watchOptFn(func(opts *watchOpts) error {
opts.resumeFromRevision = revision
return nil
})
}
// DeleteMarkersOlderThan indicates that delete or purge markers older than that
// will be deleted as part of [KeyValue.PurgeDeletes] operation, otherwise, only the data
// will be removed but markers that are recent will be kept.
// Note that if no option is specified, the default is 30 minutes. You can set
// this option to a negative value to instruct to always remove the markers,
// regardless of their age.
type DeleteMarkersOlderThan time.Duration
func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error {
opts.dmthr = time.Duration(ttl)
return nil
}
type deleteOptFn func(opts *deleteOpts) error
func (opt deleteOptFn) configureDelete(opts *deleteOpts) error {
return opt(opts)
}
// LastRevision deletes if the latest revision matches the provided one. If the
// provided revision is not the latest, the delete will return an error.
func LastRevision(revision uint64) KVDeleteOpt {
return deleteOptFn(func(opts *deleteOpts) error {
opts.revision = revision
return nil
})
}
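// exampleWatchAndDelete is a sketch (hypothetical helper, keys, and revision,
// not part of the original source): it watches updates only, skipping deletes,
// and removes a key guarded by its expected revision.
func exampleWatchAndDelete(ctx context.Context, kv KeyValue) error {
	watcher, err := kv.Watch(ctx, "orders.>", UpdatesOnly(), IgnoreDeletes())
	if err != nil {
		return err
	}
	defer watcher.Stop()
	// Delete succeeds only if revision 7 is still the latest for the key.
	return kv.Delete(ctx, "orders.123", LastRevision(7))
}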

View File

@@ -1,457 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/nats.go"
"github.com/nats-io/nats.go/internal/parser"
)
type (
// Msg contains methods to operate on a JetStream message. Metadata, Data,
// Headers, Subject and Reply can be used to retrieve the specific parts of
// the underlying message. Ack, DoubleAck, Nak, NakWithDelay, InProgress and
// Term are various flavors of ack requests.
Msg interface {
// Metadata returns [MsgMetadata] for a JetStream message.
Metadata() (*MsgMetadata, error)
// Data returns the message body.
Data() []byte
// Headers returns a map of headers for a message.
Headers() nats.Header
// Subject returns a subject on which a message was published/received.
Subject() string
// Reply returns a reply subject for a message.
Reply() string
// Ack acknowledges a message. This tells the server that the message was
// successfully processed and it can move on to the next message.
Ack() error
// DoubleAck acknowledges a message and waits for ack reply from the server.
// While it impacts performance, it is useful for scenarios where
// message loss is not acceptable.
DoubleAck(context.Context) error
// Nak negatively acknowledges a message. This tells the server to
// redeliver the message.
//
// Nak does not adhere to AckWait or Backoff configured on the consumer
// and triggers instant redelivery. For a delayed redelivery, use
// NakWithDelay.
Nak() error
// NakWithDelay negatively acknowledges a message. This tells the server
// to redeliver the message after the given delay.
NakWithDelay(delay time.Duration) error
// InProgress tells the server that this message is being worked on. It
// resets the redelivery timer on the server.
InProgress() error
// Term tells the server to not redeliver this message, regardless of
// the value of MaxDeliver.
Term() error
// TermWithReason tells the server to not redeliver this message, regardless of
// the value of MaxDeliver. The provided reason will be included in JetStream
// advisory event sent by the server.
//
// Note: This will only work with JetStream servers >= 2.10.4.
// For older servers, TermWithReason will be ignored by the server and the message
// will not be terminated.
TermWithReason(reason string) error
}
// MsgMetadata is the JetStream metadata associated with received messages.
MsgMetadata struct {
// Sequence is the sequence information for the message.
Sequence SequencePair
// NumDelivered is the number of times this message was delivered to the
// consumer.
NumDelivered uint64
// NumPending is the number of messages that match the consumer's
// filter, but have not been delivered yet.
NumPending uint64
// Timestamp is the time the message was originally stored on a stream.
Timestamp time.Time
// Stream is the stream name this message is stored on.
Stream string
// Consumer is the consumer name this message was delivered to.
Consumer string
// Domain is the domain this message was received on.
Domain string
}
// SequencePair includes the consumer and stream sequence numbers for a
// message.
SequencePair struct {
// Consumer is the consumer sequence number for message deliveries. This
// is the total number of messages the consumer has seen (including
// redeliveries).
Consumer uint64 `json:"consumer_seq"`
// Stream is the stream sequence number for a message.
Stream uint64 `json:"stream_seq"`
}
jetStreamMsg struct {
msg *nats.Msg
ackd bool
js *jetStream
sync.Mutex
}
ackOpts struct {
nakDelay time.Duration
termReason string
}
ackType []byte
)
const (
controlMsg = "100"
badRequest = "400"
noMessages = "404"
reqTimeout = "408"
maxBytesExceeded = "409"
noResponders = "503"
)
// Headers used when publishing messages.
const (
// MsgIDHeader is used to specify a user-defined message ID. It can be used
// e.g. for deduplication in conjunction with the Duplicates duration on
// ConsumerConfig or to provide optimistic concurrency safety together with
// [ExpectedLastMsgIDHeader].
//
// This can be set when publishing messages using [WithMsgID] option.
MsgIDHeader = "Nats-Msg-Id"
// ExpectedStreamHeader contains stream name and is used to assure that the
// published message is received by expected stream. Server will reject the
// message if it is not the case.
//
// This can be set when publishing messages using [WithExpectStream] option.
ExpectedStreamHeader = "Nats-Expected-Stream"
// ExpectedLastSeqHeader contains the expected last sequence number of the
// stream and can be used to apply optimistic concurrency control at stream
// level. Server will reject the message if it is not the case.
//
// This can be set when publishing messages using [WithExpectLastSequence]
// option.
ExpectedLastSeqHeader = "Nats-Expected-Last-Sequence"
// ExpectedLastSubjSeqHeader contains the expected last sequence number on
// the subject and can be used to apply optimistic concurrency control at
// subject level. Server will reject the message if it is not the case.
//
// This can be set when publishing messages using
// [WithExpectLastSequencePerSubject] option.
ExpectedLastSubjSeqHeader = "Nats-Expected-Last-Subject-Sequence"
// ExpectedLastMsgIDHeader contains the expected last message ID on the
// subject and can be used to apply optimistic concurrency control at
// stream level. Server will reject the message if it is not the case.
//
// This can be set when publishing messages using [WithExpectLastMsgID]
// option.
ExpectedLastMsgIDHeader = "Nats-Expected-Last-Msg-Id"
// MsgRollup is used to apply a purge of all prior messages in the stream
// ("all") or at the subject ("sub") before this message.
MsgRollup = "Nats-Rollup"
)
// Headers for republished messages and direct gets. These headers are set by
// the server and should not be set by the client.
const (
// StreamHeader contains the stream name the message was republished from or
// the stream name the message was retrieved from using direct get.
StreamHeader = "Nats-Stream"
// SequenceHeader contains the original sequence number of the message.
SequenceHeader = "Nats-Sequence"
// TimeStampHeader contains the original timestamp of the message.
TimeStampHeaer = "Nats-Time-Stamp"
// SubjectHeader contains the original subject the message was published to.
SubjectHeader = "Nats-Subject"
// LastSequenceHeader contains the last sequence of the message having the
// same subject, otherwise zero if this is the first message for the
// subject.
LastSequenceHeader = "Nats-Last-Sequence"
)
// Rollups can be subject only or all messages.
const (
// MsgRollupSubject is used to purge all messages before this message on the
// message subject.
MsgRollupSubject = "sub"
// MsgRollupAll is used to purge all messages before this message on the
// stream.
MsgRollupAll = "all"
)
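// exampleRollup is a sketch (hypothetical helper and subject, not part of the
// original source): it publishes a message that purges all prior messages on
// its subject using the rollup header.
func exampleRollup(ctx context.Context, js JetStream) (*PubAck, error) {
	msg := &nats.Msg{Subject: "ORDERS.state", Data: []byte("snapshot")}
	msg.Header = nats.Header{}
	msg.Header.Set(MsgRollup, MsgRollupSubject)
	return js.PublishMsg(ctx, msg)
}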
var (
ackAck ackType = []byte("+ACK")
ackNak ackType = []byte("-NAK")
ackProgress ackType = []byte("+WPI")
ackTerm ackType = []byte("+TERM")
)
// Metadata returns [MsgMetadata] for a JetStream message.
func (m *jetStreamMsg) Metadata() (*MsgMetadata, error) {
if err := m.checkReply(); err != nil {
return nil, err
}
tokens, err := parser.GetMetadataFields(m.msg.Reply)
if err != nil {
return nil, fmt.Errorf("%w: %s", ErrNotJSMessage, err)
}
meta := &MsgMetadata{
Domain: tokens[parser.AckDomainTokenPos],
NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]),
NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]),
Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
Stream: tokens[parser.AckStreamTokenPos],
Consumer: tokens[parser.AckConsumerTokenPos],
}
meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])
return meta, nil
}
// Data returns the message body.
func (m *jetStreamMsg) Data() []byte {
return m.msg.Data
}
// Headers returns a map of headers for a message.
func (m *jetStreamMsg) Headers() nats.Header {
return m.msg.Header
}
// Subject returns a subject on which a message is published.
func (m *jetStreamMsg) Subject() string {
return m.msg.Subject
}
// Reply returns a reply subject for a JetStream message.
func (m *jetStreamMsg) Reply() string {
return m.msg.Reply
}
// Ack acknowledges a message. This tells the server that the message was
// successfully processed and it can move on to the next message.
func (m *jetStreamMsg) Ack() error {
return m.ackReply(context.Background(), ackAck, false, ackOpts{})
}
// DoubleAck acknowledges a message and waits for ack reply from the server.
// While it impacts performance, it is useful for scenarios where
// message loss is not acceptable.
func (m *jetStreamMsg) DoubleAck(ctx context.Context) error {
return m.ackReply(ctx, ackAck, true, ackOpts{})
}
// Nak negatively acknowledges a message. This tells the server to
// redeliver the message.
func (m *jetStreamMsg) Nak() error {
return m.ackReply(context.Background(), ackNak, false, ackOpts{})
}
// NakWithDelay negatively acknowledges a message. This tells the server
// to redeliver the message after the given delay.
func (m *jetStreamMsg) NakWithDelay(delay time.Duration) error {
return m.ackReply(context.Background(), ackNak, false, ackOpts{nakDelay: delay})
}
// InProgress tells the server that this message is being worked on. It
// resets the redelivery timer on the server.
func (m *jetStreamMsg) InProgress() error {
return m.ackReply(context.Background(), ackProgress, false, ackOpts{})
}
// Term tells the server to not redeliver this message, regardless of
// the value of MaxDeliver.
func (m *jetStreamMsg) Term() error {
return m.ackReply(context.Background(), ackTerm, false, ackOpts{})
}
// TermWithReason tells the server to not redeliver this message, regardless of
// the value of MaxDeliver. The provided reason will be included in JetStream
// advisory event sent by the server.
//
// Note: This will only work with JetStream servers >= 2.10.4.
// For older servers, TermWithReason will be ignored by the server and the message
// will not be terminated.
func (m *jetStreamMsg) TermWithReason(reason string) error {
return m.ackReply(context.Background(), ackTerm, false, ackOpts{termReason: reason})
}
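// exampleAckFlow is a sketch (hypothetical handler, not part of the original
// source): it picks an ack flavor based on message metadata, terminating after
// too many redeliveries.
func exampleAckFlow(msg Msg) {
	meta, err := msg.Metadata()
	if err != nil {
		_ = msg.Term()
		return
	}
	if meta.NumDelivered > 5 {
		_ = msg.TermWithReason("too many redeliveries")
		return
	}
	if len(msg.Data()) == 0 {
		// Ask for redelivery in 10 seconds instead of immediately.
		_ = msg.NakWithDelay(10 * time.Second)
		return
	}
	_ = msg.Ack()
}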
func (m *jetStreamMsg) ackReply(ctx context.Context, ackType ackType, sync bool, opts ackOpts) error {
err := m.checkReply()
if err != nil {
return err
}
m.Lock()
if m.ackd {
m.Unlock()
return ErrMsgAlreadyAckd
}
m.Unlock()
if sync {
var cancel context.CancelFunc
ctx, cancel = wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
}
var body []byte
if opts.nakDelay > 0 {
body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, opts.nakDelay.Nanoseconds()))
} else if opts.termReason != "" {
body = []byte(fmt.Sprintf("%s %s", ackType, opts.termReason))
} else {
body = ackType
}
if sync {
_, err = m.js.conn.RequestWithContext(ctx, m.msg.Reply, body)
} else {
err = m.js.conn.Publish(m.msg.Reply, body)
}
if err != nil {
return err
}
// Mark that the message has been acked unless it is ackProgress
// which can be sent many times.
if !bytes.Equal(ackType, ackProgress) {
m.Lock()
m.ackd = true
m.Unlock()
}
return nil
}
func (m *jetStreamMsg) checkReply() error {
if m == nil || m.msg.Sub == nil {
return ErrMsgNotBound
}
if m.msg.Reply == "" {
return ErrMsgNoReply
}
return nil
}
// checkMsg returns whether the given message is a user message and, if the
// message carries a status header, an appropriate error based on the
// content of the status (404, etc.).
func checkMsg(msg *nats.Msg) (bool, error) {
// If payload or no header, consider this a user message
if len(msg.Data) > 0 || len(msg.Header) == 0 {
return true, nil
}
// Look for status header
val := msg.Header.Get("Status")
descr := msg.Header.Get("Description")
// If not present, then this is considered a user message
if val == "" {
return true, nil
}
switch val {
case badRequest:
return false, ErrBadRequest
case noResponders:
return false, nats.ErrNoResponders
case noMessages:
// 404 indicates that there are no messages.
return false, ErrNoMessages
case reqTimeout:
return false, nats.ErrTimeout
case controlMsg:
return false, nil
case maxBytesExceeded:
if strings.Contains(strings.ToLower(descr), "message size exceeds maxbytes") {
return false, ErrMaxBytesExceeded
}
if strings.Contains(strings.ToLower(descr), "consumer deleted") {
return false, ErrConsumerDeleted
}
if strings.Contains(strings.ToLower(descr), "leadership change") {
return false, ErrConsumerLeadershipChanged
}
}
return false, fmt.Errorf("nats: %s", msg.Header.Get("Description"))
}
func parsePending(msg *nats.Msg) (int, int, error) {
msgsLeftStr := msg.Header.Get("Nats-Pending-Messages")
var msgsLeft int
var err error
if msgsLeftStr != "" {
msgsLeft, err = strconv.Atoi(msgsLeftStr)
if err != nil {
return 0, 0, fmt.Errorf("nats: invalid format of Nats-Pending-Messages")
}
}
bytesLeftStr := msg.Header.Get("Nats-Pending-Bytes")
var bytesLeft int
if bytesLeftStr != "" {
bytesLeft, err = strconv.Atoi(bytesLeftStr)
if err != nil {
return 0, 0, fmt.Errorf("nats: invalid format of Nats-Pending-Bytes")
}
}
return msgsLeft, bytesLeft, nil
}
// toJSMsg converts core [nats.Msg] to [jetStreamMsg], exposing JetStream-specific operations
func (js *jetStream) toJSMsg(msg *nats.Msg) *jetStreamMsg {
return &jetStreamMsg{
msg: msg,
js: js,
}
}

File diff suppressed because it is too large

View File

@@ -1,41 +0,0 @@
// Copyright 2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
// GetObjectShowDeleted makes [ObjectStore.Get] return object even if it was
// marked as deleted.
func GetObjectShowDeleted() GetObjectOpt {
return func(opts *getObjectOpts) error {
opts.showDeleted = true
return nil
}
}
// GetObjectInfoShowDeleted makes [ObjectStore.GetInfo] return object info even
// if it was marked as deleted.
func GetObjectInfoShowDeleted() GetObjectInfoOpt {
return func(opts *getObjectInfoOpts) error {
opts.showDeleted = true
return nil
}
}
// ListObjectsShowDeleted makes [ObjectStore.ListObjects] also return deleted
// objects.
func ListObjectsShowDeleted() ListObjectsOpt {
return func(opts *listObjectOpts) error {
opts.showDeleted = true
return nil
}
}
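// exampleObjectOpts is a sketch (hypothetical helper and object name, not part
// of the original source): it retrieves info and listings that include objects
// marked as deleted.
func exampleObjectOpts(ctx context.Context, obj ObjectStore) error {
	info, err := obj.GetInfo(ctx, "report.pdf", GetObjectInfoShowDeleted())
	if err != nil {
		return err
	}
	fmt.Println(info.Name, info.Deleted)
	_, err = obj.ListObjects(ctx, ListObjectsShowDeleted())
	return err
}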

View File

@@ -1,624 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"errors"
"fmt"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nats.go"
)
type (
orderedConsumer struct {
jetStream *jetStream
cfg *OrderedConsumerConfig
stream string
currentConsumer *pullConsumer
cursor cursor
namePrefix string
serial int
consumerType consumerType
doReset chan struct{}
resetInProgress uint32
userErrHandler ConsumeErrHandlerFunc
stopAfter int
stopAfterMsgsLeft chan int
withStopAfter bool
runningFetch *fetchResult
sync.Mutex
}
orderedSubscription struct {
consumer *orderedConsumer
opts []PullMessagesOpt
done chan struct{}
closed uint32
}
cursor struct {
streamSeq uint64
deliverSeq uint64
}
consumerType int
)
const (
consumerTypeNotSet consumerType = iota
consumerTypeConsume
consumerTypeFetch
)
var errOrderedSequenceMismatch = errors.New("sequence mismatch")
// Consume can be used to continuously receive messages and handle them
// with the provided callback function. Consume cannot be used concurrently
// when using an ordered consumer.
//
// See [Consumer.Consume] for more details.
func (c *orderedConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) {
if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
err := c.reset()
if err != nil {
return nil, err
}
} else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
return nil, ErrOrderedConsumerConcurrentRequests
}
if c.consumerType == consumerTypeFetch {
return nil, ErrOrderConsumerUsedAsFetch
}
c.consumerType = consumerTypeConsume
consumeOpts, err := parseConsumeOpts(true, opts...)
if err != nil {
return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
}
c.userErrHandler = consumeOpts.ErrHandler
opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial)))
if consumeOpts.StopAfter > 0 {
c.withStopAfter = true
c.stopAfter = consumeOpts.StopAfter
}
c.stopAfterMsgsLeft = make(chan int, 1)
if c.stopAfter > 0 {
opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
}
sub := &orderedSubscription{
consumer: c,
done: make(chan struct{}, 1),
}
internalHandler := func(serial int) func(msg Msg) {
return func(msg Msg) {
// handler is a noop if the message was delivered for a consumer with a different serial
if serial != c.serial {
return
}
meta, err := msg.Metadata()
if err != nil {
sub, ok := c.currentConsumer.getSubscription("")
if !ok {
return
}
c.errHandler(serial)(sub, err)
return
}
dseq := meta.Sequence.Consumer
if dseq != c.cursor.deliverSeq+1 {
sub, ok := c.currentConsumer.getSubscription("")
if !ok {
return
}
c.errHandler(serial)(sub, errOrderedSequenceMismatch)
return
}
c.cursor.deliverSeq = dseq
c.cursor.streamSeq = meta.Sequence.Stream
handler(msg)
}
}
_, err = c.currentConsumer.Consume(internalHandler(c.serial), opts...)
if err != nil {
return nil, err
}
go func() {
for {
select {
case <-c.doReset:
if err := c.reset(); err != nil {
sub, ok := c.currentConsumer.getSubscription("")
if !ok {
return
}
c.errHandler(c.serial)(sub, err)
}
if c.withStopAfter {
select {
case c.stopAfter = <-c.stopAfterMsgsLeft:
default:
}
if c.stopAfter <= 0 {
sub.Stop()
return
}
}
if c.stopAfter > 0 {
opts = opts[:len(opts)-2]
} else {
opts = opts[:len(opts)-1]
}
// overwrite the previous err handler to use the new serial
opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial)))
if c.withStopAfter {
opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
}
if _, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...); err != nil {
sub, ok := c.currentConsumer.getSubscription("")
if !ok {
return
}
c.errHandler(c.serial)(sub, err)
}
case <-sub.done:
return
case msgsLeft, ok := <-c.stopAfterMsgsLeft:
if !ok {
close(sub.done)
}
c.stopAfter = msgsLeft
return
}
}
}()
return sub, nil
}
func (c *orderedConsumer) errHandler(serial int) func(cc ConsumeContext, err error) {
return func(cc ConsumeContext, err error) {
c.Lock()
defer c.Unlock()
if c.userErrHandler != nil && !errors.Is(err, errOrderedSequenceMismatch) {
c.userErrHandler(cc, err)
}
if errors.Is(err, ErrNoHeartbeat) ||
errors.Is(err, errOrderedSequenceMismatch) ||
errors.Is(err, ErrConsumerDeleted) ||
errors.Is(err, ErrConsumerNotFound) {
// only reset if serial matches the current consumer serial and there is no reset in progress
if serial == c.serial && atomic.LoadUint32(&c.resetInProgress) == 0 {
atomic.StoreUint32(&c.resetInProgress, 1)
c.doReset <- struct{}{}
}
}
}
}
// Messages returns MessagesContext, allowing continuous iteration over
// messages on a stream. Messages cannot be used concurrently when using
// an ordered consumer.
//
// See [Consumer.Messages] for more details.
func (c *orderedConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) {
if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
err := c.reset()
if err != nil {
return nil, err
}
} else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
return nil, ErrOrderedConsumerConcurrentRequests
}
if c.consumerType == consumerTypeFetch {
return nil, ErrOrderConsumerUsedAsFetch
}
c.consumerType = consumerTypeConsume
consumeOpts, err := parseMessagesOpts(true, opts...)
if err != nil {
return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
}
opts = append(opts, WithMessagesErrOnMissingHeartbeat(true))
c.stopAfterMsgsLeft = make(chan int, 1)
if consumeOpts.StopAfter > 0 {
c.withStopAfter = true
c.stopAfter = consumeOpts.StopAfter
}
c.userErrHandler = consumeOpts.ErrHandler
if c.stopAfter > 0 {
opts = append(opts, messagesStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
}
_, err = c.currentConsumer.Messages(opts...)
if err != nil {
return nil, err
}
sub := &orderedSubscription{
consumer: c,
opts: opts,
done: make(chan struct{}, 1),
}
return sub, nil
}
func (s *orderedSubscription) Next() (Msg, error) {
for {
currentConsumer := s.consumer.currentConsumer
sub, ok := currentConsumer.getSubscription("")
if !ok {
return nil, ErrMsgIteratorClosed
}
msg, err := sub.Next()
if err != nil {
if errors.Is(err, ErrMsgIteratorClosed) {
s.Stop()
return nil, err
}
if s.consumer.withStopAfter {
select {
case s.consumer.stopAfter = <-s.consumer.stopAfterMsgsLeft:
default:
}
if s.consumer.stopAfter <= 0 {
s.Stop()
return nil, ErrMsgIteratorClosed
}
s.opts[len(s.opts)-1] = StopAfter(s.consumer.stopAfter)
}
if err := s.consumer.reset(); err != nil {
return nil, err
}
_, err := s.consumer.currentConsumer.Messages(s.opts...)
if err != nil {
return nil, err
}
continue
}
meta, err := msg.Metadata()
if err != nil {
s.consumer.errHandler(s.consumer.serial)(sub, err)
continue
}
serial := serialNumberFromConsumer(meta.Consumer)
dseq := meta.Sequence.Consumer
if dseq != s.consumer.cursor.deliverSeq+1 {
s.consumer.errHandler(serial)(sub, errOrderedSequenceMismatch)
continue
}
s.consumer.cursor.deliverSeq = dseq
s.consumer.cursor.streamSeq = meta.Sequence.Stream
return msg, nil
}
}
func (s *orderedSubscription) Stop() {
if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
return
}
sub, ok := s.consumer.currentConsumer.getSubscription("")
if !ok {
return
}
s.consumer.currentConsumer.Lock()
defer s.consumer.currentConsumer.Unlock()
sub.Stop()
close(s.done)
}
func (s *orderedSubscription) Drain() {
if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
return
}
sub, ok := s.consumer.currentConsumer.getSubscription("")
if !ok {
return
}
s.consumer.currentConsumer.Lock()
defer s.consumer.currentConsumer.Unlock()
sub.Drain()
close(s.done)
}
// Fetch is used to retrieve up to a provided number of messages from a
// stream. This method will always send a single request and wait until
// either all messages are retrieved or request times out.
//
// It is not efficient to use Fetch on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) {
if c.consumerType == consumerTypeConsume {
return nil, ErrOrderConsumerUsedAsConsume
}
c.currentConsumer.Lock()
if c.runningFetch != nil {
if !c.runningFetch.done {
c.currentConsumer.Unlock()
return nil, ErrOrderedConsumerConcurrentRequests
}
c.cursor.streamSeq = c.runningFetch.sseq
}
c.currentConsumer.Unlock()
c.consumerType = consumerTypeFetch
err := c.reset()
if err != nil {
return nil, err
}
msgs, err := c.currentConsumer.Fetch(batch, opts...)
if err != nil {
return nil, err
}
c.runningFetch = msgs.(*fetchResult)
return msgs, nil
}
// FetchBytes is used to retrieve up to a provided number of bytes from the
// stream. This method will always send a single request and wait until the
// provided number of bytes is exceeded or the request times out.
//
// It is not efficient to use FetchBytes on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) {
if c.consumerType == consumerTypeConsume {
return nil, ErrOrderConsumerUsedAsConsume
}
if c.runningFetch != nil {
if !c.runningFetch.done {
return nil, ErrOrderedConsumerConcurrentRequests
}
c.cursor.streamSeq = c.runningFetch.sseq
}
c.consumerType = consumerTypeFetch
err := c.reset()
if err != nil {
return nil, err
}
msgs, err := c.currentConsumer.FetchBytes(maxBytes, opts...)
if err != nil {
return nil, err
}
c.runningFetch = msgs.(*fetchResult)
return msgs, nil
}
// FetchNoWait is used to retrieve up to a provided number of messages
// from a stream. This method will always send a single request and
// immediately return up to the provided number of messages that are
// currently available.
//
// It is not efficient to use FetchNoWait on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) FetchNoWait(batch int) (MessageBatch, error) {
if c.consumerType == consumerTypeConsume {
return nil, ErrOrderConsumerUsedAsConsume
}
if c.runningFetch != nil && !c.runningFetch.done {
return nil, ErrOrderedConsumerConcurrentRequests
}
c.consumerType = consumerTypeFetch
err := c.reset()
if err != nil {
return nil, err
}
return c.currentConsumer.FetchNoWait(batch)
}
// Next is used to retrieve the next message from the stream. This
// method will block until the message is retrieved or timeout is
// reached.
//
// It is not efficient to use Next on an ordered consumer, as it will
// reset the consumer for each subsequent Fetch call.
// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
func (c *orderedConsumer) Next(opts ...FetchOpt) (Msg, error) {
res, err := c.Fetch(1, opts...)
if err != nil {
return nil, err
}
msg := <-res.Messages()
if msg != nil {
return msg, nil
}
if res.Error() == nil {
return nil, nats.ErrTimeout
}
return nil, res.Error()
}
func serialNumberFromConsumer(name string) int {
if len(name) == 0 {
return 0
}
serial, err := strconv.Atoi(name[len(name)-1:])
if err != nil {
return 0
}
return serial
}
func (c *orderedConsumer) reset() error {
c.Lock()
defer c.Unlock()
defer atomic.StoreUint32(&c.resetInProgress, 0)
if c.currentConsumer != nil {
sub, ok := c.currentConsumer.getSubscription("")
c.currentConsumer.Lock()
if ok {
sub.Stop()
}
consName := c.currentConsumer.CachedInfo().Name
c.currentConsumer.Unlock()
var err error
for i := 0; ; i++ {
if c.cfg.MaxResetAttempts > 0 && i == c.cfg.MaxResetAttempts {
return fmt.Errorf("%w: maximum number of delete attempts reached: %s", ErrOrderedConsumerReset, err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
err = c.jetStream.DeleteConsumer(ctx, c.stream, consName)
cancel()
if err != nil {
if errors.Is(err, ErrConsumerNotFound) {
break
}
if errors.Is(err, nats.ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
continue
}
return err
}
break
}
}
seq := c.cursor.streamSeq + 1
c.cursor.deliverSeq = 0
consumerConfig := c.getConsumerConfigForSeq(seq)
var err error
var cons Consumer
for i := 0; ; i++ {
if c.cfg.MaxResetAttempts > 0 && i == c.cfg.MaxResetAttempts {
return fmt.Errorf("%w: maximum number of create consumer attempts reached: %s", ErrOrderedConsumerReset, err)
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
cons, err = c.jetStream.CreateOrUpdateConsumer(ctx, c.stream, *consumerConfig)
if err != nil {
if errors.Is(err, ErrConsumerNotFound) {
cancel()
break
}
if errors.Is(err, nats.ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
cancel()
continue
}
cancel()
return err
}
cancel()
break
}
c.currentConsumer = cons.(*pullConsumer)
return nil
}
func (c *orderedConsumer) getConsumerConfigForSeq(seq uint64) *ConsumerConfig {
c.serial++
name := fmt.Sprintf("%s_%d", c.namePrefix, c.serial)
cfg := &ConsumerConfig{
Name: name,
DeliverPolicy: DeliverByStartSequencePolicy,
OptStartSeq: seq,
AckPolicy: AckNonePolicy,
InactiveThreshold: 5 * time.Minute,
Replicas: 1,
HeadersOnly: c.cfg.HeadersOnly,
MemoryStorage: true,
}
if len(c.cfg.FilterSubjects) == 1 {
cfg.FilterSubject = c.cfg.FilterSubjects[0]
} else {
cfg.FilterSubjects = c.cfg.FilterSubjects
}
if seq != c.cfg.OptStartSeq+1 {
return cfg
}
// initial request, some options may be modified at that point
cfg.DeliverPolicy = c.cfg.DeliverPolicy
if c.cfg.DeliverPolicy == DeliverLastPerSubjectPolicy ||
c.cfg.DeliverPolicy == DeliverLastPolicy ||
c.cfg.DeliverPolicy == DeliverNewPolicy ||
c.cfg.DeliverPolicy == DeliverAllPolicy {
cfg.OptStartSeq = 0
}
if cfg.DeliverPolicy == DeliverLastPerSubjectPolicy && len(c.cfg.FilterSubjects) == 0 {
cfg.FilterSubjects = []string{">"}
}
if c.cfg.OptStartTime != nil {
cfg.OptStartSeq = 0
cfg.DeliverPolicy = DeliverByStartTimePolicy
cfg.OptStartTime = c.cfg.OptStartTime
}
if c.cfg.InactiveThreshold != 0 {
cfg.InactiveThreshold = c.cfg.InactiveThreshold
}
return cfg
}
func consumeStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullConsumeOpt {
return pullOptFunc(func(opts *consumeOpts) error {
opts.StopAfter = numMsgs
opts.stopAfterMsgsLeft = msgsLeftAfterStop
return nil
})
}
func messagesStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullMessagesOpt {
return pullOptFunc(func(opts *consumeOpts) error {
opts.StopAfter = numMsgs
opts.stopAfterMsgsLeft = msgsLeftAfterStop
return nil
})
}
// Info returns information about the ordered consumer.
// Note that this method will fetch the latest instance of the
// consumer from the server; that consumer can be deleted by the library at any time.
func (c *orderedConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
c.Lock()
defer c.Unlock()
if c.currentConsumer == nil {
return nil, ErrOrderedConsumerNotCreated
}
infoSubject := apiSubj(c.jetStream.apiPrefix, fmt.Sprintf(apiConsumerInfoT, c.stream, c.currentConsumer.name))
var resp consumerInfoResponse
if _, err := c.jetStream.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
return nil, err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
return nil, ErrConsumerNotFound
}
return nil, resp.Error
}
if resp.Error == nil && resp.ConsumerInfo == nil {
return nil, ErrConsumerNotFound
}
c.currentConsumer.info = resp.ConsumerInfo
return resp.ConsumerInfo, nil
}
// CachedInfo returns cached information about the consumer currently
// used by the ordered consumer. Cached info will be updated on every call
// to [Consumer.Info] or on consumer reset.
func (c *orderedConsumer) CachedInfo() *ConsumerInfo {
c.Lock()
defer c.Unlock()
if c.currentConsumer == nil {
return nil
}
return c.currentConsumer.info
}
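// exampleOrdered is a sketch (hypothetical helper and subject, not part of the
// original source): it creates an ordered consumer on a stream and consumes in
// order; the library recreates the underlying ephemeral consumer transparently
// on failures.
func exampleOrdered(ctx context.Context, s Stream) (ConsumeContext, error) {
	cons, err := s.OrderedConsumer(ctx, OrderedConsumerConfig{
		FilterSubjects: []string{"ORDERS.>"},
	})
	if err != nil {
		return nil, err
	}
	// Ordered consumers use AckNone, so messages need not be acked.
	return cons.Consume(func(msg Msg) {
		fmt.Println(string(msg.Data()))
	})
}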

View File

@@ -1,581 +0,0 @@
// Copyright 2022-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"math/rand"
"strconv"
"strings"
"sync"
"time"
"github.com/nats-io/nats.go"
"github.com/nats-io/nuid"
)
type (
asyncPublisherOpts struct {
// For async publish error handling.
aecb MsgErrHandler
// Max async pub ack in flight
maxpa int
}
// PublishOpt are the options that can be passed to Publish methods.
PublishOpt func(*pubOpts) error
pubOpts struct {
id string
lastMsgID string // Expected last msgId
stream string // Expected stream name
lastSeq *uint64 // Expected last sequence
lastSubjectSeq *uint64 // Expected last sequence per subject
// Publish retries for NoResponders err.
retryWait time.Duration // Retry wait between attempts
retryAttempts int // Retry attempts
// stallWait is the max wait of a async pub ack.
stallWait time.Duration
// internal option to re-use existing paf in case of retry.
pafRetry *pubAckFuture
}
// PubAckFuture is a future for a PubAck.
// It can be used to wait for a PubAck or an error after an async publish.
PubAckFuture interface {
// Ok returns a receive only channel that can be used to get a PubAck.
Ok() <-chan *PubAck
// Err returns a receive only channel that can be used to get the error from an async publish.
Err() <-chan error
// Msg returns the message that was sent to the server.
Msg() *nats.Msg
}
pubAckFuture struct {
jsClient *jetStreamClient
msg *nats.Msg
retries int
maxRetries int
retryWait time.Duration
ack *PubAck
err error
errCh chan error
doneCh chan *PubAck
}
jetStreamClient struct {
asyncPublishContext
asyncPublisherOpts
}
// MsgErrHandler is used to process asynchronous errors from JetStream
// PublishAsync. It is passed the original message that was sent to the server,
// for possible retransmission, and the error encountered.
MsgErrHandler func(JetStream, *nats.Msg, error)
asyncPublishContext struct {
sync.RWMutex
replyPrefix string
replySub *nats.Subscription
acks map[string]*pubAckFuture
stallCh chan struct{}
doneCh chan struct{}
rr *rand.Rand
// channel to signal when server is disconnected or conn is closed
connStatusCh chan (nats.Status)
}
pubAckResponse struct {
apiResponse
*PubAck
}
// PubAck is an ack received after successfully publishing a message.
PubAck struct {
// Stream is the stream name the message was published to.
Stream string `json:"stream"`
// Sequence is the stream sequence number of the message.
Sequence uint64 `json:"seq"`
// Duplicate indicates whether the message was a duplicate.
// Duplicate can be detected using the [MsgIDHeader] and [StreamConfig.Duplicates].
Duplicate bool `json:"duplicate,omitempty"`
// Domain is the domain the message was published to.
Domain string `json:"domain,omitempty"`
}
)
const (
// Default time wait between retries on Publish if err is ErrNoResponders.
DefaultPubRetryWait = 250 * time.Millisecond
// Default number of retries
DefaultPubRetryAttempts = 2
)
const (
statusHdr = "Status"
rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
base = 62
)
// Publish performs a synchronous publish to a stream and waits for ack
// from server. It accepts subject name (which must be bound to a stream)
// and message payload.
func (js *jetStream) Publish(ctx context.Context, subj string, data []byte, opts ...PublishOpt) (*PubAck, error) {
return js.PublishMsg(ctx, &nats.Msg{Subject: subj, Data: data}, opts...)
}
// PublishMsg performs a synchronous publish to a stream and waits for
// ack from server. It accepts subject name (which must be bound to a
// stream) and nats.Message.
func (js *jetStream) PublishMsg(ctx context.Context, m *nats.Msg, opts ...PublishOpt) (*PubAck, error) {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
o := pubOpts{
retryWait: DefaultPubRetryWait,
retryAttempts: DefaultPubRetryAttempts,
}
if len(opts) > 0 {
if m.Header == nil {
m.Header = nats.Header{}
}
for _, opt := range opts {
if err := opt(&o); err != nil {
return nil, err
}
}
}
if o.stallWait > 0 {
return nil, fmt.Errorf("%w: stall wait cannot be set to sync publish", ErrInvalidOption)
}
if o.id != "" {
m.Header.Set(MsgIDHeader, o.id)
}
if o.lastMsgID != "" {
m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
}
if o.stream != "" {
m.Header.Set(ExpectedStreamHeader, o.stream)
}
if o.lastSeq != nil {
m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
}
if o.lastSubjectSeq != nil {
m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
}
var resp *nats.Msg
var err error
resp, err = js.conn.RequestMsgWithContext(ctx, m)
if err != nil {
for r := 0; errors.Is(err, nats.ErrNoResponders) && (r < o.retryAttempts || o.retryAttempts < 0); r++ {
// To protect against small blips in leadership changes etc, if we get a no responders here retry.
select {
case <-ctx.Done():
case <-time.After(o.retryWait):
}
resp, err = js.conn.RequestMsgWithContext(ctx, m)
}
if err != nil {
if errors.Is(err, nats.ErrNoResponders) {
return nil, ErrNoStreamResponse
}
return nil, err
}
}
var ackResp pubAckResponse
if err := json.Unmarshal(resp.Data, &ackResp); err != nil {
return nil, ErrInvalidJSAck
}
if ackResp.Error != nil {
return nil, fmt.Errorf("nats: %w", ackResp.Error)
}
if ackResp.PubAck == nil || ackResp.PubAck.Stream == "" {
return nil, ErrInvalidJSAck
}
return ackResp.PubAck, nil
}
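// examplePublishSync is a sketch (hypothetical helper and subject, not part of
// the original source): a synchronous publish bounded by a context deadline;
// the returned PubAck carries the stream name and assigned sequence.
func examplePublishSync(js JetStream) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ack, err := js.Publish(ctx, "ORDERS.new", []byte("order"))
	if err != nil {
		return err
	}
	fmt.Printf("stored in %s at seq %d\n", ack.Stream, ack.Sequence)
	return nil
}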
// PublishAsync performs an asynchronous publish to a stream and returns
// [PubAckFuture] interface. It accepts subject name (which must be bound
// to a stream) and message payload.
func (js *jetStream) PublishAsync(subj string, data []byte, opts ...PublishOpt) (PubAckFuture, error) {
return js.PublishMsgAsync(&nats.Msg{Subject: subj, Data: data}, opts...)
}
// PublishMsgAsync performs an asynchronous publish to a stream and
// returns [PubAckFuture] interface. It accepts subject name (which must
// be bound to a stream) and nats.Message.
func (js *jetStream) PublishMsgAsync(m *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) {
o := pubOpts{
retryWait: DefaultPubRetryWait,
retryAttempts: DefaultPubRetryAttempts,
}
if len(opts) > 0 {
if m.Header == nil {
m.Header = nats.Header{}
}
for _, opt := range opts {
if err := opt(&o); err != nil {
return nil, err
}
}
}
defaultStallWait := 200 * time.Millisecond
stallWait := defaultStallWait
if o.stallWait > 0 {
stallWait = o.stallWait
}
if o.id != "" {
m.Header.Set(MsgIDHeader, o.id)
}
if o.lastMsgID != "" {
m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
}
if o.stream != "" {
m.Header.Set(ExpectedStreamHeader, o.stream)
}
if o.lastSeq != nil {
m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
}
if o.lastSubjectSeq != nil {
m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
}
paf := o.pafRetry
if paf == nil && m.Reply != "" {
return nil, ErrAsyncPublishReplySubjectSet
}
var id string
// register new paf if not retrying
if paf == nil {
var err error
m.Reply, err = js.newAsyncReply()
defer func() { m.Reply = "" }()
if err != nil {
return nil, fmt.Errorf("nats: error creating async reply handler: %s", err)
}
id = m.Reply[js.replyPrefixLen:]
paf = &pubAckFuture{msg: m, jsClient: js.publisher, maxRetries: o.retryAttempts, retryWait: o.retryWait}
numPending, maxPending := js.registerPAF(id, paf)
if maxPending > 0 && numPending > maxPending {
select {
case <-js.asyncStall():
case <-time.After(stallWait):
js.clearPAF(id)
return nil, ErrTooManyStalledMsgs
}
}
} else {
// when retrying, get the ID from existing reply subject
id = m.Reply[js.replyPrefixLen:]
}
if err := js.conn.PublishMsg(m); err != nil {
js.clearPAF(id)
return nil, err
}
return paf, nil
}
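// examplePublishAsyncWait is a sketch (hypothetical helper and subject, not
// part of the original source): it publishes asynchronously and waits on the
// returned future for either an ack or an error.
func examplePublishAsyncWait(js JetStream) error {
	paf, err := js.PublishAsync("ORDERS.new", []byte("order"))
	if err != nil {
		return err
	}
	select {
	case ack := <-paf.Ok():
		fmt.Printf("acked at seq %d\n", ack.Sequence)
		return nil
	case err := <-paf.Err():
		return err
	}
}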
// For quick token lookup etc.
const (
aReplyTokensize = 6
)
func (js *jetStream) newAsyncReply() (string, error) {
js.publisher.Lock()
if js.publisher.replySub == nil {
// Create our wildcard reply subject.
sha := sha256.New()
sha.Write([]byte(nuid.Next()))
b := sha.Sum(nil)
for i := 0; i < aReplyTokensize; i++ {
b[i] = rdigits[int(b[i]%base)]
}
js.publisher.replyPrefix = fmt.Sprintf("%s%s.", js.replyPrefix, b[:aReplyTokensize])
sub, err := js.conn.Subscribe(fmt.Sprintf("%s*", js.publisher.replyPrefix), js.handleAsyncReply)
if err != nil {
js.publisher.Unlock()
return "", err
}
js.publisher.replySub = sub
js.publisher.rr = rand.New(rand.NewSource(time.Now().UnixNano()))
}
if js.publisher.connStatusCh == nil {
js.publisher.connStatusCh = js.conn.StatusChanged(nats.RECONNECTING, nats.CLOSED)
go js.resetPendingAcksOnReconnect()
}
var sb strings.Builder
sb.WriteString(js.publisher.replyPrefix)
rn := js.publisher.rr.Int63()
var b [aReplyTokensize]byte
for i, l := 0, rn; i < len(b); i++ {
b[i] = rdigits[l%base]
l /= base
}
sb.Write(b[:])
js.publisher.Unlock()
return sb.String(), nil
}
// Handle an async reply from PublishAsync.
func (js *jetStream) handleAsyncReply(m *nats.Msg) {
if len(m.Subject) <= js.replyPrefixLen {
return
}
id := m.Subject[js.replyPrefixLen:]
js.publisher.Lock()
paf := js.getPAF(id)
if paf == nil {
js.publisher.Unlock()
return
}
doErr := func(err error) {
paf.err = err
if paf.errCh != nil {
paf.errCh <- paf.err
}
cb := js.publisher.asyncPublisherOpts.aecb
js.publisher.Unlock()
if cb != nil {
paf.msg.Reply = ""
cb(js, paf.msg, err)
}
}
// Process no responders etc.
if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
if paf.retries < paf.maxRetries {
paf.retries++
paf.msg.Reply = m.Subject
time.AfterFunc(paf.retryWait, func() {
js.publisher.Lock()
paf := js.getPAF(id)
js.publisher.Unlock()
if paf == nil {
return
}
_, err := js.PublishMsgAsync(paf.msg, func(po *pubOpts) error {
po.pafRetry = paf
return nil
})
if err != nil {
js.publisher.Lock()
doErr(err)
}
})
js.publisher.Unlock()
return
}
delete(js.publisher.acks, id)
doErr(ErrNoStreamResponse)
return
}
// Remove
delete(js.publisher.acks, id)
// Check on anyone stalled and waiting.
if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.asyncPublisherOpts.maxpa {
close(js.publisher.stallCh)
js.publisher.stallCh = nil
}
// Check on anyone waiting on done status.
if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 {
dch := js.publisher.doneCh
js.publisher.doneCh = nil
// Defer here so error is processed and can be checked.
defer close(dch)
}
var pa pubAckResponse
if err := json.Unmarshal(m.Data, &pa); err != nil {
doErr(ErrInvalidJSAck)
return
}
if pa.Error != nil {
doErr(pa.Error)
return
}
if pa.PubAck == nil || pa.PubAck.Stream == "" {
doErr(ErrInvalidJSAck)
return
}
// So here we have received a proper puback.
paf.ack = pa.PubAck
if paf.doneCh != nil {
paf.doneCh <- paf.ack
}
js.publisher.Unlock()
}
func (js *jetStream) resetPendingAcksOnReconnect() {
js.publisher.Lock()
connStatusCh := js.publisher.connStatusCh
js.publisher.Unlock()
for {
newStatus, ok := <-connStatusCh
if !ok || newStatus == nats.CLOSED {
return
}
js.publisher.Lock()
errCb := js.publisher.asyncPublisherOpts.aecb
for id, paf := range js.publisher.acks {
paf.err = nats.ErrDisconnected
if paf.errCh != nil {
paf.errCh <- paf.err
}
if errCb != nil {
js.publisher.Unlock()
// clear reply subject so that new one is created on republish
paf.msg.Reply = ""
errCb(js, paf.msg, nats.ErrDisconnected)
js.publisher.Lock()
}
delete(js.publisher.acks, id)
}
if js.publisher.doneCh != nil {
close(js.publisher.doneCh)
js.publisher.doneCh = nil
}
js.publisher.Unlock()
}
}
// registerPAF will register for a PubAckFuture.
func (js *jetStream) registerPAF(id string, paf *pubAckFuture) (int, int) {
js.publisher.Lock()
if js.publisher.acks == nil {
js.publisher.acks = make(map[string]*pubAckFuture)
}
js.publisher.acks[id] = paf
np := len(js.publisher.acks)
maxpa := js.publisher.asyncPublisherOpts.maxpa
js.publisher.Unlock()
return np, maxpa
}
// Lock should be held.
func (js *jetStream) getPAF(id string) *pubAckFuture {
if js.publisher.acks == nil {
return nil
}
return js.publisher.acks[id]
}
// clearPAF will remove a PubAckFuture that was registered.
func (js *jetStream) clearPAF(id string) {
js.publisher.Lock()
delete(js.publisher.acks, id)
js.publisher.Unlock()
}
func (js *jetStream) asyncStall() <-chan struct{} {
js.publisher.Lock()
if js.publisher.stallCh == nil {
js.publisher.stallCh = make(chan struct{})
}
stc := js.publisher.stallCh
js.publisher.Unlock()
return stc
}
func (paf *pubAckFuture) Ok() <-chan *PubAck {
paf.jsClient.Lock()
defer paf.jsClient.Unlock()
if paf.doneCh == nil {
paf.doneCh = make(chan *PubAck, 1)
if paf.ack != nil {
paf.doneCh <- paf.ack
}
}
return paf.doneCh
}
func (paf *pubAckFuture) Err() <-chan error {
paf.jsClient.Lock()
defer paf.jsClient.Unlock()
if paf.errCh == nil {
paf.errCh = make(chan error, 1)
if paf.err != nil {
paf.errCh <- paf.err
}
}
return paf.errCh
}
func (paf *pubAckFuture) Msg() *nats.Msg {
paf.jsClient.RLock()
defer paf.jsClient.RUnlock()
return paf.msg
}
// PublishAsyncPending returns the number of async publishes outstanding
// for this context.
func (js *jetStream) PublishAsyncPending() int {
js.publisher.RLock()
defer js.publisher.RUnlock()
return len(js.publisher.acks)
}
// PublishAsyncComplete returns a channel that will be closed when all
// outstanding asynchronously published messages are acknowledged by the
// server.
func (js *jetStream) PublishAsyncComplete() <-chan struct{} {
js.publisher.Lock()
defer js.publisher.Unlock()
if js.publisher.doneCh == nil {
js.publisher.doneCh = make(chan struct{})
}
dch := js.publisher.doneCh
if len(js.publisher.acks) == 0 {
close(js.publisher.doneCh)
js.publisher.doneCh = nil
}
return dch
}
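// examplePublishAsyncBatch is a sketch (hypothetical helper and subject, not
// part of the original source): it fires a batch of async publishes and bounds
// the wait for all outstanding acks.
func examplePublishAsyncBatch(js JetStream) error {
	for i := 0; i < 100; i++ {
		if _, err := js.PublishAsync("ORDERS.new", []byte("order")); err != nil {
			return err
		}
	}
	select {
	case <-js.PublishAsyncComplete():
		return nil
	case <-time.After(5 * time.Second):
		return errors.New("timed out waiting for publish acks")
	}
}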

File diff suppressed because it is too large

View File

@@ -1,719 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"
"github.com/nats-io/nats.go"
"github.com/nats-io/nuid"
)
type (
// Stream contains CRUD methods on a consumer via [ConsumerManager], as well
// as operations on an existing stream. It allows fetching and removing
// messages from a stream, as well as purging a stream.
Stream interface {
ConsumerManager
// Info returns StreamInfo from the server.
Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error)
// CachedInfo returns StreamInfo currently cached on this stream.
// This method does not perform any network requests. The cached
// StreamInfo is updated on every call to Info and Update.
CachedInfo() *StreamInfo
// Purge removes messages from a stream. It is a destructive operation.
// Use with caution. See StreamPurgeOpt for available options.
Purge(ctx context.Context, opts ...StreamPurgeOpt) error
// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error)
// GetLastMsgForSubject retrieves the last raw stream message stored in
// JetStream on a given subject.
GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error)
// DeleteMsg deletes a message from a stream.
// On the server, the message is marked as erased, but not overwritten.
DeleteMsg(ctx context.Context, seq uint64) error
// SecureDeleteMsg deletes a message from a stream. The deleted message
// is overwritten with random data. As a result, this operation is slower
// than DeleteMsg.
SecureDeleteMsg(ctx context.Context, seq uint64) error
}
// ConsumerManager provides CRUD API for managing consumers. It is
// available as a part of [Stream] interface. CreateConsumer,
// UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a
// [Consumer] interface, allowing to operate on a consumer (e.g. consume
// messages).
ConsumerManager interface {
// CreateOrUpdateConsumer creates a consumer on a given stream with
// given config. If consumer already exists, it will be updated (if
// possible). Consumer interface is returned, allowing to operate on a
// consumer (e.g. fetch messages).
CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
// CreateConsumer creates a consumer on a given stream with given
// config. If consumer already exists and the provided configuration
// differs from its configuration, ErrConsumerExists is returned. If the
// provided configuration is the same as the existing consumer, the
// existing consumer is returned. A Consumer interface is returned,
// allowing the caller to operate on a consumer (e.g. fetch messages).
CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
// UpdateConsumer updates an existing consumer. If consumer does not
// exist, ErrConsumerDoesNotExist is returned. Consumer interface is
// returned, allowing the caller to operate on a consumer (e.g. fetch messages).
UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
// OrderedConsumer returns an OrderedConsumer instance. Ordered consumers
// are managed by the library and provide a simple way to consume
// messages from a stream. Ordered consumers are ephemeral in-memory
// pull consumers and are resilient to deletes and restarts.
OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error)
// Consumer returns an interface to an existing consumer, allowing processing
// of messages. If consumer does not exist, ErrConsumerNotFound is
// returned.
Consumer(ctx context.Context, consumer string) (Consumer, error)
// DeleteConsumer removes a consumer with given name from a stream.
// If consumer does not exist, ErrConsumerNotFound is returned.
DeleteConsumer(ctx context.Context, consumer string) error
// ListConsumers returns ConsumerInfoLister enabling iterating over a
// channel of consumer infos.
ListConsumers(context.Context) ConsumerInfoLister
// ConsumerNames returns a ConsumerNameLister enabling iterating over a
// channel of consumer names.
ConsumerNames(context.Context) ConsumerNameLister
}
RawStreamMsg struct {
Subject string
Sequence uint64
Header nats.Header
Data []byte
Time time.Time
}
stream struct {
name string
info *StreamInfo
jetStream *jetStream
}
// StreamInfoOpt is a function setting options for [Stream.Info]
StreamInfoOpt func(*streamInfoRequest) error
streamInfoRequest struct {
apiPaged
DeletedDetails bool `json:"deleted_details,omitempty"`
SubjectFilter string `json:"subjects_filter,omitempty"`
}
consumerInfoResponse struct {
apiResponse
*ConsumerInfo
}
// StreamPurgeOpt is a function setting options for [Stream.Purge]
StreamPurgeOpt func(*StreamPurgeRequest) error
// StreamPurgeRequest is an API request body to purge a stream.
StreamPurgeRequest struct {
// Purge up to but not including sequence.
Sequence uint64 `json:"seq,omitempty"`
// Subject to match against messages for the purge command.
Subject string `json:"filter,omitempty"`
// Number of messages to keep.
Keep uint64 `json:"keep,omitempty"`
}
streamPurgeResponse struct {
apiResponse
Success bool `json:"success,omitempty"`
Purged uint64 `json:"purged"`
}
consumerDeleteResponse struct {
apiResponse
Success bool `json:"success,omitempty"`
}
// GetMsgOpt is a function setting options for [Stream.GetMsg]
GetMsgOpt func(*apiMsgGetRequest) error
apiMsgGetRequest struct {
Seq uint64 `json:"seq,omitempty"`
LastFor string `json:"last_by_subj,omitempty"`
NextFor string `json:"next_by_subj,omitempty"`
}
// apiMsgGetResponse is the response for a Stream get request.
apiMsgGetResponse struct {
apiResponse
Message *storedMsg `json:"message,omitempty"`
}
// storedMsg is a raw message stored in JetStream.
storedMsg struct {
Subject string `json:"subject"`
Sequence uint64 `json:"seq"`
Header []byte `json:"hdrs,omitempty"`
Data []byte `json:"data,omitempty"`
Time time.Time `json:"time"`
}
msgDeleteRequest struct {
Seq uint64 `json:"seq"`
NoErase bool `json:"no_erase,omitempty"`
}
msgDeleteResponse struct {
apiResponse
Success bool `json:"success,omitempty"`
}
// ConsumerInfoLister is used to iterate over a channel of consumer infos.
// Err method can be used to check for errors encountered during iteration.
// Info channel is always closed and therefore can be used in a range loop.
ConsumerInfoLister interface {
Info() <-chan *ConsumerInfo
Err() error
}
// ConsumerNameLister is used to iterate over a channel of consumer names.
// Err method can be used to check for errors encountered during iteration.
// Name channel is always closed and therefore can be used in a range loop.
ConsumerNameLister interface {
Name() <-chan string
Err() error
}
consumerLister struct {
js *jetStream
offset int
pageInfo *apiPaged
consumers chan *ConsumerInfo
names chan string
err error
}
consumerListResponse struct {
apiResponse
apiPaged
Consumers []*ConsumerInfo `json:"consumers"`
}
consumerNamesResponse struct {
apiResponse
apiPaged
Consumers []string `json:"consumers"`
}
)
// CreateOrUpdateConsumer creates a consumer on a given stream with
// given config. If consumer already exists, it will be updated (if
// possible). A Consumer interface is returned, allowing the caller to
// operate on a consumer (e.g. fetch messages).
func (s *stream) CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionCreateOrUpdate)
}
// CreateConsumer creates a consumer on a given stream with given
// config. If consumer already exists and the provided configuration
// differs from its configuration, ErrConsumerExists is returned. If the
// provided configuration is the same as the existing consumer, the
// existing consumer is returned. A Consumer interface is returned,
// allowing the caller to operate on a consumer (e.g. fetch messages).
func (s *stream) CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionCreate)
}
// UpdateConsumer updates an existing consumer. If consumer does not
// exist, ErrConsumerDoesNotExist is returned. Consumer interface is
// returned, allowing the caller to operate on a consumer (e.g. fetch messages).
func (s *stream) UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionUpdate)
}
// OrderedConsumer returns an OrderedConsumer instance. Ordered consumers
// are managed by the library and provide a simple way to consume
// messages from a stream. Ordered consumers are ephemeral in-memory
// pull consumers and are resilient to deletes and restarts.
func (s *stream) OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) {
oc := &orderedConsumer{
jetStream: s.jetStream,
cfg: &cfg,
stream: s.name,
namePrefix: nuid.Next(),
doReset: make(chan struct{}, 1),
}
if cfg.OptStartSeq != 0 {
oc.cursor.streamSeq = cfg.OptStartSeq - 1
}
err := oc.reset()
if err != nil {
return nil, err
}
return oc, nil
}
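// Editor's note: a hedged usage sketch, not part of the original source.
// It assumes a ctx and a Stream s from this package; the subject filter is
// illustrative:
//
//	cons, err := s.OrderedConsumer(ctx, OrderedConsumerConfig{
//		FilterSubjects: []string{"events.>"},
//	})
//	if err != nil {
//		return err
//	}
//	// Ordered consumers use AckNone, so the handler only processes messages.
//	cc, err := cons.Consume(func(msg Msg) {
//		fmt.Println(string(msg.Data()))
//	})
//	if err != nil {
//		return err
//	}
//	defer cc.Stop()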
// Consumer returns an interface to an existing consumer, allowing processing
// of messages. If consumer does not exist, ErrConsumerNotFound is
// returned.
func (s *stream) Consumer(ctx context.Context, name string) (Consumer, error) {
return getConsumer(ctx, s.jetStream, s.name, name)
}
// DeleteConsumer removes a consumer with given name from a stream.
// If consumer does not exist, ErrConsumerNotFound is returned.
func (s *stream) DeleteConsumer(ctx context.Context, name string) error {
return deleteConsumer(ctx, s.jetStream, s.name, name)
}
// Info returns StreamInfo from the server.
func (s *stream) Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
var infoReq *streamInfoRequest
for _, opt := range opts {
if infoReq == nil {
infoReq = &streamInfoRequest{}
}
if err := opt(infoReq); err != nil {
return nil, err
}
}
var req []byte
var err error
var subjectMap map[string]uint64
var offset int
infoSubject := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiStreamInfoT, s.name))
var info *StreamInfo
for {
if infoReq != nil {
if infoReq.SubjectFilter != "" {
if subjectMap == nil {
subjectMap = make(map[string]uint64)
}
infoReq.Offset = offset
}
req, err = json.Marshal(infoReq)
if err != nil {
return nil, err
}
}
var resp streamInfoResponse
if _, err = s.jetStream.apiRequestJSON(ctx, infoSubject, &resp, req); err != nil {
return nil, err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
return nil, ErrStreamNotFound
}
return nil, resp.Error
}
info = resp.StreamInfo
var total int
if resp.Total != 0 {
total = resp.Total
}
if len(resp.StreamInfo.State.Subjects) > 0 {
for subj, msgs := range resp.StreamInfo.State.Subjects {
subjectMap[subj] = msgs
}
offset = len(subjectMap)
}
if total == 0 || total <= offset {
info.State.Subjects = nil
// we don't want to store subjects in cache
cached := *info
s.info = &cached
info.State.Subjects = subjectMap
break
}
}
return info, nil
}
// CachedInfo returns StreamInfo currently cached on this stream.
// This method does not perform any network requests. The cached
// StreamInfo is updated on every call to Info and Update.
func (s *stream) CachedInfo() *StreamInfo {
return s.info
}
// Purge removes messages from a stream. It is a destructive operation.
// Use with caution. See StreamPurgeOpt for available options.
func (s *stream) Purge(ctx context.Context, opts ...StreamPurgeOpt) error {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
var purgeReq StreamPurgeRequest
for _, opt := range opts {
if err := opt(&purgeReq); err != nil {
return err
}
}
var req []byte
var err error
req, err = json.Marshal(purgeReq)
if err != nil {
return err
}
purgeSubject := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiStreamPurgeT, s.name))
var resp streamPurgeResponse
if _, err = s.jetStream.apiRequestJSON(ctx, purgeSubject, &resp, req); err != nil {
return err
}
if resp.Error != nil {
return resp.Error
}
return nil
}
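// Editor's note: a hedged usage sketch, not part of the original source.
// WithPurgeSubject and WithPurgeKeep are StreamPurgeOpt helpers from this
// package; the subject is illustrative:
//
//	// Purge one subject but keep its 10 newest messages.
//	err := s.Purge(ctx,
//		WithPurgeSubject("orders.archived.>"),
//		WithPurgeKeep(10),
//	)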
// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
func (s *stream) GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) {
req := &apiMsgGetRequest{Seq: seq}
for _, opt := range opts {
if err := opt(req); err != nil {
return nil, err
}
}
return s.getMsg(ctx, req)
}
// GetLastMsgForSubject retrieves the last raw stream message stored in
// JetStream on a given subject.
func (s *stream) GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) {
return s.getMsg(ctx, &apiMsgGetRequest{LastFor: subject})
}
func (s *stream) getMsg(ctx context.Context, mreq *apiMsgGetRequest) (*RawStreamMsg, error) {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
req, err := json.Marshal(mreq)
if err != nil {
return nil, err
}
var gmSubj string
// handle direct gets
if s.info.Config.AllowDirect {
if mreq.LastFor != "" {
gmSubj = apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiDirectMsgGetLastBySubjectT, s.name, mreq.LastFor))
r, err := s.jetStream.apiRequest(ctx, gmSubj, nil)
if err != nil {
return nil, err
}
return convertDirectGetMsgResponseToMsg(s.name, r.msg)
}
gmSubj = apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiDirectMsgGetT, s.name))
r, err := s.jetStream.apiRequest(ctx, gmSubj, req)
if err != nil {
return nil, err
}
return convertDirectGetMsgResponseToMsg(s.name, r.msg)
}
var resp apiMsgGetResponse
dsSubj := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiMsgGetT, s.name))
_, err = s.jetStream.apiRequestJSON(ctx, dsSubj, &resp, req)
if err != nil {
return nil, err
}
if resp.Error != nil {
if resp.Error.ErrorCode == JSErrCodeMessageNotFound {
return nil, ErrMsgNotFound
}
return nil, resp.Error
}
msg := resp.Message
var hdr nats.Header
if len(msg.Header) > 0 {
hdr, err = nats.DecodeHeadersMsg(msg.Header)
if err != nil {
return nil, err
}
}
return &RawStreamMsg{
Subject: msg.Subject,
Sequence: msg.Sequence,
Header: hdr,
Data: msg.Data,
Time: msg.Time,
}, nil
}
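// Editor's note: a hedged usage sketch, not part of the original source;
// the sequence number and subject are illustrative:
//
//	// Fetch a raw message by its stream sequence...
//	raw, err := s.GetMsg(ctx, 42)
//	// ...or the most recent message published on a subject.
//	last, err := s.GetLastMsgForSubject(ctx, "orders.new")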
func convertDirectGetMsgResponseToMsg(name string, r *nats.Msg) (*RawStreamMsg, error) {
// Check for 404/408. We would get a no-payload message and a "Status" header
if len(r.Data) == 0 {
val := r.Header.Get(statusHdr)
if val != "" {
switch val {
case noMessages:
return nil, ErrMsgNotFound
default:
desc := r.Header.Get("Description")
if desc == "" {
desc = "unable to get message"
}
return nil, fmt.Errorf("nats: %s", desc)
}
}
}
// Check for headers that give us the required information to
// reconstruct the message.
if len(r.Header) == 0 {
return nil, fmt.Errorf("nats: response should have headers")
}
stream := r.Header.Get(StreamHeader)
if stream == "" {
return nil, fmt.Errorf("nats: missing stream header")
}
seqStr := r.Header.Get(SequenceHeader)
if seqStr == "" {
return nil, fmt.Errorf("nats: missing sequence header")
}
seq, err := strconv.ParseUint(seqStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err)
}
timeStr := r.Header.Get(TimeStampHeaer)
if timeStr == "" {
return nil, fmt.Errorf("nats: missing timestamp header")
}
tm, err := time.Parse(time.RFC3339Nano, timeStr)
if err != nil {
return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err)
}
subj := r.Header.Get(SubjectHeader)
if subj == "" {
return nil, fmt.Errorf("nats: missing subject header")
}
return &RawStreamMsg{
Subject: subj,
Sequence: seq,
Header: r.Header,
Data: r.Data,
Time: tm,
}, nil
}
// DeleteMsg deletes a message from a stream.
// On the server, the message is marked as erased, but not overwritten.
func (s *stream) DeleteMsg(ctx context.Context, seq uint64) error {
return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq, NoErase: true})
}
// SecureDeleteMsg deletes a message from a stream. The deleted message
// is overwritten with random data. As a result, this operation is slower
// than DeleteMsg.
func (s *stream) SecureDeleteMsg(ctx context.Context, seq uint64) error {
return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq})
}
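// Editor's note: a hedged sketch, not part of the original source,
// contrasting the two delete flavors; the sequence numbers are illustrative:
//
//	// Fast: the message is only marked as erased.
//	if err := s.DeleteMsg(ctx, 42); err != nil {
//		return err
//	}
//	// Slower: the stored payload is overwritten with random data first.
//	if err := s.SecureDeleteMsg(ctx, 43); err != nil {
//		return err
//	}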
func (s *stream) deleteMsg(ctx context.Context, req *msgDeleteRequest) error {
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
r, err := json.Marshal(req)
if err != nil {
return err
}
subj := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiMsgDeleteT, s.name))
var resp msgDeleteResponse
if _, err = s.jetStream.apiRequestJSON(ctx, subj, &resp, r); err != nil {
return err
}
if !resp.Success {
// err is always nil at this point, so formatting it was a bug; surface
// the server-provided error when present, otherwise the sentinel alone.
if resp.Error != nil {
return fmt.Errorf("%w: %s", ErrMsgDeleteUnsuccessful, resp.Error)
}
return ErrMsgDeleteUnsuccessful
}
return nil
}
// ListConsumers returns ConsumerInfoLister enabling iterating over a
// channel of consumer infos.
func (s *stream) ListConsumers(ctx context.Context) ConsumerInfoLister {
l := &consumerLister{
js: s.jetStream,
consumers: make(chan *ConsumerInfo),
}
go func() {
defer close(l.consumers)
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
for {
page, err := l.consumerInfos(ctx, s.name)
if err != nil && !errors.Is(err, ErrEndOfData) {
l.err = err
return
}
for _, info := range page {
select {
case <-ctx.Done():
l.err = ctx.Err()
return
default:
}
if info != nil {
l.consumers <- info
}
}
if errors.Is(err, ErrEndOfData) {
return
}
}
}()
return l
}
func (s *consumerLister) Info() <-chan *ConsumerInfo {
return s.consumers
}
func (s *consumerLister) Err() error {
return s.err
}
// ConsumerNames returns a ConsumerNameLister enabling iterating over a
// channel of consumer names.
func (s *stream) ConsumerNames(ctx context.Context) ConsumerNameLister {
l := &consumerLister{
js: s.jetStream,
names: make(chan string),
}
go func() {
defer close(l.names)
ctx, cancel := wrapContextWithoutDeadline(ctx)
if cancel != nil {
defer cancel()
}
for {
page, err := l.consumerNames(ctx, s.name)
if err != nil && !errors.Is(err, ErrEndOfData) {
l.err = err
return
}
for _, info := range page {
select {
case l.names <- info:
case <-ctx.Done():
l.err = ctx.Err()
return
}
}
if errors.Is(err, ErrEndOfData) {
return
}
}
}()
return l
}
func (s *consumerLister) Name() <-chan string {
return s.names
}
// consumerInfos fetches the next ConsumerInfo page
func (s *consumerLister) consumerInfos(ctx context.Context, stream string) ([]*ConsumerInfo, error) {
if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
return nil, ErrEndOfData
}
req, err := json.Marshal(
apiPagedRequest{Offset: s.offset},
)
if err != nil {
return nil, err
}
slSubj := apiSubj(s.js.apiPrefix, fmt.Sprintf(apiConsumerListT, stream))
var resp consumerListResponse
_, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
s.pageInfo = &resp.apiPaged
s.offset += len(resp.Consumers)
return resp.Consumers, nil
}
// consumerNames fetches the next consumer names page
func (s *consumerLister) consumerNames(ctx context.Context, stream string) ([]string, error) {
if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
return nil, ErrEndOfData
}
req, err := json.Marshal(
apiPagedRequest{Offset: s.offset},
)
if err != nil {
return nil, err
}
slSubj := apiSubj(s.js.apiPrefix, fmt.Sprintf(apiConsumerNamesT, stream))
var resp consumerNamesResponse
_, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req)
if err != nil {
return nil, err
}
if resp.Error != nil {
return nil, resp.Error
}
s.pageInfo = &resp.apiPaged
s.offset += len(resp.Consumers)
return resp.Consumers, nil
}
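For readers tracing how the lister above is consumed, here is a minimal sketch against the public API of this deleted file; the stream name and server URL are illustrative assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/jetstream"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Drain()

	js, err := jetstream.New(nc)
	if err != nil {
		log.Fatal(err)
	}

	s, err := js.Stream(context.Background(), "ORDERS")
	if err != nil {
		log.Fatal(err) // e.g. jetstream.ErrStreamNotFound
	}

	// Info() is always closed by the lister, so a plain range loop is safe;
	// Err() reports any failure encountered while paging through consumers.
	lister := s.ListConsumers(context.Background())
	for info := range lister.Info() {
		fmt.Printf("%s: %d pending\n", info.Name, info.NumPending)
	}
	if err := lister.Err(); err != nil {
		log.Fatal(err)
	}
}
```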


@@ -1,606 +0,0 @@
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jetstream
import (
"encoding/json"
"fmt"
"strings"
"time"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
type (
// StreamInfo shows config and current state for this stream.
StreamInfo struct {
// Config contains the configuration settings of the stream, set when
// creating or updating the stream.
Config StreamConfig `json:"config"`
// Created is the timestamp when the stream was created.
Created time.Time `json:"created"`
// State provides the state of the stream at the time of request,
// including metrics like the number of messages in the stream, total
// bytes, etc.
State StreamState `json:"state"`
// Cluster contains information about the cluster to which this stream
// belongs (if applicable).
Cluster *ClusterInfo `json:"cluster,omitempty"`
// Mirror contains information about another stream this one is
// mirroring. Mirroring is used to create replicas of another stream's
// data. This field is omitted if the stream is not mirroring another
// stream.
Mirror *StreamSourceInfo `json:"mirror,omitempty"`
// Sources is a list of source streams from which this stream collects
// data.
Sources []*StreamSourceInfo `json:"sources,omitempty"`
// TimeStamp indicates when the info was gathered by the server.
TimeStamp time.Time `json:"ts"`
}
// StreamConfig is the configuration of a JetStream stream.
StreamConfig struct {
// Name is the name of the stream. It is required and must be unique
// across the JetStream account.
//
// Names cannot contain whitespace, ., *, >, path separators
// (forward or backwards slash), and non-printable characters.
Name string `json:"name"`
// Description is an optional description of the stream.
Description string `json:"description,omitempty"`
// Subjects is a list of subjects that the stream is listening on.
// Wildcards are supported. Subjects cannot be set if the stream is
// created as a mirror.
Subjects []string `json:"subjects,omitempty"`
// Retention defines the message retention policy for the stream.
// Defaults to LimitsPolicy.
Retention RetentionPolicy `json:"retention"`
// MaxConsumers specifies the maximum number of consumers allowed for
// the stream.
MaxConsumers int `json:"max_consumers"`
// MaxMsgs is the maximum number of messages the stream will store.
// After reaching the limit, stream adheres to the discard policy.
// If not set, server default is -1 (unlimited).
MaxMsgs int64 `json:"max_msgs"`
// MaxBytes is the maximum total size of messages the stream will store.
// After reaching the limit, stream adheres to the discard policy.
// If not set, server default is -1 (unlimited).
MaxBytes int64 `json:"max_bytes"`
// Discard defines the policy for handling messages when the stream
// reaches its limits in terms of number of messages or total bytes.
Discard DiscardPolicy `json:"discard"`
// DiscardNewPerSubject is a flag to enable discarding new messages per
// subject when limits are reached. Requires DiscardPolicy to be
// DiscardNew and the MaxMsgsPerSubject to be set.
DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"`
// MaxAge is the maximum age of messages that the stream will retain.
MaxAge time.Duration `json:"max_age"`
// MaxMsgsPerSubject is the maximum number of messages per subject that
// the stream will retain.
MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"`
// MaxMsgSize is the maximum size of any single message in the stream.
MaxMsgSize int32 `json:"max_msg_size,omitempty"`
// Storage specifies the type of storage backend used for the stream
// (file or memory).
Storage StorageType `json:"storage"`
// Replicas is the number of stream replicas in clustered JetStream.
// Defaults to 1, maximum is 5.
Replicas int `json:"num_replicas"`
// NoAck is a flag to disable acknowledging messages received by this
// stream.
//
// If set to true, publish methods from the JetStream client will not
// work as expected, since they rely on acknowledgements. Core NATS
// publish methods should be used instead. Note that this will make
// message delivery less reliable.
NoAck bool `json:"no_ack,omitempty"`
// Duplicates is the window within which to track duplicate messages.
// If not set, server default is 2 minutes.
Duplicates time.Duration `json:"duplicate_window,omitempty"`
// Placement is used to declare where the stream should be placed via
// tags and/or an explicit cluster name.
Placement *Placement `json:"placement,omitempty"`
// Mirror defines the configuration for mirroring another stream.
Mirror *StreamSource `json:"mirror,omitempty"`
// Sources is a list of other streams this stream sources messages from.
Sources []*StreamSource `json:"sources,omitempty"`
// Sealed streams do not allow messages to be published or deleted via
// limits or the API; sealed streams cannot be unsealed via a configuration
// update. Can only be set on already created streams via the Update API.
Sealed bool `json:"sealed,omitempty"`
// DenyDelete restricts the ability to delete messages from a stream via
// the API. Defaults to false.
DenyDelete bool `json:"deny_delete,omitempty"`
// DenyPurge restricts the ability to purge messages from a stream via
// the API. Defaults to false.
DenyPurge bool `json:"deny_purge,omitempty"`
// AllowRollup allows the use of the Nats-Rollup header to replace all
// contents of a stream, or subject in a stream, with a single new
// message.
AllowRollup bool `json:"allow_rollup_hdrs,omitempty"`
// Compression specifies the message storage compression algorithm.
// Defaults to NoCompression.
Compression StoreCompression `json:"compression"`
// FirstSeq is the initial sequence number of the first message in the
// stream.
FirstSeq uint64 `json:"first_seq,omitempty"`
// SubjectTransform allows applying a transformation to matching
// messages' subjects.
SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"`
// RePublish allows immediate republishing a message to the configured
// subject after it's stored.
RePublish *RePublish `json:"republish,omitempty"`
// AllowDirect enables direct access to individual messages using direct
// get API. Defaults to false.
AllowDirect bool `json:"allow_direct"`
// MirrorDirect enables direct access to individual messages from the
// origin stream using direct get API. Defaults to false.
MirrorDirect bool `json:"mirror_direct"`
// ConsumerLimits defines limits on certain consumer settings, as well as
// the defaults applied to consumers that do not set them.
ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"`
// Metadata is a set of application-defined key-value pairs for
// associating metadata on the stream. This feature requires nats-server
// v2.10.0 or later.
Metadata map[string]string `json:"metadata,omitempty"`
// Template identifies the template that manages the Stream. DEPRECATED:
// This feature is no longer supported.
Template string `json:"template_owner,omitempty"`
}
// StreamSourceInfo shows information about an upstream stream
// source/mirror.
StreamSourceInfo struct {
// Name is the name of the stream that is being replicated.
Name string `json:"name"`
// Lag informs how many messages behind the source/mirror operation is.
// This will only show correctly if there is active communication
// with stream/mirror.
Lag uint64 `json:"lag"`
// Active informs how long ago the mirror or sourced stream last had
// activity. Value will be -1 when there has been no activity.
Active time.Duration `json:"active"`
// FilterSubject is the subject filter defined for this source/mirror.
FilterSubject string `json:"filter_subject,omitempty"`
// SubjectTransforms is a list of subject transforms defined for this
// source/mirror.
SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
}
// StreamState is the state of a JetStream stream at the time of request.
StreamState struct {
// Msgs is the number of messages stored in the stream.
Msgs uint64 `json:"messages"`
// Bytes is the number of bytes stored in the stream.
Bytes uint64 `json:"bytes"`
// FirstSeq is the sequence number of the first message in the stream.
FirstSeq uint64 `json:"first_seq"`
// FirstTime is the timestamp of the first message in the stream.
FirstTime time.Time `json:"first_ts"`
// LastSeq is the sequence number of the last message in the stream.
LastSeq uint64 `json:"last_seq"`
// LastTime is the timestamp of the last message in the stream.
LastTime time.Time `json:"last_ts"`
// Consumers is the number of consumers on the stream.
Consumers int `json:"consumer_count"`
// Deleted is a list of sequence numbers that have been removed from the
// stream. This field will only be returned if the stream has been
// fetched with the DeletedDetails option.
Deleted []uint64 `json:"deleted"`
// NumDeleted is the number of messages that have been removed from the
// stream. Only deleted messages causing a gap in stream sequence numbers
// are counted. Messages deleted at the beginning or end of the stream
// are not counted.
NumDeleted int `json:"num_deleted"`
// NumSubjects is the number of unique subjects the stream has received
// messages on.
NumSubjects uint64 `json:"num_subjects"`
// Subjects is a map of subjects the stream has received messages on
// with message count per subject. This field will only be returned if
// the stream has been fetched with the SubjectFilter option.
Subjects map[string]uint64 `json:"subjects"`
}
// ClusterInfo shows information about the underlying set of servers that
// make up the stream or consumer.
ClusterInfo struct {
// Name is the name of the cluster.
Name string `json:"name,omitempty"`
// Leader is the server name of the RAFT leader.
Leader string `json:"leader,omitempty"`
// Replicas is the list of members of the RAFT cluster
Replicas []*PeerInfo `json:"replicas,omitempty"`
}
// PeerInfo shows information about the peers in the cluster that are
// supporting the stream or consumer.
PeerInfo struct {
// Name is the server name of the peer.
Name string `json:"name"`
// Current indicates if the peer is up to date and synchronized with the
// leader.
Current bool `json:"current"`
// Offline indicates if the peer is considered offline by the group.
Offline bool `json:"offline,omitempty"`
// Active is the duration since this peer was last seen.
Active time.Duration `json:"active"`
// Lag is the number of uncommitted operations this peer is behind the
// leader.
Lag uint64 `json:"lag,omitempty"`
}
// SubjectTransformConfig is for applying a subject transform (to matching
// messages) before doing anything else when a new message is received.
SubjectTransformConfig struct {
// Source is the subject pattern to match incoming messages against.
Source string `json:"src"`
// Destination is the subject pattern to remap the subject to.
Destination string `json:"dest"`
}
// RePublish is for republishing messages once committed to a stream. The
// original subject is remapped from the subject pattern to the destination
// pattern.
RePublish struct {
// Source is the subject pattern to match incoming messages against.
Source string `json:"src,omitempty"`
// Destination is the subject pattern to republish the subject to.
Destination string `json:"dest"`
// HeadersOnly is a flag to indicate that only the headers should be
// republished.
HeadersOnly bool `json:"headers_only,omitempty"`
}
// Placement is used to guide placement of streams in clustered JetStream.
Placement struct {
// Cluster is the name of the cluster to which the stream should be
// assigned.
Cluster string `json:"cluster"`
// Tags are used to match streams to servers in the cluster. A stream
// will be assigned to a server with a matching tag.
Tags []string `json:"tags,omitempty"`
}
// StreamSource dictates how streams can source from other streams.
StreamSource struct {
// Name is the name of the stream to source from.
Name string `json:"name"`
// OptStartSeq is the sequence number to start sourcing from.
OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
// OptStartTime is the timestamp of messages to start sourcing from.
OptStartTime *time.Time `json:"opt_start_time,omitempty"`
// FilterSubject is the subject filter used to only replicate messages
// with matching subjects.
FilterSubject string `json:"filter_subject,omitempty"`
// SubjectTransforms is a list of subject transforms to apply to
// matching messages.
//
// Subject transforms on sources and mirrors are also used as subject
// filters with optional transformations.
SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
// External is a configuration referencing a stream source in another
// account or JetStream domain.
External *ExternalStream `json:"external,omitempty"`
// Domain is used to configure a stream source in another JetStream
// domain. This setting will set the External field with the appropriate
// APIPrefix.
Domain string `json:"-"`
}
// ExternalStream allows you to qualify access to a stream source in another
// account.
ExternalStream struct {
// APIPrefix is the subject prefix that imports the other account/domain
// $JS.API.CONSUMER.> subjects.
APIPrefix string `json:"api"`
// DeliverPrefix is the delivery subject to use for the push consumer.
DeliverPrefix string `json:"deliver"`
}
// StreamConsumerLimits are the limits for a consumer on a stream. These can
// be overridden on a per consumer basis.
StreamConsumerLimits struct {
// InactiveThreshold is a duration which instructs the server to clean
// up the consumer if it has been inactive for the specified duration.
InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
// MaxAckPending is a maximum number of outstanding unacknowledged
// messages for a consumer.
MaxAckPending int `json:"max_ack_pending,omitempty"`
}
// DiscardPolicy determines how to proceed when limits of messages or bytes
// are reached.
DiscardPolicy int
// RetentionPolicy determines how messages in a stream are retained.
RetentionPolicy int
// StorageType determines how messages are stored for retention.
StorageType int
// StoreCompression determines how messages are compressed.
StoreCompression uint8
)
const (
// LimitsPolicy (default) means that messages are retained until any given
// limit is reached. This could be one of MaxMsgs, MaxBytes, or MaxAge.
LimitsPolicy RetentionPolicy = iota
// InterestPolicy specifies that when all known observables have
// acknowledged a message it can be removed.
InterestPolicy
// WorkQueuePolicy specifies that when the first worker or subscriber
// acknowledges the message it can be removed.
WorkQueuePolicy
)
const (
// DiscardOld will remove older messages to return to the limits. This is
// the default.
DiscardOld DiscardPolicy = iota
// DiscardNew will fail to store new messages once the limits are reached.
DiscardNew
)
const (
limitsPolicyString = "limits"
interestPolicyString = "interest"
workQueuePolicyString = "workqueue"
)
func (rp RetentionPolicy) String() string {
switch rp {
case LimitsPolicy:
return "Limits"
case InterestPolicy:
return "Interest"
case WorkQueuePolicy:
return "WorkQueue"
default:
return "Unknown Retention Policy"
}
}
func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
switch rp {
case LimitsPolicy:
return json.Marshal(limitsPolicyString)
case InterestPolicy:
return json.Marshal(interestPolicyString)
case WorkQueuePolicy:
return json.Marshal(workQueuePolicyString)
default:
return nil, fmt.Errorf("nats: can not marshal %v", rp)
}
}
func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
switch string(data) {
case jsonString(limitsPolicyString):
*rp = LimitsPolicy
case jsonString(interestPolicyString):
*rp = InterestPolicy
case jsonString(workQueuePolicyString):
*rp = WorkQueuePolicy
default:
return fmt.Errorf("nats: can not unmarshal %q", data)
}
return nil
}
func (dp DiscardPolicy) String() string {
switch dp {
case DiscardOld:
return "DiscardOld"
case DiscardNew:
return "DiscardNew"
default:
return "Unknown Discard Policy"
}
}
func (dp DiscardPolicy) MarshalJSON() ([]byte, error) {
switch dp {
case DiscardOld:
return json.Marshal("old")
case DiscardNew:
return json.Marshal("new")
default:
return nil, fmt.Errorf("nats: can not marshal %v", dp)
}
}
func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error {
switch strings.ToLower(string(data)) {
case jsonString("old"):
*dp = DiscardOld
case jsonString("new"):
*dp = DiscardNew
default:
return fmt.Errorf("nats: can not unmarshal %q", data)
}
return nil
}
const (
// FileStorage specifies on disk storage. It's the default.
FileStorage StorageType = iota
// MemoryStorage specifies in memory only.
MemoryStorage
)
const (
memoryStorageString = "memory"
fileStorageString = "file"
)
func (st StorageType) String() string {
caser := cases.Title(language.AmericanEnglish)
switch st {
case MemoryStorage:
return caser.String(memoryStorageString)
case FileStorage:
return caser.String(fileStorageString)
default:
return "Unknown Storage Type"
}
}
func (st StorageType) MarshalJSON() ([]byte, error) {
switch st {
case MemoryStorage:
return json.Marshal(memoryStorageString)
case FileStorage:
return json.Marshal(fileStorageString)
default:
return nil, fmt.Errorf("nats: can not marshal %v", st)
}
}
func (st *StorageType) UnmarshalJSON(data []byte) error {
switch string(data) {
case jsonString(memoryStorageString):
*st = MemoryStorage
case jsonString(fileStorageString):
*st = FileStorage
default:
return fmt.Errorf("nats: can not unmarshal %q", data)
}
return nil
}
func jsonString(s string) string {
return "\"" + s + "\""
}
const (
// NoCompression disables compression on the stream. This is the default.
NoCompression StoreCompression = iota
// S2Compression enables S2 compression on the stream.
S2Compression
)
func (alg StoreCompression) String() string {
switch alg {
case NoCompression:
return "None"
case S2Compression:
return "S2"
default:
return "Unknown StoreCompression"
}
}
func (alg StoreCompression) MarshalJSON() ([]byte, error) {
var str string
switch alg {
case S2Compression:
str = "s2"
case NoCompression:
str = "none"
default:
return nil, fmt.Errorf("unknown compression algorithm")
}
return json.Marshal(str)
}
func (alg *StoreCompression) UnmarshalJSON(b []byte) error {
var str string
if err := json.Unmarshal(b, &str); err != nil {
return err
}
switch str {
case "s2":
*alg = S2Compression
case "none":
*alg = NoCompression
default:
return fmt.Errorf("unknown compression algorithm")
}
return nil
}
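As an illustration of how these types combine, here is a hedged sketch creating a stream with this configuration; the stream name, subjects, and limits are assumptions, not taken from the commit.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/jetstream"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Drain()

	js, err := jetstream.New(nc)
	if err != nil {
		log.Fatal(err)
	}

	// A file-backed, limits-retention stream capped by count, size and age;
	// once a cap is hit, DiscardOld drops the oldest messages first.
	cfg := jetstream.StreamConfig{
		Name:      "ORDERS",
		Subjects:  []string{"orders.>"},
		Retention: jetstream.LimitsPolicy,
		Storage:   jetstream.FileStorage,
		Discard:   jetstream.DiscardOld,
		MaxMsgs:   1_000_000,
		MaxBytes:  1 << 30, // 1 GiB
		MaxAge:    24 * time.Hour,
		Replicas:  1,
	}
	if _, err := js.CreateStream(context.Background(), cfg); err != nil {
		log.Fatal(err)
	}
}
```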

File diff suppressed because it is too large


@@ -1,245 +0,0 @@
// Copyright 2020-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nats
import (
"errors"
"fmt"
)
var (
// API errors
// ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account.
//
// Note: This error will not be returned in clustered mode, even if each
// server in the cluster does not have JetStream enabled. In clustered mode,
// requests will time out instead.
ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}}
// ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account.
ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
// ErrStreamNotFound is an error returned when stream with given name does not exist.
ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
// ErrStreamNameAlreadyInUse is returned when a stream with given name already exists and has a different configuration.
ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
// ErrStreamSubjectTransformNotSupported is returned when the connected nats-server version does not support setting
// the stream subject transform. If this error is returned when executing AddStream(), the stream with invalid
// configuration was already created in the server.
ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
// ErrStreamSourceSubjectTransformNotSupported is returned when the connected nats-server version does not support setting
// the stream source subject transform. If this error is returned when executing AddStream(), the stream with invalid
// configuration was already created in the server.
ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
// ErrStreamSourceNotSupported is returned when the connected nats-server version does not support setting
// the stream sources. If this error is returned when executing AddStream(), the stream with invalid
// configuration was already created in the server.
ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
// ErrStreamSourceMultipleSubjectTransformsNotSupported is returned when the connected nats-server version does not support setting
// the stream sources. If this error is returned when executing AddStream(), the stream with invalid
// configuration was already created in the server.
ErrStreamSourceMultipleSubjectTransformsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject transforms not supported by nats-server"}
// ErrConsumerNotFound is an error returned when consumer with given name does not exist.
ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
// ErrMsgNotFound is returned when a message with the provided sequence number does not exist.
ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
// ErrBadRequest is returned when invalid request is sent to JetStream API.
ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
// ErrDuplicateFilterSubjects is returned when both FilterSubject and FilterSubjects are specified when creating consumer.
ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
// ErrOverlappingFilterSubjects is returned when filter subjects overlap when creating a consumer.
ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
// ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
// Client errors
// ErrConsumerNameAlreadyInUse is an error returned when consumer with given name already exists.
ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
// ErrConsumerNotActive is an error returned when consumer is not active.
ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"}
// ErrInvalidJSAck is returned when JetStream ack from message publish is invalid.
ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
// ErrStreamConfigRequired is returned when an empty stream configuration is supplied to add/update stream.
ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"}
// ErrStreamNameRequired is returned when the provided stream name is empty.
ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
// ErrConsumerNameRequired is returned when the provided consumer durable name is empty.
ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"}
// ErrConsumerMultipleFilterSubjectsNotSupported is returned when the connected nats-server version does not support setting
// multiple filter subjects with filter_subjects field. If this error is returned when executing AddConsumer(), the consumer with invalid
// configuration was already created in the server.
ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
// ErrConsumerConfigRequired is returned when an empty consumer configuration is supplied to add/update consumer.
ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"}
// ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on push consumer.
ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"}
// ErrPullSubscribeRequired is returned when attempting to use subscribe methods that are not suitable for pull consumers.
ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"}
// ErrMsgAlreadyAckd is returned when attempting to acknowledge message more than once.
ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
// ErrNoStreamResponse is returned when there is no response from stream (e.g. no responders error).
ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
// ErrNotJSMessage is returned when attempting to get metadata from a non-JetStream message.
ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
// ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' ').
ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
// ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' ').
ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
// ErrInvalidFilterSubject is returned when the provided filter subject is invalid.
ErrInvalidFilterSubject JetStreamError = &jsError{message: "invalid filter subject"}
// ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful.
ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"}
// ErrSubjectMismatch is returned when the provided subject does not match consumer's filter subject.
ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"}
// ErrContextAndTimeout is returned when attempting to use both context and timeout.
ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"}
// ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for consumer with AckNone policy set.
ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"}
// ErrConsumerDeleted is returned when attempting to send pull request to a consumer which does not exist
ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
// ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed
ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"}
// ErrNoHeartbeat is returned when no heartbeat is received from server when sending requests with pull consumer.
ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"}
// ErrSubscriptionClosed is returned when attempting to send pull request to a closed subscription
ErrSubscriptionClosed JetStreamError = &jsError{message: "subscription closed"}
// DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases.
// Use ErrInvalidConsumerName instead.
ErrInvalidDurableName = errors.New("nats: invalid durable name")
)
// ErrorCode represents JetStream error codes returned by the API
type ErrorCode uint16
const (
JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
JSErrCodeJetStreamNotEnabled ErrorCode = 10076
JSErrCodeInsufficientResourcesErr ErrorCode = 10023
JSErrCodeStreamNotFound ErrorCode = 10059
JSErrCodeStreamNameInUse ErrorCode = 10058
JSErrCodeConsumerNotFound ErrorCode = 10014
JSErrCodeConsumerNameExists ErrorCode = 10013
JSErrCodeConsumerAlreadyExists ErrorCode = 10105
JSErrCodeDuplicateFilterSubjects ErrorCode = 10136
JSErrCodeOverlappingFilterSubjects ErrorCode = 10138
JSErrCodeConsumerEmptyFilter ErrorCode = 10139
JSErrCodeMessageNotFound ErrorCode = 10037
JSErrCodeBadRequest ErrorCode = 10003
JSStreamInvalidConfig ErrorCode = 10052
JSErrCodeStreamWrongLastSequence ErrorCode = 10071
)
// APIError is included in all API responses if there was an error.
type APIError struct {
Code int `json:"code"`
ErrorCode ErrorCode `json:"err_code"`
Description string `json:"description,omitempty"`
}
// Error prints the JetStream API error code and description
func (e *APIError) Error() string {
return fmt.Sprintf("nats: %s", e.Description)
}
// APIError implements the JetStreamError interface.
func (e *APIError) APIError() *APIError {
return e
}
// Is matches against an APIError.
func (e *APIError) Is(err error) bool {
if e == nil {
return false
}
// Extract internal APIError to match against.
var aerr *APIError
ok := errors.As(err, &aerr)
if !ok {
return ok
}
return e.ErrorCode == aerr.ErrorCode
}
// JetStreamError is an error result that happens when using JetStream.
// In case of a client-side error, APIError() returns nil.
type JetStreamError interface {
APIError() *APIError
error
}
type jsError struct {
apiErr *APIError
message string
}
func (err *jsError) APIError() *APIError {
return err.apiErr
}
func (err *jsError) Error() string {
if err.apiErr != nil && err.apiErr.Description != "" {
return err.apiErr.Error()
}
return fmt.Sprintf("nats: %s", err.message)
}
func (err *jsError) Unwrap() error {
// Allow matching to embedded APIError in case there is one.
if err.apiErr == nil {
return nil
}
return err.apiErr
}
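Because the API sentinels above embed an *APIError carrying the server's error code, callers can match them with errors.Is or unwrap them with errors.As. Here is a minimal sketch against the legacy nats JetStreamContext; the stream name is illustrative.

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Drain()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	_, err = js.StreamInfo("ORDERS")
	switch {
	case err == nil:
		fmt.Println("stream exists")
	case errors.Is(err, nats.ErrStreamNotFound):
		// Matched via APIError.Is on the embedded error code.
		fmt.Println("stream does not exist")
	default:
		var apiErr *nats.APIError
		if errors.As(err, &apiErr) {
			fmt.Println("server error:", apiErr.ErrorCode, apiErr.Description)
		} else {
			log.Fatal(err)
		}
	}
}
```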

File diff suppressed because it is too large

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff.