-
-
-
-
- -
-
Software Modules:
-Currently, telecommunications giants and startups, publishing new software daily, slow delivery cycles and manual and time-consuming quality assurance processes make it difficult for integrators and service providers to compete. USP "Software Module Management" allows a containerized approach to the development of software for embedded devices, making it possible to drastically reduce the chance of error in software updates, it also facilitates the integration of third parties in a device, still keeping the firmware part isolated from Vendor.
-
-
-
-
-
---------------------------------------------------------------------------------------------------------------------------------------------------------
-
-
Bibliographic sources: MU-461.pdf, TR-369.html, USP Training Session SlidesTR-369.html
+
Documentation
+
Official Website
+
Official Documentation
diff --git a/agent/oktopus-stomp-obuspa.txt b/agent/oktopus-stomp-obuspa.txt
new file mode 100644
index 0000000..9afa288
--- /dev/null
+++ b/agent/oktopus-stomp-obuspa.txt
@@ -0,0 +1,59 @@
+##########################################################################################################
+#
+# This file contains a factory reset database in text format
+#
+# If no USP database exists when OB-USP-AGENT starts, then OB-USP-AGENT will create a database containing
+# the parameters specified in a text file located by the '-r' option.
+# Example:
+# obuspa -p -v 4 -r factory_reset_example.txt
+#
+# Each line of this file contains either a comment (denoted by '#' at the start of the line)
+# or a USP data model parameter and its factory reset value.
+# The parameter and value are separated by whitespace.
+# The value may optionally be enclosed in speech marks "" (this is the only way to specify an empty string)
+#
+##########################################################################################################
+
+Device.LocalAgent.EndpointID "oktopus-0-stomp"
+
+#
+# The following parameters will definitely need modifying
+#
+Device.LocalAgent.Controller.1.EndpointID "oktopusController"
+Device.STOMP.Connection.1.Host "127.0.0.1"
+Device.STOMP.Connection.1.Username "test"
+Device.STOMP.Connection.1.Password "password"
+
+#
+# The following parameters may be modified
+#
+Device.LocalAgent.MTP.1.Alias "cpe-1"
+Device.LocalAgent.MTP.1.Enable "true"
+Device.LocalAgent.MTP.1.Protocol "STOMP"
+Device.LocalAgent.MTP.1.STOMP.Reference "Device.STOMP.Connection.1"
+Device.LocalAgent.MTP.1.STOMP.Destination "oktopus/usp/v1/agent"
+Device.LocalAgent.Controller.1.Alias "cpe-1"
+Device.LocalAgent.Controller.1.Enable "true"
+Device.LocalAgent.Controller.1.AssignedRole "Device.LocalAgent.ControllerTrust.Role.1"
+Device.LocalAgent.Controller.1.PeriodicNotifInterval "300"
+Device.LocalAgent.Controller.1.PeriodicNotifTime "0001-01-01T00:00:00Z"
+Device.LocalAgent.Controller.1.USPNotifRetryMinimumWaitInterval "5"
+Device.LocalAgent.Controller.1.USPNotifRetryIntervalMultiplier "2000"
+Device.LocalAgent.Controller.1.ControllerCode ""
+Device.LocalAgent.Controller.1.MTP.1.Alias "cpe-1"
+Device.LocalAgent.Controller.1.MTP.1.Enable "true"
+Device.LocalAgent.Controller.1.MTP.1.Protocol "STOMP"
+Device.LocalAgent.Controller.1.MTP.1.STOMP.Reference "Device.STOMP.Connection.1"
+Device.LocalAgent.Controller.1.MTP.1.STOMP.Destination "controller-notify-dest"
+Device.STOMP.Connection.1.Alias "cpe-1"
+Device.STOMP.Connection.1.Enable "true"
+Device.STOMP.Connection.1.Port "61613"
+Device.STOMP.Connection.1.EnableEncryption "false"
+Device.STOMP.Connection.1.VirtualHost "/"
+Device.STOMP.Connection.1.EnableHeartbeats "true"
+Device.STOMP.Connection.1.OutgoingHeartbeat "30000"
+Device.STOMP.Connection.1.IncomingHeartbeat "300000"
+Device.STOMP.Connection.1.ServerRetryInitialInterval "60"
+Device.STOMP.Connection.1.ServerRetryIntervalMultiplier "2000"
+Device.STOMP.Connection.1.ServerRetryMaxInterval "30720"
+Internal.Reboot.Cause "LocalFactoryReset"
\ No newline at end of file
diff --git a/backend/services/controller/internal/api/api.go b/backend/services/controller/internal/api/api.go
index 404e900..d0b6898 100644
--- a/backend/services/controller/internal/api/api.go
+++ b/backend/services/controller/internal/api/api.go
@@ -17,37 +17,30 @@ import (
)
type Api struct {
- port string
- js jetstream.JetStream
- nc *nats.Conn
- bridge bridge.Bridge
- db db.Database
- kv jetstream.KeyValue
- ctx context.Context
- enterpise config.Enterprise
+ port string
+ js jetstream.JetStream
+ nc *nats.Conn
+ bridge bridge.Bridge
+ db db.Database
+ kv jetstream.KeyValue
+ ctx context.Context
}
const REQUEST_TIMEOUT = time.Second * 30
func NewApi(c *config.Config, js jetstream.JetStream, nc *nats.Conn, bridge bridge.Bridge, d db.Database, kv jetstream.KeyValue) Api {
return Api{
- port: c.RestApi.Port,
- js: js,
- nc: nc,
- ctx: c.RestApi.Ctx,
- bridge: bridge,
- db: d,
- kv: kv,
- enterpise: c.Enterprise,
+ port: c.RestApi.Port,
+ js: js,
+ nc: nc,
+ ctx: c.RestApi.Ctx,
+ bridge: bridge,
+ db: d,
+ kv: kv,
}
}
func (a *Api) StartApi() {
-
- if a.enterpise.SupportPassword != "" && a.enterpise.SupportEmail != "" {
- go registerEnterpriseSupport(a.enterpise.SupportEmail, a.enterpise.SupportPassword, a.db)
- }
-
r := mux.NewRouter()
authentication := r.PathPrefix("/api/auth").Subrouter()
authentication.HandleFunc("/login", a.generateToken).Methods("PUT")
@@ -57,13 +50,6 @@ func (a *Api) StartApi() {
authentication.HandleFunc("/password", a.changePassword).Methods("PUT")
authentication.HandleFunc("/admin/register", a.registerAdminUser).Methods("POST")
authentication.HandleFunc("/admin/exists", a.adminUserExists).Methods("GET")
- if a.enterpise.Enable {
- mapRoutes := r.PathPrefix("/api/map").Subrouter()
- mapRoutes.HandleFunc("", a.devicesLocation).Methods("GET")
- mapRoutes.Use(func(handler http.Handler) http.Handler {
- return middleware.Middleware(handler)
- })
- }
iot := r.PathPrefix("/api/device").Subrouter()
iot.HandleFunc("/alias", a.setDeviceAlias).Methods("PUT")
iot.HandleFunc("/auth", a.deviceAuth).Methods("GET", "POST", "DELETE")
@@ -74,7 +60,7 @@ func (a *Api) StartApi() {
iot.HandleFunc("/cwmp/{sn}/addObject", a.cwmpAddObjectMsg).Methods("PUT")
iot.HandleFunc("/cwmp/{sn}/deleteObject", a.cwmpDeleteObjectMsg).Methods("PUT")
iot.HandleFunc("", a.retrieveDevices).Methods("GET")
- iot.HandleFunc("/{id}", a.retrieveDevices).Methods("GET")
+ iot.HandleFunc("/filterOptions", a.filterOptions).Methods("GET")
iot.HandleFunc("/{sn}/{mtp}/get", a.deviceGetMsg).Methods("PUT")
iot.HandleFunc("/{sn}/{mtp}/add", a.deviceCreateMsg).Methods("PUT")
iot.HandleFunc("/{sn}/{mtp}/del", a.deviceDeleteMsg).Methods("PUT")
@@ -84,13 +70,6 @@ func (a *Api) StartApi() {
iot.HandleFunc("/{sn}/{mtp}/instances", a.deviceGetParameterInstances).Methods("PUT")
iot.HandleFunc("/{sn}/{mtp}/operate", a.deviceOperateMsg).Methods("PUT")
iot.HandleFunc("/{sn}/{mtp}/fw_update", a.deviceFwUpdate).Methods("PUT") //TODO: put it to work and generalize for usp and cwmp
- if a.enterpise.Enable {
- iot.HandleFunc("/{sn}/sitesurvey", a.deviceSiteSurvey).Methods("GET")
- iot.HandleFunc("/{sn}/connecteddevices", a.deviceConnectedDevices).Methods("GET")
- iot.HandleFunc("/{sn}/traceroute", a.deviceTraceRoute).Methods("GET", "PUT")
- iot.HandleFunc("/{sn}/speedtest", a.deviceSpeedTest).Methods("PUT")
- iot.HandleFunc("/{sn}/ping", a.devicePing).Methods("PUT", "GET")
- }
iot.HandleFunc("/{sn}/wifi", a.deviceWifi).Methods("PUT", "GET")
dash := r.PathPrefix("/api/info").Subrouter()
dash.HandleFunc("/vendors", a.vendorsInfo).Methods("GET")
@@ -131,32 +110,3 @@ func (a *Api) StartApi() {
}()
log.Println("Running REST API at port", a.port)
}
-
-func registerEnterpriseSupport(email, password string, d db.Database) {
-
- user := db.User{
- Email: email,
- Password: password,
- Name: "Enterprise Support",
- Level: db.AdminUser,
- }
-
- for {
- if err := user.HashPassword(password); err != nil {
- return
- }
-
- err := d.RegisterUser(user)
- if err != nil {
- if err == db.ErrorUserExists {
- log.Println("Enterprise support user already registered.")
- return
- }
- log.Println("Error to register enterprise support user:", err)
- time.Sleep(time.Second * 5)
- continue
- }
- log.Println("Enterprise support user registered successfully.")
- return
- }
-}
diff --git a/backend/services/controller/internal/api/device.go b/backend/services/controller/internal/api/device.go
index 527886c..a24485f 100644
--- a/backend/services/controller/internal/api/device.go
+++ b/backend/services/controller/internal/api/device.go
@@ -6,9 +6,11 @@ import (
"log"
"net/http"
"strconv"
+ "strings"
"github.com/leandrofars/oktopus/internal/bridge"
"github.com/leandrofars/oktopus/internal/db"
+ "github.com/leandrofars/oktopus/internal/entity"
local "github.com/leandrofars/oktopus/internal/nats"
"github.com/leandrofars/oktopus/internal/utils"
"github.com/nats-io/nats.go/jetstream"
@@ -16,6 +18,33 @@ import (
)
func (a *Api) retrieveDevices(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodDelete {
+ id := r.URL.Query().Get("id")
+ if id == "" {
+ w.WriteHeader(http.StatusBadRequest)
+ err := json.NewEncoder(w).Encode("No id provided")
+ if err != nil {
+ log.Println(err)
+ }
+ return
+ }
+
+ ids := strings.Split(id, ",")
+
+ msg, err := bridge.NatsReq[int64](local.NATS_ADAPTER_SUBJECT+"devices.delete", utils.Marshall(ids), w, a.nc)
+ if err != nil {
+ return
+ }
+
+ err = json.NewEncoder(w).Encode(map[string]interface{}{
+ "number_of_deleted_devices": msg.Msg,
+ })
+ if err != nil {
+ log.Println(err)
+ }
+ return
+ }
+
const PAGE_SIZE_LIMIT = 50
const PAGE_SIZE_DEFAULT = 20
@@ -33,9 +62,36 @@ func (a *Api) retrieveDevices(w http.ResponseWriter, r *http.Request) {
return
}
+ statusOrderFromUser := r.URL.Query().Get("statusOrder")
+ var statusOrder int
+ if statusOrderFromUser != "" {
+ if statusOrderFromUser == "asc" {
+ statusOrder = 1
+ } else if statusOrderFromUser == "desc" {
+ statusOrder = -1
+ } else {
+ w.WriteHeader(http.StatusBadRequest)
+ json.NewEncoder(w).Encode("Status order must be 'asc' or 'desc'")
+ return
+ }
+ } else {
+ statusOrder = 1
+ }
+
+ sort := bson.M{}
+ sort["status"] = statusOrder
+
+ version := r.URL.Query().Get("version")
+ vendor := r.URL.Query().Get("vendor")
+ productClass := r.URL.Query().Get("type")
+ alias := r.URL.Query().Get("alias")
+ model := r.URL.Query().Get("model")
+ status := r.URL.Query().Get("status")
+
// Get devices with pagination
page_n := r.URL.Query().Get("page_number")
page_s := r.URL.Query().Get("page_size")
+
var err error
var page_number int64
@@ -70,52 +126,69 @@ func (a *Api) retrieveDevices(w http.ResponseWriter, r *http.Request) {
page_size = PAGE_SIZE_DEFAULT
}
- total, err := getDeviceCount(w, a.nc)
- if err != nil {
- return
- }
-
skip := page_number * (page_size - 1)
- if total < page_size {
- skip = 0
+
+ filter := map[string]interface{}{
+ "status_order": statusOrder,
+ "limit": page_size,
+ "skip": skip,
}
- //TODO: fix status ordering
- statusOrder := r.URL.Query().Get("status")
- if statusOrder != "" {
- if statusOrder == "asc" {
- statusOrder = "1"
- } else if statusOrder == "desc" {
- statusOrder = "-1"
- } else {
+ if version != "" {
+ filter["version"] = version
+ }
+ if vendor != "" {
+ filter["vendor"] = vendor
+ }
+ if productClass != "" {
+ filter["productClass"] = productClass
+ }
+ if alias != "" {
+ filter["alias"] = alias
+ }
+ if model != "" {
+ filter["model"] = model
+ }
+ if status != "" {
+ fmtStatus, err := strconv.Atoi(status)
+ if err != nil {
w.WriteHeader(http.StatusBadRequest)
- json.NewEncoder(w).Encode("Status order must be 'asc' or 'desc'")
+ json.NewEncoder(w).Encode("Status must be an integer")
return
}
- }
-
- sort := bson.M{}
- sort["status"] = 1
-
- //TODO: Create filters
-
- filter := bson.A{
- //bson.M{"$match": filter},
- bson.M{"$sort": sort}, // shows online devices first
- bson.M{"$skip": skip},
- bson.M{"$limit": page_size},
+ filter["status"] = fmtStatus
}
devices, err := getDevices(w, filter, a.nc)
if err != nil {
+ log.Println("Error getting devices", err)
+ return
+ }
+
+ if devices.Total == 0 {
+ w.WriteHeader(http.StatusNotFound)
+ err := json.NewEncoder(w).Encode("No devices found")
+ if err != nil {
+ log.Println(err)
+ }
+ return
+ }
+
+ if skip >= devices.Total {
+ w.WriteHeader(http.StatusBadRequest)
+ err := json.NewEncoder(w).Encode("Page number is out of range")
+ if err != nil {
+ log.Println(err)
+ }
return
}
err = json.NewEncoder(w).Encode(map[string]interface{}{
- "pages": total / page_size,
+ "pages": devices.Total / page_size,
"page": page_number,
"size": page_size,
- "devices": devices,
+ "devices": devices.Devices,
+ "total": devices.Total,
})
if err != nil {
log.Println(err)
@@ -283,3 +356,14 @@ func (a *Api) setDeviceAlias(w http.ResponseWriter, r *http.Request) {
return
}
}
+
+func (a *Api) filterOptions(w http.ResponseWriter, r *http.Request) {
+
+ resp, err := bridge.NatsReq[entity.FilterOptions](local.NATS_ADAPTER_SUBJECT+"devices.filterOptions", nil, w, a.nc)
+ if err != nil {
+ return
+ }
+
+ w.WriteHeader(resp.Code)
+ w.Write(utils.Marshall(resp.Msg))
+}
diff --git a/backend/services/controller/internal/api/enterprise.go b/backend/services/controller/internal/api/enterprise.go
deleted file mode 100644
index 1e09d44..0000000
--- a/backend/services/controller/internal/api/enterprise.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package api
-
-import (
- "io"
- "net/http"
-
- "github.com/gorilla/mux"
- "github.com/leandrofars/oktopus/internal/bridge"
- "github.com/leandrofars/oktopus/internal/entity"
- "github.com/leandrofars/oktopus/internal/utils"
-)
-
-func (a *Api) getEnterpriseResource(
- resource string,
- action string,
- device *entity.Device,
- sn string,
- w http.ResponseWriter,
- body []byte,
- protocol, datamodel string,
-) error {
- model, err := cwmpGetDeviceModel(device, w)
- if err != nil {
- return err
- }
-
- err = bridge.NatsEnterpriseInteraction("enterprise.v1."+protocol+"."+datamodel+"."+model+"."+sn+"."+resource+"."+action, body, w, a.nc)
- return err
-}
-
-func (a *Api) getMapsResource(
- action string,
- w http.ResponseWriter,
- body []byte,
-) error {
-
- err := bridge.NatsEnterpriseInteraction("geolocation.v1."+action, body, w, a.nc)
- return err
-}
-
-func (a *Api) devicesLocation(w http.ResponseWriter, r *http.Request) {
- if r.Method == http.MethodGet {
- a.getMapsResource("get", w, []byte{})
- }
-}
-
-func (a *Api) deviceSiteSurvey(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- sn := vars["sn"]
-
- device, err := getDeviceInfo(w, sn, a.nc)
- if err != nil {
- return
- }
-
- if r.Method == http.MethodGet {
-
- if device.Cwmp == entity.Online {
- a.getEnterpriseResource("sitesurvey", "get", device, sn, w, []byte{}, "cwmp", "098")
- return
- }
-
- if device.Mqtt == entity.Online || device.Stomp == entity.Online || device.Websockets == entity.Online {
- w.WriteHeader(http.StatusNotImplemented)
- w.Write(utils.Marshall("This feature is only working with CWMP devices"))
- return
- }
-
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Device is Offline"))
- }
-}
-
-func (a *Api) deviceConnectedDevices(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- sn := vars["sn"]
-
- device, err := getDeviceInfo(w, sn, a.nc)
- if err != nil {
- return
- }
-
- if r.Method == http.MethodGet {
-
- if device.Cwmp == entity.Online {
- a.getEnterpriseResource("connecteddevices", "get", device, sn, w, []byte{}, "cwmp", "098")
- return
- }
-
- if device.Mqtt == entity.Online || device.Stomp == entity.Online || device.Websockets == entity.Online {
- w.WriteHeader(http.StatusNotImplemented)
- w.Write(utils.Marshall("This feature is only working with CWMP devices"))
- return
- }
-
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Device is Offline"))
- }
-}
-
-func (a *Api) deviceTraceRoute(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- sn := vars["sn"]
-
- device, err := getDeviceInfo(w, sn, a.nc)
- if err != nil {
- return
- }
-
- if r.Method == http.MethodGet {
- if device.Cwmp == entity.Online {
- w.WriteHeader(http.StatusNotImplemented)
- w.Write(utils.Marshall("Get traceroute configuration is not implemented yet"))
- }
- }
-
- if r.Method == http.MethodPut {
- if device.Cwmp == entity.Online {
- a.getEnterpriseResource("traceroute", "set", device, sn, w, []byte{}, "cwmp", "098")
- return
- }
- }
-
- if device.Mqtt == entity.Online || device.Stomp == entity.Online || device.Websockets == entity.Online {
- w.WriteHeader(http.StatusNotImplemented)
- w.Write(utils.Marshall("This feature is only working with CWMP devices"))
- return
- }
-
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Device is Offline"))
-}
-
-func (a *Api) deviceSpeedTest(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- sn := vars["sn"]
-
- device, err := getDeviceInfo(w, sn, a.nc)
- if err != nil {
- return
- }
-
- payload, err := io.ReadAll(r.Body)
- if err != nil {
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Error reading request body"))
- return
-
- }
-
- if device.Cwmp == entity.Online {
- a.getEnterpriseResource("speedTest", "set", device, sn, w, payload, "cwmp", "098")
- return
- }
-
- if device.Mqtt == entity.Online || device.Stomp == entity.Online || device.Websockets == entity.Online {
- w.WriteHeader(http.StatusNotImplemented)
- w.Write(utils.Marshall("This feature is only working with CWMP devices"))
- return
- }
-
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Device is Offline"))
-}
-
-func (a *Api) devicePing(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- sn := vars["sn"]
-
- device, err := getDeviceInfo(w, sn, a.nc)
- if err != nil {
- return
- }
-
- if device.Cwmp != entity.Online {
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Device is Offline"))
- }
-
- if r.Method == http.MethodGet {
- if device.Cwmp == entity.Online {
- a.getEnterpriseResource("ping", "get", device, sn, w, []byte{}, "cwmp", "098")
- return
- }
- } else {
- payload, err := io.ReadAll(r.Body)
- if err != nil {
- w.WriteHeader(http.StatusBadRequest)
- w.Write(utils.Marshall("Error reading request body"))
- return
-
- }
-
- if device.Cwmp == entity.Online {
- a.getEnterpriseResource("ping", "set", device, sn, w, payload, "cwmp", "098")
- return
- }
- }
-
- if device.Mqtt == entity.Online || device.Stomp == entity.Online || device.Websockets == entity.Online {
- w.WriteHeader(http.StatusNotImplemented)
- w.Write(utils.Marshall("This feature is only working with CWMP devices"))
- return
- }
-}
diff --git a/backend/services/controller/internal/api/user.go b/backend/services/controller/internal/api/user.go
index b67d4b5..3e31726 100644
--- a/backend/services/controller/internal/api/user.go
+++ b/backend/services/controller/internal/api/user.go
@@ -176,7 +176,7 @@ func (a *Api) registerAdminUser(w http.ResponseWriter, r *http.Request) {
utils.MarshallEncoder(err, w)
}
- if !adminUserExists(users, a.enterpise.SupportEmail) {
+ if !adminUserExists(users) {
var user db.User
err = json.NewDecoder(r.Body).Decode(&user)
if err != nil {
@@ -235,14 +235,14 @@ func (a *Api) registerAdminUser(w http.ResponseWriter, r *http.Request) {
}
}
-func adminUserExists(users []map[string]interface{}, supportEmail string) bool {
+func adminUserExists(users []map[string]interface{}) bool {
if len(users) == 0 {
return false
}
for _, x := range users {
- if db.UserLevels(x["level"].(int32)) == db.AdminUser && x["email"].(string) != supportEmail {
+ if db.UserLevels(x["level"].(int32)) == db.AdminUser {
return true
}
}
@@ -257,7 +257,7 @@ func (a *Api) adminUserExists(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
return
}
- adminExits := adminUserExists(users, a.enterpise.SupportEmail)
+ adminExits := adminUserExists(users)
json.NewEncoder(w).Encode(adminExits)
return
}
diff --git a/backend/services/controller/internal/api/utils.go b/backend/services/controller/internal/api/utils.go
index 7a81f88..7df48aa 100644
--- a/backend/services/controller/internal/api/utils.go
+++ b/backend/services/controller/internal/api/utils.go
@@ -10,7 +10,6 @@ import (
local "github.com/leandrofars/oktopus/internal/nats"
"github.com/leandrofars/oktopus/internal/utils"
"github.com/nats-io/nats.go"
- "go.mongodb.org/mongo-driver/bson/primitive"
)
var errInvalidMtp = errors.New("Invalid MTP, valid options are: mqtt, ws, stomp")
@@ -105,8 +104,8 @@ func getDeviceCount(w http.ResponseWriter, nc *nats.Conn) (int64, error) {
return msg.Msg, err
}
-func getDevices(w http.ResponseWriter, filter primitive.A, nc *nats.Conn) (*[]entity.Device, error) {
- msg, err := bridge.NatsReq[[]entity.Device](
+func getDevices(w http.ResponseWriter, filter map[string]interface{}, nc *nats.Conn) (*entity.DevicesList, error) {
+ msg, err := bridge.NatsReq[entity.DevicesList](
local.NATS_ADAPTER_SUBJECT+"devices.retrieve",
utils.Marshall(filter),
w,
diff --git a/backend/services/controller/internal/api/wifi.go b/backend/services/controller/internal/api/wifi.go
index e6b2213..e1de563 100644
--- a/backend/services/controller/internal/api/wifi.go
+++ b/backend/services/controller/internal/api/wifi.go
@@ -1,7 +1,6 @@
package api
import (
- "io"
"log"
"net/http"
"strings"
@@ -213,11 +212,6 @@ func (a *Api) deviceWifi(w http.ResponseWriter, r *http.Request) {
if device.Cwmp == entity.Online {
- if a.enterpise.Enable {
- a.getEnterpriseResource("wifi", "get", device, sn, w, []byte{}, "cwmp", "098")
- return
- }
-
var (
NUMBER_OF_WIFI_PARAMS_TO_GET = 5
)
@@ -347,17 +341,6 @@ func (a *Api) deviceWifi(w http.ResponseWriter, r *http.Request) {
if device.Cwmp == entity.Online {
- if a.enterpise.Enable {
- payload, err := io.ReadAll(r.Body)
- if err != nil {
- w.WriteHeader(http.StatusInternalServerError)
- w.Write(utils.Marshall(err.Error()))
- return
- }
- a.getEnterpriseResource("wifi", "set", device, sn, w, payload, "cwmp", "098")
- return
- }
-
var body []WiFi
err := utils.MarshallDecoder(&body, r.Body)
diff --git a/backend/services/controller/internal/bridge/bridge.go b/backend/services/controller/internal/bridge/bridge.go
index 16f1b43..905cb80 100644
--- a/backend/services/controller/internal/bridge/bridge.go
+++ b/backend/services/controller/internal/bridge/bridge.go
@@ -153,7 +153,6 @@ func NatsReq[T entity.DataType](
err = json.Unmarshal(msg.Data, &answer)
if err != nil {
-
var errMsg *entity.MsgAnswer[*string]
err = json.Unmarshal(msg.Data, &errMsg)
diff --git a/backend/services/controller/internal/config/config.go b/backend/services/controller/internal/config/config.go
index e90dc5e..a67c563 100644
--- a/backend/services/controller/internal/config/config.go
+++ b/backend/services/controller/internal/config/config.go
@@ -30,17 +30,10 @@ type RestApi struct {
Ctx context.Context
}
-type Enterprise struct {
- Enable bool
- SupportPassword string
- SupportEmail string
-}
-
type Config struct {
- RestApi RestApi
- Nats Nats
- Mongo Mongo
- Enterprise Enterprise
+ RestApi RestApi
+ Nats Nats
+ Mongo Mongo
}
type Tls struct {
@@ -62,9 +55,6 @@ func NewConfig() *Config {
serverCA := flag.String("server_ca", lookupEnvOrString("SERVER_CA", "rootCA.pem"), "server CA file to TLS connection")
flApiPort := flag.String("api_port", lookupEnvOrString("REST_API_PORT", "8000"), "Rest api port")
mongoUri := flag.String("mongo_uri", lookupEnvOrString("MONGO_URI", "mongodb://localhost:27017"), "uri for mongodb server")
- enterpise := flag.Bool("enterprise", lookupEnvOrBool("ENTERPRISE", false), "enterprise version enable")
- enterprise_support_password := flag.String("enterprise_support_password", lookupEnvOrString("ENTERPRISE_SUPPORT_PASSWORD", ""), "enterprise support password")
- enterpise_support_email := flag.String("enterprise_support_email", lookupEnvOrString("ENTERPRISE_SUPPORT_EMAIL", ""), "enterprise support email")
flHelp := flag.Bool("help", false, "Help")
/*
@@ -103,11 +93,6 @@ func NewConfig() *Config {
Uri: *mongoUri,
Ctx: ctx,
},
- Enterprise: Enterprise{
- Enable: *enterpise,
- SupportPassword: *enterprise_support_password,
- SupportEmail: *enterpise_support_email,
- },
}
}
diff --git a/backend/services/controller/internal/entity/device.go b/backend/services/controller/internal/entity/device.go
index 12948a7..840d045 100644
--- a/backend/services/controller/internal/entity/device.go
+++ b/backend/services/controller/internal/entity/device.go
@@ -29,3 +29,15 @@ type StatusCount struct {
Status int `bson:"_id" json:"status"`
Count int `bson:"count" json:"count"`
}
+
+type DevicesList struct {
+ Devices []Device `json:"devices" bson:"documents"`
+ Total int64 `json:"total"`
+}
+
+type FilterOptions struct {
+ Models []string `json:"models"`
+ ProductClasses []string `json:"productClasses"`
+ Vendors []string `json:"vendors"`
+ Versions []string `json:"versions"`
+}
diff --git a/backend/services/controller/internal/entity/msg.go b/backend/services/controller/internal/entity/msg.go
index 768e4bb..760cfd9 100644
--- a/backend/services/controller/internal/entity/msg.go
+++ b/backend/services/controller/internal/entity/msg.go
@@ -3,7 +3,7 @@ package entity
import "time"
type DataType interface {
- []map[string]interface{} | *string | Device | int64 | []Device | []VendorsCount | []ProductClassCount | []StatusCount | time.Duration | []byte
+ []map[string]interface{} | *string | Device | int64 | []Device | []VendorsCount | []ProductClassCount | []StatusCount | time.Duration | []byte | []string | FilterOptions | DevicesList
}
type MsgAnswer[T DataType] struct {
diff --git a/backend/services/mtp/ws/internal/usp_message/usp-msg-1-2.pb.go b/backend/services/controller/internal/usp/usp_msg/usp-msg-1-3.pb.go
old mode 100755
new mode 100644
similarity index 74%
rename from backend/services/mtp/ws/internal/usp_message/usp-msg-1-2.pb.go
rename to backend/services/controller/internal/usp/usp_msg/usp-msg-1-3.pb.go
index e63ed42..fd8ae56
--- a/backend/services/mtp/ws/internal/usp_message/usp-msg-1-2.pb.go
+++ b/backend/services/controller/internal/usp/usp_msg/usp-msg-1-3.pb.go
@@ -1,13 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.31.0
// protoc v3.21.12
-// source: usp-msg-1-2.proto
+// source: usp-msg-1-3.proto
//**************************************************************************
// TR-369 USP Message Protocol Buffer Schema
//
-// Copyright (c) 2017-2018, Broadband Forum
+// Copyright (c) 2017-2022, Broadband Forum
//
// The undersigned members have elected to grant the copyright to
// their contributed material used in this software:
@@ -51,13 +51,8 @@
// Any moral rights which are necessary to exercise under the above
// license grant are also deemed granted under this license.
//
-// | Version | Name | Date |
-// | TR-369 1.0.0 | User Services Platform | APR, 2018 |
-// | TR-369 1.0.1 | User Services Platform | JUN, 2018 |
-// | TR-369 1.0.2 | User Services Platform | OCT, 2018 |
-// | TR-369 1.1 | User Services Platform | SEP, 2019 |
-//
-// BBF software release registry: http://www.broadband-forum.org/software
+// BBF software release registry:
+// https://www.broadband-forum.org/software-releases
//**************************************************************************
package usp_msg
@@ -98,6 +93,10 @@ const (
Header_NOTIFY_RESP Header_MsgType = 16
Header_GET_SUPPORTED_PROTO Header_MsgType = 17
Header_GET_SUPPORTED_PROTO_RESP Header_MsgType = 18
+ Header_REGISTER Header_MsgType = 19
+ Header_REGISTER_RESP Header_MsgType = 20
+ Header_DEREGISTER Header_MsgType = 21
+ Header_DEREGISTER_RESP Header_MsgType = 22
)
// Enum value maps for Header_MsgType.
@@ -122,6 +121,10 @@ var (
16: "NOTIFY_RESP",
17: "GET_SUPPORTED_PROTO",
18: "GET_SUPPORTED_PROTO_RESP",
+ 19: "REGISTER",
+ 20: "REGISTER_RESP",
+ 21: "DEREGISTER",
+ 22: "DEREGISTER_RESP",
}
Header_MsgType_value = map[string]int32{
"ERROR": 0,
@@ -143,6 +146,10 @@ var (
"NOTIFY_RESP": 16,
"GET_SUPPORTED_PROTO": 17,
"GET_SUPPORTED_PROTO_RESP": 18,
+ "REGISTER": 19,
+ "REGISTER_RESP": 20,
+ "DEREGISTER": 21,
+ "DEREGISTER_RESP": 22,
}
)
@@ -157,11 +164,11 @@ func (x Header_MsgType) String() string {
}
func (Header_MsgType) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_msg_1_2_proto_enumTypes[0].Descriptor()
+ return file_usp_msg_1_3_proto_enumTypes[0].Descriptor()
}
func (Header_MsgType) Type() protoreflect.EnumType {
- return &file_usp_msg_1_2_proto_enumTypes[0]
+ return &file_usp_msg_1_3_proto_enumTypes[0]
}
func (x Header_MsgType) Number() protoreflect.EnumNumber {
@@ -170,7 +177,7 @@ func (x Header_MsgType) Number() protoreflect.EnumNumber {
// Deprecated: Use Header_MsgType.Descriptor instead.
func (Header_MsgType) EnumDescriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{1, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{1, 0}
}
type GetSupportedDMResp_ParamAccessType int32
@@ -206,11 +213,11 @@ func (x GetSupportedDMResp_ParamAccessType) String() string {
}
func (GetSupportedDMResp_ParamAccessType) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_msg_1_2_proto_enumTypes[1].Descriptor()
+ return file_usp_msg_1_3_proto_enumTypes[1].Descriptor()
}
func (GetSupportedDMResp_ParamAccessType) Type() protoreflect.EnumType {
- return &file_usp_msg_1_2_proto_enumTypes[1]
+ return &file_usp_msg_1_3_proto_enumTypes[1]
}
func (x GetSupportedDMResp_ParamAccessType) Number() protoreflect.EnumNumber {
@@ -219,7 +226,7 @@ func (x GetSupportedDMResp_ParamAccessType) Number() protoreflect.EnumNumber {
// Deprecated: Use GetSupportedDMResp_ParamAccessType.Descriptor instead.
func (GetSupportedDMResp_ParamAccessType) EnumDescriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 0}
}
type GetSupportedDMResp_ObjAccessType int32
@@ -258,11 +265,11 @@ func (x GetSupportedDMResp_ObjAccessType) String() string {
}
func (GetSupportedDMResp_ObjAccessType) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_msg_1_2_proto_enumTypes[2].Descriptor()
+ return file_usp_msg_1_3_proto_enumTypes[2].Descriptor()
}
func (GetSupportedDMResp_ObjAccessType) Type() protoreflect.EnumType {
- return &file_usp_msg_1_2_proto_enumTypes[2]
+ return &file_usp_msg_1_3_proto_enumTypes[2]
}
func (x GetSupportedDMResp_ObjAccessType) Number() protoreflect.EnumNumber {
@@ -271,7 +278,7 @@ func (x GetSupportedDMResp_ObjAccessType) Number() protoreflect.EnumNumber {
// Deprecated: Use GetSupportedDMResp_ObjAccessType.Descriptor instead.
func (GetSupportedDMResp_ObjAccessType) EnumDescriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 1}
}
type GetSupportedDMResp_ParamValueType int32
@@ -331,11 +338,11 @@ func (x GetSupportedDMResp_ParamValueType) String() string {
}
func (GetSupportedDMResp_ParamValueType) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_msg_1_2_proto_enumTypes[3].Descriptor()
+ return file_usp_msg_1_3_proto_enumTypes[3].Descriptor()
}
func (GetSupportedDMResp_ParamValueType) Type() protoreflect.EnumType {
- return &file_usp_msg_1_2_proto_enumTypes[3]
+ return &file_usp_msg_1_3_proto_enumTypes[3]
}
func (x GetSupportedDMResp_ParamValueType) Number() protoreflect.EnumNumber {
@@ -344,7 +351,7 @@ func (x GetSupportedDMResp_ParamValueType) Number() protoreflect.EnumNumber {
// Deprecated: Use GetSupportedDMResp_ParamValueType.Descriptor instead.
func (GetSupportedDMResp_ParamValueType) EnumDescriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 2}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 2}
}
type GetSupportedDMResp_ValueChangeType int32
@@ -380,11 +387,11 @@ func (x GetSupportedDMResp_ValueChangeType) String() string {
}
func (GetSupportedDMResp_ValueChangeType) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_msg_1_2_proto_enumTypes[4].Descriptor()
+ return file_usp_msg_1_3_proto_enumTypes[4].Descriptor()
}
func (GetSupportedDMResp_ValueChangeType) Type() protoreflect.EnumType {
- return &file_usp_msg_1_2_proto_enumTypes[4]
+ return &file_usp_msg_1_3_proto_enumTypes[4]
}
func (x GetSupportedDMResp_ValueChangeType) Number() protoreflect.EnumNumber {
@@ -393,7 +400,7 @@ func (x GetSupportedDMResp_ValueChangeType) Number() protoreflect.EnumNumber {
// Deprecated: Use GetSupportedDMResp_ValueChangeType.Descriptor instead.
func (GetSupportedDMResp_ValueChangeType) EnumDescriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 3}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 3}
}
type GetSupportedDMResp_CmdType int32
@@ -429,11 +436,11 @@ func (x GetSupportedDMResp_CmdType) String() string {
}
func (GetSupportedDMResp_CmdType) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_msg_1_2_proto_enumTypes[5].Descriptor()
+ return file_usp_msg_1_3_proto_enumTypes[5].Descriptor()
}
func (GetSupportedDMResp_CmdType) Type() protoreflect.EnumType {
- return &file_usp_msg_1_2_proto_enumTypes[5]
+ return &file_usp_msg_1_3_proto_enumTypes[5]
}
func (x GetSupportedDMResp_CmdType) Number() protoreflect.EnumNumber {
@@ -442,7 +449,7 @@ func (x GetSupportedDMResp_CmdType) Number() protoreflect.EnumNumber {
// Deprecated: Use GetSupportedDMResp_CmdType.Descriptor instead.
func (GetSupportedDMResp_CmdType) EnumDescriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 4}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 4}
}
type Msg struct {
@@ -457,7 +464,7 @@ type Msg struct {
func (x *Msg) Reset() {
*x = Msg{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[0]
+ mi := &file_usp_msg_1_3_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -470,7 +477,7 @@ func (x *Msg) String() string {
func (*Msg) ProtoMessage() {}
func (x *Msg) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[0]
+ mi := &file_usp_msg_1_3_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -483,7 +490,7 @@ func (x *Msg) ProtoReflect() protoreflect.Message {
// Deprecated: Use Msg.ProtoReflect.Descriptor instead.
func (*Msg) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{0}
}
func (x *Msg) GetHeader() *Header {
@@ -512,7 +519,7 @@ type Header struct {
func (x *Header) Reset() {
*x = Header{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[1]
+ mi := &file_usp_msg_1_3_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -525,7 +532,7 @@ func (x *Header) String() string {
func (*Header) ProtoMessage() {}
func (x *Header) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[1]
+ mi := &file_usp_msg_1_3_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -538,7 +545,7 @@ func (x *Header) ProtoReflect() protoreflect.Message {
// Deprecated: Use Header.ProtoReflect.Descriptor instead.
func (*Header) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{1}
}
func (x *Header) GetMsgId() string {
@@ -571,7 +578,7 @@ type Body struct {
func (x *Body) Reset() {
*x = Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[2]
+ mi := &file_usp_msg_1_3_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -584,7 +591,7 @@ func (x *Body) String() string {
func (*Body) ProtoMessage() {}
func (x *Body) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[2]
+ mi := &file_usp_msg_1_3_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -597,7 +604,7 @@ func (x *Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use Body.ProtoReflect.Descriptor instead.
func (*Body) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{2}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{2}
}
func (m *Body) GetMsgBody() isBody_MsgBody {
@@ -666,13 +673,15 @@ type Request struct {
// *Request_Operate
// *Request_Notify
// *Request_GetSupportedProtocol
+ // *Request_Register
+ // *Request_Deregister
ReqType isRequest_ReqType `protobuf_oneof:"req_type"`
}
func (x *Request) Reset() {
*x = Request{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[3]
+ mi := &file_usp_msg_1_3_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -685,7 +694,7 @@ func (x *Request) String() string {
func (*Request) ProtoMessage() {}
func (x *Request) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[3]
+ mi := &file_usp_msg_1_3_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -698,7 +707,7 @@ func (x *Request) ProtoReflect() protoreflect.Message {
// Deprecated: Use Request.ProtoReflect.Descriptor instead.
func (*Request) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{3}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{3}
}
func (m *Request) GetReqType() isRequest_ReqType {
@@ -771,6 +780,20 @@ func (x *Request) GetGetSupportedProtocol() *GetSupportedProtocol {
return nil
}
+func (x *Request) GetRegister() *Register {
+ if x, ok := x.GetReqType().(*Request_Register); ok {
+ return x.Register
+ }
+ return nil
+}
+
+func (x *Request) GetDeregister() *Deregister {
+ if x, ok := x.GetReqType().(*Request_Deregister); ok {
+ return x.Deregister
+ }
+ return nil
+}
+
type isRequest_ReqType interface {
isRequest_ReqType()
}
@@ -811,6 +834,14 @@ type Request_GetSupportedProtocol struct {
GetSupportedProtocol *GetSupportedProtocol `protobuf:"bytes,9,opt,name=get_supported_protocol,json=getSupportedProtocol,proto3,oneof"`
}
+type Request_Register struct {
+ Register *Register `protobuf:"bytes,10,opt,name=register,proto3,oneof"`
+}
+
+type Request_Deregister struct {
+ Deregister *Deregister `protobuf:"bytes,11,opt,name=deregister,proto3,oneof"`
+}
+
func (*Request_Get) isRequest_ReqType() {}
func (*Request_GetSupportedDm) isRequest_ReqType() {}
@@ -829,6 +860,10 @@ func (*Request_Notify) isRequest_ReqType() {}
func (*Request_GetSupportedProtocol) isRequest_ReqType() {}
+func (*Request_Register) isRequest_ReqType() {}
+
+func (*Request_Deregister) isRequest_ReqType() {}
+
type Response struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -845,13 +880,15 @@ type Response struct {
// *Response_OperateResp
// *Response_NotifyResp
// *Response_GetSupportedProtocolResp
+ // *Response_RegisterResp
+ // *Response_DeregisterResp
RespType isResponse_RespType `protobuf_oneof:"resp_type"`
}
func (x *Response) Reset() {
*x = Response{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[4]
+ mi := &file_usp_msg_1_3_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -864,7 +901,7 @@ func (x *Response) String() string {
func (*Response) ProtoMessage() {}
func (x *Response) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[4]
+ mi := &file_usp_msg_1_3_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -877,7 +914,7 @@ func (x *Response) ProtoReflect() protoreflect.Message {
// Deprecated: Use Response.ProtoReflect.Descriptor instead.
func (*Response) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{4}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{4}
}
func (m *Response) GetRespType() isResponse_RespType {
@@ -950,6 +987,20 @@ func (x *Response) GetGetSupportedProtocolResp() *GetSupportedProtocolResp {
return nil
}
+func (x *Response) GetRegisterResp() *RegisterResp {
+ if x, ok := x.GetRespType().(*Response_RegisterResp); ok {
+ return x.RegisterResp
+ }
+ return nil
+}
+
+func (x *Response) GetDeregisterResp() *DeregisterResp {
+ if x, ok := x.GetRespType().(*Response_DeregisterResp); ok {
+ return x.DeregisterResp
+ }
+ return nil
+}
+
type isResponse_RespType interface {
isResponse_RespType()
}
@@ -990,6 +1041,14 @@ type Response_GetSupportedProtocolResp struct {
GetSupportedProtocolResp *GetSupportedProtocolResp `protobuf:"bytes,9,opt,name=get_supported_protocol_resp,json=getSupportedProtocolResp,proto3,oneof"`
}
+type Response_RegisterResp struct {
+ RegisterResp *RegisterResp `protobuf:"bytes,10,opt,name=register_resp,json=registerResp,proto3,oneof"`
+}
+
+type Response_DeregisterResp struct {
+ DeregisterResp *DeregisterResp `protobuf:"bytes,11,opt,name=deregister_resp,json=deregisterResp,proto3,oneof"`
+}
+
func (*Response_GetResp) isResponse_RespType() {}
func (*Response_GetSupportedDmResp) isResponse_RespType() {}
@@ -1008,6 +1067,10 @@ func (*Response_NotifyResp) isResponse_RespType() {}
func (*Response_GetSupportedProtocolResp) isResponse_RespType() {}
+func (*Response_RegisterResp) isResponse_RespType() {}
+
+func (*Response_DeregisterResp) isResponse_RespType() {}
+
type Error struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1021,7 +1084,7 @@ type Error struct {
func (x *Error) Reset() {
*x = Error{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[5]
+ mi := &file_usp_msg_1_3_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1034,7 +1097,7 @@ func (x *Error) String() string {
func (*Error) ProtoMessage() {}
func (x *Error) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[5]
+ mi := &file_usp_msg_1_3_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1047,7 +1110,7 @@ func (x *Error) ProtoReflect() protoreflect.Message {
// Deprecated: Use Error.ProtoReflect.Descriptor instead.
func (*Error) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{5}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{5}
}
func (x *Error) GetErrCode() uint32 {
@@ -1083,7 +1146,7 @@ type Get struct {
func (x *Get) Reset() {
*x = Get{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[6]
+ mi := &file_usp_msg_1_3_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1096,7 +1159,7 @@ func (x *Get) String() string {
func (*Get) ProtoMessage() {}
func (x *Get) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[6]
+ mi := &file_usp_msg_1_3_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1109,7 +1172,7 @@ func (x *Get) ProtoReflect() protoreflect.Message {
// Deprecated: Use Get.ProtoReflect.Descriptor instead.
func (*Get) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{6}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{6}
}
func (x *Get) GetParamPaths() []string {
@@ -1137,7 +1200,7 @@ type GetResp struct {
func (x *GetResp) Reset() {
*x = GetResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[7]
+ mi := &file_usp_msg_1_3_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1150,7 +1213,7 @@ func (x *GetResp) String() string {
func (*GetResp) ProtoMessage() {}
func (x *GetResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[7]
+ mi := &file_usp_msg_1_3_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1163,7 +1226,7 @@ func (x *GetResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetResp.ProtoReflect.Descriptor instead.
func (*GetResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{7}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{7}
}
func (x *GetResp) GetReqPathResults() []*GetResp_RequestedPathResult {
@@ -1188,7 +1251,7 @@ type GetSupportedDM struct {
func (x *GetSupportedDM) Reset() {
*x = GetSupportedDM{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[8]
+ mi := &file_usp_msg_1_3_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1201,7 +1264,7 @@ func (x *GetSupportedDM) String() string {
func (*GetSupportedDM) ProtoMessage() {}
func (x *GetSupportedDM) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[8]
+ mi := &file_usp_msg_1_3_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1214,7 +1277,7 @@ func (x *GetSupportedDM) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSupportedDM.ProtoReflect.Descriptor instead.
func (*GetSupportedDM) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{8}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{8}
}
func (x *GetSupportedDM) GetObjPaths() []string {
@@ -1263,7 +1326,7 @@ type GetSupportedDMResp struct {
func (x *GetSupportedDMResp) Reset() {
*x = GetSupportedDMResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[9]
+ mi := &file_usp_msg_1_3_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1276,7 +1339,7 @@ func (x *GetSupportedDMResp) String() string {
func (*GetSupportedDMResp) ProtoMessage() {}
func (x *GetSupportedDMResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[9]
+ mi := &file_usp_msg_1_3_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1289,7 +1352,7 @@ func (x *GetSupportedDMResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSupportedDMResp.ProtoReflect.Descriptor instead.
func (*GetSupportedDMResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9}
}
func (x *GetSupportedDMResp) GetReqObjResults() []*GetSupportedDMResp_RequestedObjectResult {
@@ -1311,7 +1374,7 @@ type GetInstances struct {
func (x *GetInstances) Reset() {
*x = GetInstances{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[10]
+ mi := &file_usp_msg_1_3_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1324,7 +1387,7 @@ func (x *GetInstances) String() string {
func (*GetInstances) ProtoMessage() {}
func (x *GetInstances) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[10]
+ mi := &file_usp_msg_1_3_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1337,7 +1400,7 @@ func (x *GetInstances) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetInstances.ProtoReflect.Descriptor instead.
func (*GetInstances) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{10}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{10}
}
func (x *GetInstances) GetObjPaths() []string {
@@ -1365,7 +1428,7 @@ type GetInstancesResp struct {
func (x *GetInstancesResp) Reset() {
*x = GetInstancesResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[11]
+ mi := &file_usp_msg_1_3_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1378,7 +1441,7 @@ func (x *GetInstancesResp) String() string {
func (*GetInstancesResp) ProtoMessage() {}
func (x *GetInstancesResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[11]
+ mi := &file_usp_msg_1_3_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1391,7 +1454,7 @@ func (x *GetInstancesResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetInstancesResp.ProtoReflect.Descriptor instead.
func (*GetInstancesResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{11}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{11}
}
func (x *GetInstancesResp) GetReqPathResults() []*GetInstancesResp_RequestedPathResult {
@@ -1412,7 +1475,7 @@ type GetSupportedProtocol struct {
func (x *GetSupportedProtocol) Reset() {
*x = GetSupportedProtocol{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[12]
+ mi := &file_usp_msg_1_3_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1425,7 +1488,7 @@ func (x *GetSupportedProtocol) String() string {
func (*GetSupportedProtocol) ProtoMessage() {}
func (x *GetSupportedProtocol) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[12]
+ mi := &file_usp_msg_1_3_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1438,7 +1501,7 @@ func (x *GetSupportedProtocol) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSupportedProtocol.ProtoReflect.Descriptor instead.
func (*GetSupportedProtocol) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{12}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{12}
}
func (x *GetSupportedProtocol) GetControllerSupportedProtocolVersions() string {
@@ -1459,7 +1522,7 @@ type GetSupportedProtocolResp struct {
func (x *GetSupportedProtocolResp) Reset() {
*x = GetSupportedProtocolResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[13]
+ mi := &file_usp_msg_1_3_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1472,7 +1535,7 @@ func (x *GetSupportedProtocolResp) String() string {
func (*GetSupportedProtocolResp) ProtoMessage() {}
func (x *GetSupportedProtocolResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[13]
+ mi := &file_usp_msg_1_3_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1485,7 +1548,7 @@ func (x *GetSupportedProtocolResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetSupportedProtocolResp.ProtoReflect.Descriptor instead.
func (*GetSupportedProtocolResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{13}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{13}
}
func (x *GetSupportedProtocolResp) GetAgentSupportedProtocolVersions() string {
@@ -1507,7 +1570,7 @@ type Add struct {
func (x *Add) Reset() {
*x = Add{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[14]
+ mi := &file_usp_msg_1_3_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1520,7 +1583,7 @@ func (x *Add) String() string {
func (*Add) ProtoMessage() {}
func (x *Add) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[14]
+ mi := &file_usp_msg_1_3_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1533,7 +1596,7 @@ func (x *Add) ProtoReflect() protoreflect.Message {
// Deprecated: Use Add.ProtoReflect.Descriptor instead.
func (*Add) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{14}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{14}
}
func (x *Add) GetAllowPartial() bool {
@@ -1561,7 +1624,7 @@ type AddResp struct {
func (x *AddResp) Reset() {
*x = AddResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[15]
+ mi := &file_usp_msg_1_3_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1574,7 +1637,7 @@ func (x *AddResp) String() string {
func (*AddResp) ProtoMessage() {}
func (x *AddResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[15]
+ mi := &file_usp_msg_1_3_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1587,7 +1650,7 @@ func (x *AddResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use AddResp.ProtoReflect.Descriptor instead.
func (*AddResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{15}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{15}
}
func (x *AddResp) GetCreatedObjResults() []*AddResp_CreatedObjectResult {
@@ -1609,7 +1672,7 @@ type Delete struct {
func (x *Delete) Reset() {
*x = Delete{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[16]
+ mi := &file_usp_msg_1_3_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1622,7 +1685,7 @@ func (x *Delete) String() string {
func (*Delete) ProtoMessage() {}
func (x *Delete) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[16]
+ mi := &file_usp_msg_1_3_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1635,7 +1698,7 @@ func (x *Delete) ProtoReflect() protoreflect.Message {
// Deprecated: Use Delete.ProtoReflect.Descriptor instead.
func (*Delete) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{16}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{16}
}
func (x *Delete) GetAllowPartial() bool {
@@ -1663,7 +1726,7 @@ type DeleteResp struct {
func (x *DeleteResp) Reset() {
*x = DeleteResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[17]
+ mi := &file_usp_msg_1_3_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1676,7 +1739,7 @@ func (x *DeleteResp) String() string {
func (*DeleteResp) ProtoMessage() {}
func (x *DeleteResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[17]
+ mi := &file_usp_msg_1_3_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1689,7 +1752,7 @@ func (x *DeleteResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteResp.ProtoReflect.Descriptor instead.
func (*DeleteResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{17}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{17}
}
func (x *DeleteResp) GetDeletedObjResults() []*DeleteResp_DeletedObjectResult {
@@ -1711,7 +1774,7 @@ type Set struct {
func (x *Set) Reset() {
*x = Set{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[18]
+ mi := &file_usp_msg_1_3_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1724,7 +1787,7 @@ func (x *Set) String() string {
func (*Set) ProtoMessage() {}
func (x *Set) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[18]
+ mi := &file_usp_msg_1_3_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1737,7 +1800,7 @@ func (x *Set) ProtoReflect() protoreflect.Message {
// Deprecated: Use Set.ProtoReflect.Descriptor instead.
func (*Set) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{18}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{18}
}
func (x *Set) GetAllowPartial() bool {
@@ -1765,7 +1828,7 @@ type SetResp struct {
func (x *SetResp) Reset() {
*x = SetResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[19]
+ mi := &file_usp_msg_1_3_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1778,7 +1841,7 @@ func (x *SetResp) String() string {
func (*SetResp) ProtoMessage() {}
func (x *SetResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[19]
+ mi := &file_usp_msg_1_3_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1791,7 +1854,7 @@ func (x *SetResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetResp.ProtoReflect.Descriptor instead.
func (*SetResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19}
}
func (x *SetResp) GetUpdatedObjResults() []*SetResp_UpdatedObjectResult {
@@ -1815,7 +1878,7 @@ type Operate struct {
func (x *Operate) Reset() {
*x = Operate{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[20]
+ mi := &file_usp_msg_1_3_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1828,7 +1891,7 @@ func (x *Operate) String() string {
func (*Operate) ProtoMessage() {}
func (x *Operate) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[20]
+ mi := &file_usp_msg_1_3_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1841,7 +1904,7 @@ func (x *Operate) ProtoReflect() protoreflect.Message {
// Deprecated: Use Operate.ProtoReflect.Descriptor instead.
func (*Operate) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{20}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{20}
}
func (x *Operate) GetCommand() string {
@@ -1883,7 +1946,7 @@ type OperateResp struct {
func (x *OperateResp) Reset() {
*x = OperateResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[21]
+ mi := &file_usp_msg_1_3_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1896,7 +1959,7 @@ func (x *OperateResp) String() string {
func (*OperateResp) ProtoMessage() {}
func (x *OperateResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[21]
+ mi := &file_usp_msg_1_3_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1909,7 +1972,7 @@ func (x *OperateResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use OperateResp.ProtoReflect.Descriptor instead.
func (*OperateResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{21}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{21}
}
func (x *OperateResp) GetOperationResults() []*OperateResp_OperationResult {
@@ -1940,7 +2003,7 @@ type Notify struct {
func (x *Notify) Reset() {
*x = Notify{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[22]
+ mi := &file_usp_msg_1_3_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1953,7 +2016,7 @@ func (x *Notify) String() string {
func (*Notify) ProtoMessage() {}
func (x *Notify) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[22]
+ mi := &file_usp_msg_1_3_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1966,7 +2029,7 @@ func (x *Notify) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify.ProtoReflect.Descriptor instead.
func (*Notify) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22}
}
func (x *Notify) GetSubscriptionId() string {
@@ -2083,7 +2146,7 @@ type NotifyResp struct {
func (x *NotifyResp) Reset() {
*x = NotifyResp{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[23]
+ mi := &file_usp_msg_1_3_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2096,7 +2159,7 @@ func (x *NotifyResp) String() string {
func (*NotifyResp) ProtoMessage() {}
func (x *NotifyResp) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[23]
+ mi := &file_usp_msg_1_3_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2109,7 +2172,7 @@ func (x *NotifyResp) ProtoReflect() protoreflect.Message {
// Deprecated: Use NotifyResp.ProtoReflect.Descriptor instead.
func (*NotifyResp) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{23}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{23}
}
func (x *NotifyResp) GetSubscriptionId() string {
@@ -2119,6 +2182,202 @@ func (x *NotifyResp) GetSubscriptionId() string {
return ""
}
+type Register struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AllowPartial bool `protobuf:"varint,1,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"`
+ RegPaths []*Register_RegistrationPath `protobuf:"bytes,2,rep,name=reg_paths,json=regPaths,proto3" json:"reg_paths,omitempty"`
+}
+
+func (x *Register) Reset() {
+ *x = Register{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Register) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Register) ProtoMessage() {}
+
+func (x *Register) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Register.ProtoReflect.Descriptor instead.
+func (*Register) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *Register) GetAllowPartial() bool {
+ if x != nil {
+ return x.AllowPartial
+ }
+ return false
+}
+
+func (x *Register) GetRegPaths() []*Register_RegistrationPath {
+ if x != nil {
+ return x.RegPaths
+ }
+ return nil
+}
+
+type RegisterResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegisteredPathResults []*RegisterResp_RegisteredPathResult `protobuf:"bytes,1,rep,name=registered_path_results,json=registeredPathResults,proto3" json:"registered_path_results,omitempty"`
+}
+
+func (x *RegisterResp) Reset() {
+ *x = RegisterResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterResp) ProtoMessage() {}
+
+func (x *RegisterResp) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterResp.ProtoReflect.Descriptor instead.
+func (*RegisterResp) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *RegisterResp) GetRegisteredPathResults() []*RegisterResp_RegisteredPathResult {
+ if x != nil {
+ return x.RegisteredPathResults
+ }
+ return nil
+}
+
+type Deregister struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+func (x *Deregister) Reset() {
+ *x = Deregister{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Deregister) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Deregister) ProtoMessage() {}
+
+func (x *Deregister) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Deregister.ProtoReflect.Descriptor instead.
+func (*Deregister) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *Deregister) GetPaths() []string {
+ if x != nil {
+ return x.Paths
+ }
+ return nil
+}
+
+type DeregisterResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DeregisteredPathResults []*DeregisterResp_DeregisteredPathResult `protobuf:"bytes,1,rep,name=deregistered_path_results,json=deregisteredPathResults,proto3" json:"deregistered_path_results,omitempty"`
+}
+
+func (x *DeregisterResp) Reset() {
+ *x = DeregisterResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeregisterResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeregisterResp) ProtoMessage() {}
+
+func (x *DeregisterResp) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeregisterResp.ProtoReflect.Descriptor instead.
+func (*DeregisterResp) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *DeregisterResp) GetDeregisteredPathResults() []*DeregisterResp_DeregisteredPathResult {
+ if x != nil {
+ return x.DeregisteredPathResults
+ }
+ return nil
+}
+
type Error_ParamError struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2132,7 +2391,7 @@ type Error_ParamError struct {
func (x *Error_ParamError) Reset() {
*x = Error_ParamError{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[24]
+ mi := &file_usp_msg_1_3_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2145,7 +2404,7 @@ func (x *Error_ParamError) String() string {
func (*Error_ParamError) ProtoMessage() {}
func (x *Error_ParamError) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[24]
+ mi := &file_usp_msg_1_3_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2158,7 +2417,7 @@ func (x *Error_ParamError) ProtoReflect() protoreflect.Message {
// Deprecated: Use Error_ParamError.ProtoReflect.Descriptor instead.
func (*Error_ParamError) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{5, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{5, 0}
}
func (x *Error_ParamError) GetParamPath() string {
@@ -2196,7 +2455,7 @@ type GetResp_RequestedPathResult struct {
func (x *GetResp_RequestedPathResult) Reset() {
*x = GetResp_RequestedPathResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[25]
+ mi := &file_usp_msg_1_3_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2209,7 +2468,7 @@ func (x *GetResp_RequestedPathResult) String() string {
func (*GetResp_RequestedPathResult) ProtoMessage() {}
func (x *GetResp_RequestedPathResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[25]
+ mi := &file_usp_msg_1_3_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2222,7 +2481,7 @@ func (x *GetResp_RequestedPathResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetResp_RequestedPathResult.ProtoReflect.Descriptor instead.
func (*GetResp_RequestedPathResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{7, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{7, 0}
}
func (x *GetResp_RequestedPathResult) GetRequestedPath() string {
@@ -2265,7 +2524,7 @@ type GetResp_ResolvedPathResult struct {
func (x *GetResp_ResolvedPathResult) Reset() {
*x = GetResp_ResolvedPathResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[26]
+ mi := &file_usp_msg_1_3_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2278,7 +2537,7 @@ func (x *GetResp_ResolvedPathResult) String() string {
func (*GetResp_ResolvedPathResult) ProtoMessage() {}
func (x *GetResp_ResolvedPathResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[26]
+ mi := &file_usp_msg_1_3_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2291,7 +2550,7 @@ func (x *GetResp_ResolvedPathResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetResp_ResolvedPathResult.ProtoReflect.Descriptor instead.
func (*GetResp_ResolvedPathResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{7, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{7, 1}
}
func (x *GetResp_ResolvedPathResult) GetResolvedPath() string {
@@ -2323,7 +2582,7 @@ type GetSupportedDMResp_RequestedObjectResult struct {
func (x *GetSupportedDMResp_RequestedObjectResult) Reset() {
*x = GetSupportedDMResp_RequestedObjectResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[28]
+ mi := &file_usp_msg_1_3_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2336,7 +2595,7 @@ func (x *GetSupportedDMResp_RequestedObjectResult) String() string {
func (*GetSupportedDMResp_RequestedObjectResult) ProtoMessage() {}
func (x *GetSupportedDMResp_RequestedObjectResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[28]
+ mi := &file_usp_msg_1_3_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2349,7 +2608,7 @@ func (x *GetSupportedDMResp_RequestedObjectResult) ProtoReflect() protoreflect.M
// Deprecated: Use GetSupportedDMResp_RequestedObjectResult.ProtoReflect.Descriptor instead.
func (*GetSupportedDMResp_RequestedObjectResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 0}
}
func (x *GetSupportedDMResp_RequestedObjectResult) GetReqObjPath() string {
@@ -2404,7 +2663,7 @@ type GetSupportedDMResp_SupportedObjectResult struct {
func (x *GetSupportedDMResp_SupportedObjectResult) Reset() {
*x = GetSupportedDMResp_SupportedObjectResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[29]
+ mi := &file_usp_msg_1_3_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2417,7 +2676,7 @@ func (x *GetSupportedDMResp_SupportedObjectResult) String() string {
func (*GetSupportedDMResp_SupportedObjectResult) ProtoMessage() {}
func (x *GetSupportedDMResp_SupportedObjectResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[29]
+ mi := &file_usp_msg_1_3_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2430,7 +2689,7 @@ func (x *GetSupportedDMResp_SupportedObjectResult) ProtoReflect() protoreflect.M
// Deprecated: Use GetSupportedDMResp_SupportedObjectResult.ProtoReflect.Descriptor instead.
func (*GetSupportedDMResp_SupportedObjectResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 1}
}
func (x *GetSupportedDMResp_SupportedObjectResult) GetSupportedObjPath() string {
@@ -2496,7 +2755,7 @@ type GetSupportedDMResp_SupportedParamResult struct {
func (x *GetSupportedDMResp_SupportedParamResult) Reset() {
*x = GetSupportedDMResp_SupportedParamResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[30]
+ mi := &file_usp_msg_1_3_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2509,7 +2768,7 @@ func (x *GetSupportedDMResp_SupportedParamResult) String() string {
func (*GetSupportedDMResp_SupportedParamResult) ProtoMessage() {}
func (x *GetSupportedDMResp_SupportedParamResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[30]
+ mi := &file_usp_msg_1_3_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2522,7 +2781,7 @@ func (x *GetSupportedDMResp_SupportedParamResult) ProtoReflect() protoreflect.Me
// Deprecated: Use GetSupportedDMResp_SupportedParamResult.ProtoReflect.Descriptor instead.
func (*GetSupportedDMResp_SupportedParamResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 2}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 2}
}
func (x *GetSupportedDMResp_SupportedParamResult) GetParamName() string {
@@ -2567,7 +2826,7 @@ type GetSupportedDMResp_SupportedCommandResult struct {
func (x *GetSupportedDMResp_SupportedCommandResult) Reset() {
*x = GetSupportedDMResp_SupportedCommandResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[31]
+ mi := &file_usp_msg_1_3_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2580,7 +2839,7 @@ func (x *GetSupportedDMResp_SupportedCommandResult) String() string {
func (*GetSupportedDMResp_SupportedCommandResult) ProtoMessage() {}
func (x *GetSupportedDMResp_SupportedCommandResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[31]
+ mi := &file_usp_msg_1_3_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2593,7 +2852,7 @@ func (x *GetSupportedDMResp_SupportedCommandResult) ProtoReflect() protoreflect.
// Deprecated: Use GetSupportedDMResp_SupportedCommandResult.ProtoReflect.Descriptor instead.
func (*GetSupportedDMResp_SupportedCommandResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 3}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 3}
}
func (x *GetSupportedDMResp_SupportedCommandResult) GetCommandName() string {
@@ -2636,7 +2895,7 @@ type GetSupportedDMResp_SupportedEventResult struct {
func (x *GetSupportedDMResp_SupportedEventResult) Reset() {
*x = GetSupportedDMResp_SupportedEventResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[32]
+ mi := &file_usp_msg_1_3_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2649,7 +2908,7 @@ func (x *GetSupportedDMResp_SupportedEventResult) String() string {
func (*GetSupportedDMResp_SupportedEventResult) ProtoMessage() {}
func (x *GetSupportedDMResp_SupportedEventResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[32]
+ mi := &file_usp_msg_1_3_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2662,7 +2921,7 @@ func (x *GetSupportedDMResp_SupportedEventResult) ProtoReflect() protoreflect.Me
// Deprecated: Use GetSupportedDMResp_SupportedEventResult.ProtoReflect.Descriptor instead.
func (*GetSupportedDMResp_SupportedEventResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{9, 4}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{9, 4}
}
func (x *GetSupportedDMResp_SupportedEventResult) GetEventName() string {
@@ -2693,7 +2952,7 @@ type GetInstancesResp_RequestedPathResult struct {
func (x *GetInstancesResp_RequestedPathResult) Reset() {
*x = GetInstancesResp_RequestedPathResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[33]
+ mi := &file_usp_msg_1_3_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2706,7 +2965,7 @@ func (x *GetInstancesResp_RequestedPathResult) String() string {
func (*GetInstancesResp_RequestedPathResult) ProtoMessage() {}
func (x *GetInstancesResp_RequestedPathResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[33]
+ mi := &file_usp_msg_1_3_proto_msgTypes[37]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2719,7 +2978,7 @@ func (x *GetInstancesResp_RequestedPathResult) ProtoReflect() protoreflect.Messa
// Deprecated: Use GetInstancesResp_RequestedPathResult.ProtoReflect.Descriptor instead.
func (*GetInstancesResp_RequestedPathResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{11, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{11, 0}
}
func (x *GetInstancesResp_RequestedPathResult) GetRequestedPath() string {
@@ -2762,7 +3021,7 @@ type GetInstancesResp_CurrInstance struct {
func (x *GetInstancesResp_CurrInstance) Reset() {
*x = GetInstancesResp_CurrInstance{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[34]
+ mi := &file_usp_msg_1_3_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2775,7 +3034,7 @@ func (x *GetInstancesResp_CurrInstance) String() string {
func (*GetInstancesResp_CurrInstance) ProtoMessage() {}
func (x *GetInstancesResp_CurrInstance) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[34]
+ mi := &file_usp_msg_1_3_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2788,7 +3047,7 @@ func (x *GetInstancesResp_CurrInstance) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetInstancesResp_CurrInstance.ProtoReflect.Descriptor instead.
func (*GetInstancesResp_CurrInstance) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{11, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{11, 1}
}
func (x *GetInstancesResp_CurrInstance) GetInstantiatedObjPath() string {
@@ -2817,7 +3076,7 @@ type Add_CreateObject struct {
func (x *Add_CreateObject) Reset() {
*x = Add_CreateObject{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[36]
+ mi := &file_usp_msg_1_3_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2830,7 +3089,7 @@ func (x *Add_CreateObject) String() string {
func (*Add_CreateObject) ProtoMessage() {}
func (x *Add_CreateObject) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[36]
+ mi := &file_usp_msg_1_3_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2843,7 +3102,7 @@ func (x *Add_CreateObject) ProtoReflect() protoreflect.Message {
// Deprecated: Use Add_CreateObject.ProtoReflect.Descriptor instead.
func (*Add_CreateObject) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{14, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{14, 0}
}
func (x *Add_CreateObject) GetObjPath() string {
@@ -2873,7 +3132,7 @@ type Add_CreateParamSetting struct {
func (x *Add_CreateParamSetting) Reset() {
*x = Add_CreateParamSetting{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[37]
+ mi := &file_usp_msg_1_3_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2886,7 +3145,7 @@ func (x *Add_CreateParamSetting) String() string {
func (*Add_CreateParamSetting) ProtoMessage() {}
func (x *Add_CreateParamSetting) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[37]
+ mi := &file_usp_msg_1_3_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2899,7 +3158,7 @@ func (x *Add_CreateParamSetting) ProtoReflect() protoreflect.Message {
// Deprecated: Use Add_CreateParamSetting.ProtoReflect.Descriptor instead.
func (*Add_CreateParamSetting) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{14, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{14, 1}
}
func (x *Add_CreateParamSetting) GetParam() string {
@@ -2935,7 +3194,7 @@ type AddResp_CreatedObjectResult struct {
func (x *AddResp_CreatedObjectResult) Reset() {
*x = AddResp_CreatedObjectResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[38]
+ mi := &file_usp_msg_1_3_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2948,7 +3207,7 @@ func (x *AddResp_CreatedObjectResult) String() string {
func (*AddResp_CreatedObjectResult) ProtoMessage() {}
func (x *AddResp_CreatedObjectResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[38]
+ mi := &file_usp_msg_1_3_proto_msgTypes[42]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2961,7 +3220,7 @@ func (x *AddResp_CreatedObjectResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use AddResp_CreatedObjectResult.ProtoReflect.Descriptor instead.
func (*AddResp_CreatedObjectResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{15, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{15, 0}
}
func (x *AddResp_CreatedObjectResult) GetRequestedPath() string {
@@ -2991,7 +3250,7 @@ type AddResp_ParameterError struct {
func (x *AddResp_ParameterError) Reset() {
*x = AddResp_ParameterError{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[39]
+ mi := &file_usp_msg_1_3_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3004,7 +3263,7 @@ func (x *AddResp_ParameterError) String() string {
func (*AddResp_ParameterError) ProtoMessage() {}
func (x *AddResp_ParameterError) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[39]
+ mi := &file_usp_msg_1_3_proto_msgTypes[43]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3017,7 +3276,7 @@ func (x *AddResp_ParameterError) ProtoReflect() protoreflect.Message {
// Deprecated: Use AddResp_ParameterError.ProtoReflect.Descriptor instead.
func (*AddResp_ParameterError) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{15, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{15, 1}
}
func (x *AddResp_ParameterError) GetParam() string {
@@ -3056,7 +3315,7 @@ type AddResp_CreatedObjectResult_OperationStatus struct {
func (x *AddResp_CreatedObjectResult_OperationStatus) Reset() {
*x = AddResp_CreatedObjectResult_OperationStatus{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[40]
+ mi := &file_usp_msg_1_3_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3069,7 +3328,7 @@ func (x *AddResp_CreatedObjectResult_OperationStatus) String() string {
func (*AddResp_CreatedObjectResult_OperationStatus) ProtoMessage() {}
func (x *AddResp_CreatedObjectResult_OperationStatus) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[40]
+ mi := &file_usp_msg_1_3_proto_msgTypes[44]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3082,7 +3341,7 @@ func (x *AddResp_CreatedObjectResult_OperationStatus) ProtoReflect() protoreflec
// Deprecated: Use AddResp_CreatedObjectResult_OperationStatus.ProtoReflect.Descriptor instead.
func (*AddResp_CreatedObjectResult_OperationStatus) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{15, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{15, 0, 0}
}
func (m *AddResp_CreatedObjectResult_OperationStatus) GetOperStatus() isAddResp_CreatedObjectResult_OperationStatus_OperStatus {
@@ -3136,7 +3395,7 @@ type AddResp_CreatedObjectResult_OperationStatus_OperationFailure struct {
func (x *AddResp_CreatedObjectResult_OperationStatus_OperationFailure) Reset() {
*x = AddResp_CreatedObjectResult_OperationStatus_OperationFailure{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[41]
+ mi := &file_usp_msg_1_3_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3149,7 +3408,7 @@ func (x *AddResp_CreatedObjectResult_OperationStatus_OperationFailure) String()
func (*AddResp_CreatedObjectResult_OperationStatus_OperationFailure) ProtoMessage() {}
func (x *AddResp_CreatedObjectResult_OperationStatus_OperationFailure) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[41]
+ mi := &file_usp_msg_1_3_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3162,7 +3421,7 @@ func (x *AddResp_CreatedObjectResult_OperationStatus_OperationFailure) ProtoRefl
// Deprecated: Use AddResp_CreatedObjectResult_OperationStatus_OperationFailure.ProtoReflect.Descriptor instead.
func (*AddResp_CreatedObjectResult_OperationStatus_OperationFailure) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{15, 0, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{15, 0, 0, 0}
}
func (x *AddResp_CreatedObjectResult_OperationStatus_OperationFailure) GetErrCode() uint32 {
@@ -3192,7 +3451,7 @@ type AddResp_CreatedObjectResult_OperationStatus_OperationSuccess struct {
func (x *AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) Reset() {
*x = AddResp_CreatedObjectResult_OperationStatus_OperationSuccess{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[42]
+ mi := &file_usp_msg_1_3_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3205,7 +3464,7 @@ func (x *AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) String()
func (*AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) ProtoMessage() {}
func (x *AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[42]
+ mi := &file_usp_msg_1_3_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3218,7 +3477,7 @@ func (x *AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) ProtoRefl
// Deprecated: Use AddResp_CreatedObjectResult_OperationStatus_OperationSuccess.ProtoReflect.Descriptor instead.
func (*AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{15, 0, 0, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{15, 0, 0, 1}
}
func (x *AddResp_CreatedObjectResult_OperationStatus_OperationSuccess) GetInstantiatedPath() string {
@@ -3254,7 +3513,7 @@ type DeleteResp_DeletedObjectResult struct {
func (x *DeleteResp_DeletedObjectResult) Reset() {
*x = DeleteResp_DeletedObjectResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[44]
+ mi := &file_usp_msg_1_3_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3267,7 +3526,7 @@ func (x *DeleteResp_DeletedObjectResult) String() string {
func (*DeleteResp_DeletedObjectResult) ProtoMessage() {}
func (x *DeleteResp_DeletedObjectResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[44]
+ mi := &file_usp_msg_1_3_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3280,7 +3539,7 @@ func (x *DeleteResp_DeletedObjectResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteResp_DeletedObjectResult.ProtoReflect.Descriptor instead.
func (*DeleteResp_DeletedObjectResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{17, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{17, 0}
}
func (x *DeleteResp_DeletedObjectResult) GetRequestedPath() string {
@@ -3310,7 +3569,7 @@ type DeleteResp_UnaffectedPathError struct {
func (x *DeleteResp_UnaffectedPathError) Reset() {
*x = DeleteResp_UnaffectedPathError{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[45]
+ mi := &file_usp_msg_1_3_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3323,7 +3582,7 @@ func (x *DeleteResp_UnaffectedPathError) String() string {
func (*DeleteResp_UnaffectedPathError) ProtoMessage() {}
func (x *DeleteResp_UnaffectedPathError) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[45]
+ mi := &file_usp_msg_1_3_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3336,7 +3595,7 @@ func (x *DeleteResp_UnaffectedPathError) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteResp_UnaffectedPathError.ProtoReflect.Descriptor instead.
func (*DeleteResp_UnaffectedPathError) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{17, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{17, 1}
}
func (x *DeleteResp_UnaffectedPathError) GetUnaffectedPath() string {
@@ -3375,7 +3634,7 @@ type DeleteResp_DeletedObjectResult_OperationStatus struct {
func (x *DeleteResp_DeletedObjectResult_OperationStatus) Reset() {
*x = DeleteResp_DeletedObjectResult_OperationStatus{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[46]
+ mi := &file_usp_msg_1_3_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3388,7 +3647,7 @@ func (x *DeleteResp_DeletedObjectResult_OperationStatus) String() string {
func (*DeleteResp_DeletedObjectResult_OperationStatus) ProtoMessage() {}
func (x *DeleteResp_DeletedObjectResult_OperationStatus) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[46]
+ mi := &file_usp_msg_1_3_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3401,7 +3660,7 @@ func (x *DeleteResp_DeletedObjectResult_OperationStatus) ProtoReflect() protoref
// Deprecated: Use DeleteResp_DeletedObjectResult_OperationStatus.ProtoReflect.Descriptor instead.
func (*DeleteResp_DeletedObjectResult_OperationStatus) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{17, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{17, 0, 0}
}
func (m *DeleteResp_DeletedObjectResult_OperationStatus) GetOperStatus() isDeleteResp_DeletedObjectResult_OperationStatus_OperStatus {
@@ -3455,7 +3714,7 @@ type DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure struct {
func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) Reset() {
*x = DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[47]
+ mi := &file_usp_msg_1_3_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3468,7 +3727,7 @@ func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) String
func (*DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) ProtoMessage() {}
func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[47]
+ mi := &file_usp_msg_1_3_proto_msgTypes[51]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3481,7 +3740,7 @@ func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) ProtoR
// Deprecated: Use DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure.ProtoReflect.Descriptor instead.
func (*DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{17, 0, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{17, 0, 0, 0}
}
func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure) GetErrCode() uint32 {
@@ -3510,7 +3769,7 @@ type DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess struct {
func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) Reset() {
*x = DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[48]
+ mi := &file_usp_msg_1_3_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3523,7 +3782,7 @@ func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) String
func (*DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) ProtoMessage() {}
func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[48]
+ mi := &file_usp_msg_1_3_proto_msgTypes[52]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3536,7 +3795,7 @@ func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) ProtoR
// Deprecated: Use DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess.ProtoReflect.Descriptor instead.
func (*DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{17, 0, 0, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{17, 0, 0, 1}
}
func (x *DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess) GetAffectedPaths() []string {
@@ -3565,7 +3824,7 @@ type Set_UpdateObject struct {
func (x *Set_UpdateObject) Reset() {
*x = Set_UpdateObject{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[49]
+ mi := &file_usp_msg_1_3_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3578,7 +3837,7 @@ func (x *Set_UpdateObject) String() string {
func (*Set_UpdateObject) ProtoMessage() {}
func (x *Set_UpdateObject) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[49]
+ mi := &file_usp_msg_1_3_proto_msgTypes[53]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3591,7 +3850,7 @@ func (x *Set_UpdateObject) ProtoReflect() protoreflect.Message {
// Deprecated: Use Set_UpdateObject.ProtoReflect.Descriptor instead.
func (*Set_UpdateObject) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{18, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{18, 0}
}
func (x *Set_UpdateObject) GetObjPath() string {
@@ -3621,7 +3880,7 @@ type Set_UpdateParamSetting struct {
func (x *Set_UpdateParamSetting) Reset() {
*x = Set_UpdateParamSetting{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[50]
+ mi := &file_usp_msg_1_3_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3634,7 +3893,7 @@ func (x *Set_UpdateParamSetting) String() string {
func (*Set_UpdateParamSetting) ProtoMessage() {}
func (x *Set_UpdateParamSetting) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[50]
+ mi := &file_usp_msg_1_3_proto_msgTypes[54]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3647,7 +3906,7 @@ func (x *Set_UpdateParamSetting) ProtoReflect() protoreflect.Message {
// Deprecated: Use Set_UpdateParamSetting.ProtoReflect.Descriptor instead.
func (*Set_UpdateParamSetting) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{18, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{18, 1}
}
func (x *Set_UpdateParamSetting) GetParam() string {
@@ -3683,7 +3942,7 @@ type SetResp_UpdatedObjectResult struct {
func (x *SetResp_UpdatedObjectResult) Reset() {
*x = SetResp_UpdatedObjectResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[51]
+ mi := &file_usp_msg_1_3_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3696,7 +3955,7 @@ func (x *SetResp_UpdatedObjectResult) String() string {
func (*SetResp_UpdatedObjectResult) ProtoMessage() {}
func (x *SetResp_UpdatedObjectResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[51]
+ mi := &file_usp_msg_1_3_proto_msgTypes[55]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3709,7 +3968,7 @@ func (x *SetResp_UpdatedObjectResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetResp_UpdatedObjectResult.ProtoReflect.Descriptor instead.
func (*SetResp_UpdatedObjectResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 0}
}
func (x *SetResp_UpdatedObjectResult) GetRequestedPath() string {
@@ -3738,7 +3997,7 @@ type SetResp_UpdatedInstanceFailure struct {
func (x *SetResp_UpdatedInstanceFailure) Reset() {
*x = SetResp_UpdatedInstanceFailure{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[52]
+ mi := &file_usp_msg_1_3_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3751,7 +4010,7 @@ func (x *SetResp_UpdatedInstanceFailure) String() string {
func (*SetResp_UpdatedInstanceFailure) ProtoMessage() {}
func (x *SetResp_UpdatedInstanceFailure) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[52]
+ mi := &file_usp_msg_1_3_proto_msgTypes[56]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3764,7 +4023,7 @@ func (x *SetResp_UpdatedInstanceFailure) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetResp_UpdatedInstanceFailure.ProtoReflect.Descriptor instead.
func (*SetResp_UpdatedInstanceFailure) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 1}
}
func (x *SetResp_UpdatedInstanceFailure) GetAffectedPath() string {
@@ -3794,7 +4053,7 @@ type SetResp_UpdatedInstanceResult struct {
func (x *SetResp_UpdatedInstanceResult) Reset() {
*x = SetResp_UpdatedInstanceResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[53]
+ mi := &file_usp_msg_1_3_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3807,7 +4066,7 @@ func (x *SetResp_UpdatedInstanceResult) String() string {
func (*SetResp_UpdatedInstanceResult) ProtoMessage() {}
func (x *SetResp_UpdatedInstanceResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[53]
+ mi := &file_usp_msg_1_3_proto_msgTypes[57]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3820,7 +4079,7 @@ func (x *SetResp_UpdatedInstanceResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetResp_UpdatedInstanceResult.ProtoReflect.Descriptor instead.
func (*SetResp_UpdatedInstanceResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 2}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 2}
}
func (x *SetResp_UpdatedInstanceResult) GetAffectedPath() string {
@@ -3857,7 +4116,7 @@ type SetResp_ParameterError struct {
func (x *SetResp_ParameterError) Reset() {
*x = SetResp_ParameterError{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[54]
+ mi := &file_usp_msg_1_3_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3870,7 +4129,7 @@ func (x *SetResp_ParameterError) String() string {
func (*SetResp_ParameterError) ProtoMessage() {}
func (x *SetResp_ParameterError) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[54]
+ mi := &file_usp_msg_1_3_proto_msgTypes[58]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3883,7 +4142,7 @@ func (x *SetResp_ParameterError) ProtoReflect() protoreflect.Message {
// Deprecated: Use SetResp_ParameterError.ProtoReflect.Descriptor instead.
func (*SetResp_ParameterError) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 3}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 3}
}
func (x *SetResp_ParameterError) GetParam() string {
@@ -3922,7 +4181,7 @@ type SetResp_UpdatedObjectResult_OperationStatus struct {
func (x *SetResp_UpdatedObjectResult_OperationStatus) Reset() {
*x = SetResp_UpdatedObjectResult_OperationStatus{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[55]
+ mi := &file_usp_msg_1_3_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3935,7 +4194,7 @@ func (x *SetResp_UpdatedObjectResult_OperationStatus) String() string {
func (*SetResp_UpdatedObjectResult_OperationStatus) ProtoMessage() {}
func (x *SetResp_UpdatedObjectResult_OperationStatus) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[55]
+ mi := &file_usp_msg_1_3_proto_msgTypes[59]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3948,7 +4207,7 @@ func (x *SetResp_UpdatedObjectResult_OperationStatus) ProtoReflect() protoreflec
// Deprecated: Use SetResp_UpdatedObjectResult_OperationStatus.ProtoReflect.Descriptor instead.
func (*SetResp_UpdatedObjectResult_OperationStatus) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 0, 0}
}
func (m *SetResp_UpdatedObjectResult_OperationStatus) GetOperStatus() isSetResp_UpdatedObjectResult_OperationStatus_OperStatus {
@@ -4003,7 +4262,7 @@ type SetResp_UpdatedObjectResult_OperationStatus_OperationFailure struct {
func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) Reset() {
*x = SetResp_UpdatedObjectResult_OperationStatus_OperationFailure{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[56]
+ mi := &file_usp_msg_1_3_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4016,7 +4275,7 @@ func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) String()
func (*SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) ProtoMessage() {}
func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[56]
+ mi := &file_usp_msg_1_3_proto_msgTypes[60]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4029,7 +4288,7 @@ func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) ProtoRefl
// Deprecated: Use SetResp_UpdatedObjectResult_OperationStatus_OperationFailure.ProtoReflect.Descriptor instead.
func (*SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 0, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 0, 0, 0}
}
func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationFailure) GetErrCode() uint32 {
@@ -4064,7 +4323,7 @@ type SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess struct {
func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) Reset() {
*x = SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[57]
+ mi := &file_usp_msg_1_3_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4077,7 +4336,7 @@ func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) String()
func (*SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) ProtoMessage() {}
func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[57]
+ mi := &file_usp_msg_1_3_proto_msgTypes[61]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4090,7 +4349,7 @@ func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) ProtoRefl
// Deprecated: Use SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess.ProtoReflect.Descriptor instead.
func (*SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{19, 0, 0, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{19, 0, 0, 1}
}
func (x *SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess) GetUpdatedInstResults() []*SetResp_UpdatedInstanceResult {
@@ -4117,7 +4376,7 @@ type OperateResp_OperationResult struct {
func (x *OperateResp_OperationResult) Reset() {
*x = OperateResp_OperationResult{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[60]
+ mi := &file_usp_msg_1_3_proto_msgTypes[64]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4130,7 +4389,7 @@ func (x *OperateResp_OperationResult) String() string {
func (*OperateResp_OperationResult) ProtoMessage() {}
func (x *OperateResp_OperationResult) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[60]
+ mi := &file_usp_msg_1_3_proto_msgTypes[64]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4143,7 +4402,7 @@ func (x *OperateResp_OperationResult) ProtoReflect() protoreflect.Message {
// Deprecated: Use OperateResp_OperationResult.ProtoReflect.Descriptor instead.
func (*OperateResp_OperationResult) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{21, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{21, 0}
}
func (x *OperateResp_OperationResult) GetExecutedCommand() string {
@@ -4214,7 +4473,7 @@ type OperateResp_OperationResult_OutputArgs struct {
func (x *OperateResp_OperationResult_OutputArgs) Reset() {
*x = OperateResp_OperationResult_OutputArgs{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[61]
+ mi := &file_usp_msg_1_3_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4227,7 +4486,7 @@ func (x *OperateResp_OperationResult_OutputArgs) String() string {
func (*OperateResp_OperationResult_OutputArgs) ProtoMessage() {}
func (x *OperateResp_OperationResult_OutputArgs) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[61]
+ mi := &file_usp_msg_1_3_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4240,7 +4499,7 @@ func (x *OperateResp_OperationResult_OutputArgs) ProtoReflect() protoreflect.Mes
// Deprecated: Use OperateResp_OperationResult_OutputArgs.ProtoReflect.Descriptor instead.
func (*OperateResp_OperationResult_OutputArgs) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{21, 0, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{21, 0, 0}
}
func (x *OperateResp_OperationResult_OutputArgs) GetOutputArgs() map[string]string {
@@ -4262,7 +4521,7 @@ type OperateResp_OperationResult_CommandFailure struct {
func (x *OperateResp_OperationResult_CommandFailure) Reset() {
*x = OperateResp_OperationResult_CommandFailure{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[62]
+ mi := &file_usp_msg_1_3_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4275,7 +4534,7 @@ func (x *OperateResp_OperationResult_CommandFailure) String() string {
func (*OperateResp_OperationResult_CommandFailure) ProtoMessage() {}
func (x *OperateResp_OperationResult_CommandFailure) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[62]
+ mi := &file_usp_msg_1_3_proto_msgTypes[66]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4288,7 +4547,7 @@ func (x *OperateResp_OperationResult_CommandFailure) ProtoReflect() protoreflect
// Deprecated: Use OperateResp_OperationResult_CommandFailure.ProtoReflect.Descriptor instead.
func (*OperateResp_OperationResult_CommandFailure) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{21, 0, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{21, 0, 1}
}
func (x *OperateResp_OperationResult_CommandFailure) GetErrCode() uint32 {
@@ -4318,7 +4577,7 @@ type Notify_Event struct {
func (x *Notify_Event) Reset() {
*x = Notify_Event{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[64]
+ mi := &file_usp_msg_1_3_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4331,7 +4590,7 @@ func (x *Notify_Event) String() string {
func (*Notify_Event) ProtoMessage() {}
func (x *Notify_Event) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[64]
+ mi := &file_usp_msg_1_3_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4344,7 +4603,7 @@ func (x *Notify_Event) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify_Event.ProtoReflect.Descriptor instead.
func (*Notify_Event) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 0}
}
func (x *Notify_Event) GetObjPath() string {
@@ -4380,7 +4639,7 @@ type Notify_ValueChange struct {
func (x *Notify_ValueChange) Reset() {
*x = Notify_ValueChange{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[65]
+ mi := &file_usp_msg_1_3_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4393,7 +4652,7 @@ func (x *Notify_ValueChange) String() string {
func (*Notify_ValueChange) ProtoMessage() {}
func (x *Notify_ValueChange) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[65]
+ mi := &file_usp_msg_1_3_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4406,7 +4665,7 @@ func (x *Notify_ValueChange) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify_ValueChange.ProtoReflect.Descriptor instead.
func (*Notify_ValueChange) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 1}
}
func (x *Notify_ValueChange) GetParamPath() string {
@@ -4435,7 +4694,7 @@ type Notify_ObjectCreation struct {
func (x *Notify_ObjectCreation) Reset() {
*x = Notify_ObjectCreation{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[66]
+ mi := &file_usp_msg_1_3_proto_msgTypes[70]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4448,7 +4707,7 @@ func (x *Notify_ObjectCreation) String() string {
func (*Notify_ObjectCreation) ProtoMessage() {}
func (x *Notify_ObjectCreation) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[66]
+ mi := &file_usp_msg_1_3_proto_msgTypes[70]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4461,7 +4720,7 @@ func (x *Notify_ObjectCreation) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify_ObjectCreation.ProtoReflect.Descriptor instead.
func (*Notify_ObjectCreation) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 2}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 2}
}
func (x *Notify_ObjectCreation) GetObjPath() string {
@@ -4489,7 +4748,7 @@ type Notify_ObjectDeletion struct {
func (x *Notify_ObjectDeletion) Reset() {
*x = Notify_ObjectDeletion{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[67]
+ mi := &file_usp_msg_1_3_proto_msgTypes[71]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4502,7 +4761,7 @@ func (x *Notify_ObjectDeletion) String() string {
func (*Notify_ObjectDeletion) ProtoMessage() {}
func (x *Notify_ObjectDeletion) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[67]
+ mi := &file_usp_msg_1_3_proto_msgTypes[71]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4515,7 +4774,7 @@ func (x *Notify_ObjectDeletion) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify_ObjectDeletion.ProtoReflect.Descriptor instead.
func (*Notify_ObjectDeletion) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 3}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 3}
}
func (x *Notify_ObjectDeletion) GetObjPath() string {
@@ -4543,7 +4802,7 @@ type Notify_OperationComplete struct {
func (x *Notify_OperationComplete) Reset() {
*x = Notify_OperationComplete{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[68]
+ mi := &file_usp_msg_1_3_proto_msgTypes[72]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4556,7 +4815,7 @@ func (x *Notify_OperationComplete) String() string {
func (*Notify_OperationComplete) ProtoMessage() {}
func (x *Notify_OperationComplete) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[68]
+ mi := &file_usp_msg_1_3_proto_msgTypes[72]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4569,7 +4828,7 @@ func (x *Notify_OperationComplete) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify_OperationComplete.ProtoReflect.Descriptor instead.
func (*Notify_OperationComplete) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 4}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 4}
}
func (x *Notify_OperationComplete) GetObjPath() string {
@@ -4644,7 +4903,7 @@ type Notify_OnBoardRequest struct {
func (x *Notify_OnBoardRequest) Reset() {
*x = Notify_OnBoardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[69]
+ mi := &file_usp_msg_1_3_proto_msgTypes[73]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4657,7 +4916,7 @@ func (x *Notify_OnBoardRequest) String() string {
func (*Notify_OnBoardRequest) ProtoMessage() {}
func (x *Notify_OnBoardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[69]
+ mi := &file_usp_msg_1_3_proto_msgTypes[73]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4670,7 +4929,7 @@ func (x *Notify_OnBoardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use Notify_OnBoardRequest.ProtoReflect.Descriptor instead.
func (*Notify_OnBoardRequest) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 5}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 5}
}
func (x *Notify_OnBoardRequest) GetOui() string {
@@ -4712,7 +4971,7 @@ type Notify_OperationComplete_OutputArgs struct {
func (x *Notify_OperationComplete_OutputArgs) Reset() {
*x = Notify_OperationComplete_OutputArgs{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[72]
+ mi := &file_usp_msg_1_3_proto_msgTypes[76]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4725,7 +4984,7 @@ func (x *Notify_OperationComplete_OutputArgs) String() string {
func (*Notify_OperationComplete_OutputArgs) ProtoMessage() {}
func (x *Notify_OperationComplete_OutputArgs) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[72]
+ mi := &file_usp_msg_1_3_proto_msgTypes[76]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4738,7 +4997,7 @@ func (x *Notify_OperationComplete_OutputArgs) ProtoReflect() protoreflect.Messag
// Deprecated: Use Notify_OperationComplete_OutputArgs.ProtoReflect.Descriptor instead.
func (*Notify_OperationComplete_OutputArgs) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 4, 0}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 4, 0}
}
func (x *Notify_OperationComplete_OutputArgs) GetOutputArgs() map[string]string {
@@ -4760,7 +5019,7 @@ type Notify_OperationComplete_CommandFailure struct {
func (x *Notify_OperationComplete_CommandFailure) Reset() {
*x = Notify_OperationComplete_CommandFailure{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_msg_1_2_proto_msgTypes[73]
+ mi := &file_usp_msg_1_3_proto_msgTypes[77]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4773,7 +5032,7 @@ func (x *Notify_OperationComplete_CommandFailure) String() string {
func (*Notify_OperationComplete_CommandFailure) ProtoMessage() {}
func (x *Notify_OperationComplete_CommandFailure) ProtoReflect() protoreflect.Message {
- mi := &file_usp_msg_1_2_proto_msgTypes[73]
+ mi := &file_usp_msg_1_3_proto_msgTypes[77]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4786,7 +5045,7 @@ func (x *Notify_OperationComplete_CommandFailure) ProtoReflect() protoreflect.Me
// Deprecated: Use Notify_OperationComplete_CommandFailure.ProtoReflect.Descriptor instead.
func (*Notify_OperationComplete_CommandFailure) Descriptor() ([]byte, []int) {
- return file_usp_msg_1_2_proto_rawDescGZIP(), []int{22, 4, 1}
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{22, 4, 1}
}
func (x *Notify_OperationComplete_CommandFailure) GetErrCode() uint32 {
@@ -4803,21 +5062,548 @@ func (x *Notify_OperationComplete_CommandFailure) GetErrMsg() string {
return ""
}
-var File_usp_msg_1_2_proto protoreflect.FileDescriptor
+type Register_RegistrationPath struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-var file_usp_msg_1_2_proto_rawDesc = []byte{
- 0x0a, 0x11, 0x75, 0x73, 0x70, 0x2d, 0x6d, 0x73, 0x67, 0x2d, 0x31, 0x2d, 0x32, 0x2e, 0x70, 0x72,
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *Register_RegistrationPath) Reset() {
+ *x = Register_RegistrationPath{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[79]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Register_RegistrationPath) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Register_RegistrationPath) ProtoMessage() {}
+
+func (x *Register_RegistrationPath) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[79]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Register_RegistrationPath.ProtoReflect.Descriptor instead.
+func (*Register_RegistrationPath) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{24, 0}
+}
+
+func (x *Register_RegistrationPath) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+type RegisterResp_RegisteredPathResult struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RequestedPath string `protobuf:"bytes,1,opt,name=requested_path,json=requestedPath,proto3" json:"requested_path,omitempty"`
+ OperStatus *RegisterResp_RegisteredPathResult_OperationStatus `protobuf:"bytes,2,opt,name=oper_status,json=operStatus,proto3" json:"oper_status,omitempty"`
+}
+
+func (x *RegisterResp_RegisteredPathResult) Reset() {
+ *x = RegisterResp_RegisteredPathResult{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[80]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterResp_RegisteredPathResult) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterResp_RegisteredPathResult) ProtoMessage() {}
+
+func (x *RegisterResp_RegisteredPathResult) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[80]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterResp_RegisteredPathResult.ProtoReflect.Descriptor instead.
+func (*RegisterResp_RegisteredPathResult) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{25, 0}
+}
+
+func (x *RegisterResp_RegisteredPathResult) GetRequestedPath() string {
+ if x != nil {
+ return x.RequestedPath
+ }
+ return ""
+}
+
+func (x *RegisterResp_RegisteredPathResult) GetOperStatus() *RegisterResp_RegisteredPathResult_OperationStatus {
+ if x != nil {
+ return x.OperStatus
+ }
+ return nil
+}
+
+type RegisterResp_RegisteredPathResult_OperationStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to OperStatus:
+ //
+ // *RegisterResp_RegisteredPathResult_OperationStatus_OperFailure
+ // *RegisterResp_RegisteredPathResult_OperationStatus_OperSuccess
+ OperStatus isRegisterResp_RegisteredPathResult_OperationStatus_OperStatus `protobuf_oneof:"oper_status"`
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus) Reset() {
+ *x = RegisterResp_RegisteredPathResult_OperationStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[81]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterResp_RegisteredPathResult_OperationStatus) ProtoMessage() {}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[81]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterResp_RegisteredPathResult_OperationStatus.ProtoReflect.Descriptor instead.
+func (*RegisterResp_RegisteredPathResult_OperationStatus) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{25, 0, 0}
+}
+
+func (m *RegisterResp_RegisteredPathResult_OperationStatus) GetOperStatus() isRegisterResp_RegisteredPathResult_OperationStatus_OperStatus {
+ if m != nil {
+ return m.OperStatus
+ }
+ return nil
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus) GetOperFailure() *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure {
+ if x, ok := x.GetOperStatus().(*RegisterResp_RegisteredPathResult_OperationStatus_OperFailure); ok {
+ return x.OperFailure
+ }
+ return nil
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus) GetOperSuccess() *RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess {
+ if x, ok := x.GetOperStatus().(*RegisterResp_RegisteredPathResult_OperationStatus_OperSuccess); ok {
+ return x.OperSuccess
+ }
+ return nil
+}
+
+type isRegisterResp_RegisteredPathResult_OperationStatus_OperStatus interface {
+ isRegisterResp_RegisteredPathResult_OperationStatus_OperStatus()
+}
+
+type RegisterResp_RegisteredPathResult_OperationStatus_OperFailure struct {
+ OperFailure *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure `protobuf:"bytes,1,opt,name=oper_failure,json=operFailure,proto3,oneof"`
+}
+
+type RegisterResp_RegisteredPathResult_OperationStatus_OperSuccess struct {
+ OperSuccess *RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess `protobuf:"bytes,2,opt,name=oper_success,json=operSuccess,proto3,oneof"`
+}
+
+func (*RegisterResp_RegisteredPathResult_OperationStatus_OperFailure) isRegisterResp_RegisteredPathResult_OperationStatus_OperStatus() {
+}
+
+func (*RegisterResp_RegisteredPathResult_OperationStatus_OperSuccess) isRegisterResp_RegisteredPathResult_OperationStatus_OperStatus() {
+}
+
+type RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ErrCode uint32 `protobuf:"fixed32,1,opt,name=err_code,json=errCode,proto3" json:"err_code,omitempty"`
+ ErrMsg string `protobuf:"bytes,2,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"`
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) Reset() {
+ *x = RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[82]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) ProtoMessage() {}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[82]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure.ProtoReflect.Descriptor instead.
+func (*RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{25, 0, 0, 0}
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) GetErrCode() uint32 {
+ if x != nil {
+ return x.ErrCode
+ }
+ return 0
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure) GetErrMsg() string {
+ if x != nil {
+ return x.ErrMsg
+ }
+ return ""
+}
+
+type RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RegisteredPath string `protobuf:"bytes,1,opt,name=registered_path,json=registeredPath,proto3" json:"registered_path,omitempty"`
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess) Reset() {
+ *x = RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[83]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess) ProtoMessage() {}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[83]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess.ProtoReflect.Descriptor instead.
+func (*RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{25, 0, 0, 1}
+}
+
+func (x *RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess) GetRegisteredPath() string {
+ if x != nil {
+ return x.RegisteredPath
+ }
+ return ""
+}
+
+type DeregisterResp_DeregisteredPathResult struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RequestedPath string `protobuf:"bytes,1,opt,name=requested_path,json=requestedPath,proto3" json:"requested_path,omitempty"`
+ OperStatus *DeregisterResp_DeregisteredPathResult_OperationStatus `protobuf:"bytes,2,opt,name=oper_status,json=operStatus,proto3" json:"oper_status,omitempty"`
+}
+
+func (x *DeregisterResp_DeregisteredPathResult) Reset() {
+ *x = DeregisterResp_DeregisteredPathResult{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[84]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeregisterResp_DeregisteredPathResult) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeregisterResp_DeregisteredPathResult) ProtoMessage() {}
+
+func (x *DeregisterResp_DeregisteredPathResult) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[84]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeregisterResp_DeregisteredPathResult.ProtoReflect.Descriptor instead.
+func (*DeregisterResp_DeregisteredPathResult) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{27, 0}
+}
+
+func (x *DeregisterResp_DeregisteredPathResult) GetRequestedPath() string {
+ if x != nil {
+ return x.RequestedPath
+ }
+ return ""
+}
+
+func (x *DeregisterResp_DeregisteredPathResult) GetOperStatus() *DeregisterResp_DeregisteredPathResult_OperationStatus {
+ if x != nil {
+ return x.OperStatus
+ }
+ return nil
+}
+
+type DeregisterResp_DeregisteredPathResult_OperationStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to OperStatus:
+ //
+ // *DeregisterResp_DeregisteredPathResult_OperationStatus_OperFailure
+ // *DeregisterResp_DeregisteredPathResult_OperationStatus_OperSuccess
+ OperStatus isDeregisterResp_DeregisteredPathResult_OperationStatus_OperStatus `protobuf_oneof:"oper_status"`
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus) Reset() {
+ *x = DeregisterResp_DeregisteredPathResult_OperationStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[85]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus) ProtoMessage() {}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[85]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeregisterResp_DeregisteredPathResult_OperationStatus.ProtoReflect.Descriptor instead.
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{27, 0, 0}
+}
+
+func (m *DeregisterResp_DeregisteredPathResult_OperationStatus) GetOperStatus() isDeregisterResp_DeregisteredPathResult_OperationStatus_OperStatus {
+ if m != nil {
+ return m.OperStatus
+ }
+ return nil
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus) GetOperFailure() *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure {
+ if x, ok := x.GetOperStatus().(*DeregisterResp_DeregisteredPathResult_OperationStatus_OperFailure); ok {
+ return x.OperFailure
+ }
+ return nil
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus) GetOperSuccess() *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess {
+ if x, ok := x.GetOperStatus().(*DeregisterResp_DeregisteredPathResult_OperationStatus_OperSuccess); ok {
+ return x.OperSuccess
+ }
+ return nil
+}
+
+type isDeregisterResp_DeregisteredPathResult_OperationStatus_OperStatus interface {
+ isDeregisterResp_DeregisteredPathResult_OperationStatus_OperStatus()
+}
+
+type DeregisterResp_DeregisteredPathResult_OperationStatus_OperFailure struct {
+ OperFailure *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure `protobuf:"bytes,1,opt,name=oper_failure,json=operFailure,proto3,oneof"`
+}
+
+type DeregisterResp_DeregisteredPathResult_OperationStatus_OperSuccess struct {
+ OperSuccess *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess `protobuf:"bytes,2,opt,name=oper_success,json=operSuccess,proto3,oneof"`
+}
+
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperFailure) isDeregisterResp_DeregisteredPathResult_OperationStatus_OperStatus() {
+}
+
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperSuccess) isDeregisterResp_DeregisteredPathResult_OperationStatus_OperStatus() {
+}
+
+type DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ErrCode uint32 `protobuf:"fixed32,1,opt,name=err_code,json=errCode,proto3" json:"err_code,omitempty"`
+ ErrMsg string `protobuf:"bytes,2,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"`
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) Reset() {
+ *x = DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[86]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) ProtoMessage() {}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[86]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure.ProtoReflect.Descriptor instead.
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{27, 0, 0, 0}
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) GetErrCode() uint32 {
+ if x != nil {
+ return x.ErrCode
+ }
+ return 0
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure) GetErrMsg() string {
+ if x != nil {
+ return x.ErrMsg
+ }
+ return ""
+}
+
+type DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DeregisteredPath []string `protobuf:"bytes,1,rep,name=deregistered_path,json=deregisteredPath,proto3" json:"deregistered_path,omitempty"`
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess) Reset() {
+ *x = DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_msg_1_3_proto_msgTypes[87]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess) ProtoMessage() {}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_msg_1_3_proto_msgTypes[87]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess.ProtoReflect.Descriptor instead.
+func (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess) Descriptor() ([]byte, []int) {
+ return file_usp_msg_1_3_proto_rawDescGZIP(), []int{27, 0, 0, 1}
+}
+
+func (x *DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess) GetDeregisteredPath() []string {
+ if x != nil {
+ return x.DeregisteredPath
+ }
+ return nil
+}
+
+var File_usp_msg_1_3_proto protoreflect.FileDescriptor
+
+var file_usp_msg_1_3_proto_rawDesc = []byte{
+ 0x0a, 0x11, 0x75, 0x73, 0x70, 0x2d, 0x6d, 0x73, 0x67, 0x2d, 0x31, 0x2d, 0x33, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x03, 0x75, 0x73, 0x70, 0x22, 0x49, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12,
0x23, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x0b, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65,
0x61, 0x64, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x09, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x22, 0x97, 0x03, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x15,
+ 0x6f, 0x64, 0x79, 0x22, 0xdd, 0x03, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x15,
0x0a, 0x06, 0x6d, 0x73, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
0x6d, 0x73, 0x67, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x6d, 0x73, 0x67, 0x5f, 0x74, 0x79, 0x70,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x48, 0x65,
0x61, 0x64, 0x65, 0x72, 0x2e, 0x4d, 0x73, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6d, 0x73,
- 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc5, 0x02, 0x0a, 0x07, 0x4d, 0x73, 0x67, 0x54, 0x79, 0x70,
+ 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x4d, 0x73, 0x67, 0x54, 0x79, 0x70,
0x65, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03,
0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x47, 0x45, 0x54, 0x5f, 0x52, 0x45, 0x53,
0x50, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12,
@@ -4837,78 +5623,96 @@ var file_usp_msg_1_2_proto_rawDesc = []byte{
0x52, 0x45, 0x53, 0x50, 0x10, 0x10, 0x12, 0x17, 0x0a, 0x13, 0x47, 0x45, 0x54, 0x5f, 0x53, 0x55,
0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x10, 0x11, 0x12,
0x1c, 0x0a, 0x18, 0x47, 0x45, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44,
- 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x10, 0x12, 0x22, 0x8d, 0x01,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x28, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x2b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a,
- 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x75,
- 0x73, 0x70, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x6d, 0x73, 0x67, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x22, 0xb5, 0x03,
- 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74,
- 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x3f, 0x0a, 0x10, 0x67, 0x65, 0x74, 0x5f, 0x73,
- 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x13, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f,
- 0x72, 0x74, 0x65, 0x64, 0x44, 0x4d, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x65, 0x74, 0x53, 0x75, 0x70,
- 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x6d, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x11, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x08, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x73, 0x65, 0x74,
- 0x12, 0x1c, 0x0a, 0x03, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e,
- 0x75, 0x73, 0x70, 0x2e, 0x41, 0x64, 0x64, 0x48, 0x00, 0x52, 0x03, 0x61, 0x64, 0x64, 0x12, 0x25,
- 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b,
- 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x06, 0x64,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12,
- 0x25, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0b, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x48, 0x00, 0x52, 0x06,
- 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x51, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75,
- 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
- 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74,
- 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
- 0x6c, 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65,
- 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x71,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xac, 0x04, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x48, 0x00, 0x52, 0x07, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x4c, 0x0a,
- 0x15, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64,
- 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x75,
- 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44,
- 0x4d, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x12, 0x67, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70,
- 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x12, 0x45, 0x0a, 0x12, 0x67,
- 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73,
- 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65,
- 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00,
- 0x52, 0x10, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x12, 0x29, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x48, 0x00, 0x52, 0x07, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x29, 0x0a,
- 0x08, 0x61, 0x64, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0c, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52,
- 0x07, 0x61, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x32, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
- 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00,
- 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x35, 0x0a, 0x0c,
- 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x12, 0x32, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x72, 0x65,
- 0x73, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74,
- 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, 0x5e, 0x0a, 0x1b, 0x67, 0x65, 0x74, 0x5f, 0x73,
- 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
- 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x75,
+ 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x10, 0x12, 0x12, 0x0c, 0x0a,
+ 0x08, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x10, 0x13, 0x12, 0x11, 0x0a, 0x0d, 0x52,
+ 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x10, 0x14, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x44, 0x45, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x10, 0x15, 0x12, 0x13,
+ 0x0a, 0x0f, 0x44, 0x45, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x52, 0x45, 0x53,
+ 0x50, 0x10, 0x16, 0x22, 0x8d, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x28, 0x0a, 0x07,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
+ 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x6d, 0x73, 0x67, 0x5f, 0x62,
+ 0x6f, 0x64, 0x79, 0x22, 0x95, 0x04, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75,
+ 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x3f, 0x0a,
+ 0x10, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64,
+ 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65,
+ 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x4d, 0x48, 0x00, 0x52, 0x0e,
+ 0x67, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x6d, 0x12, 0x38,
+ 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x73, 0x65, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x03, 0x73, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x61, 0x64, 0x64, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x41, 0x64, 0x64, 0x48, 0x00, 0x52,
+ 0x03, 0x61, 0x64, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75,
+ 0x73, 0x70, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x79, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x51, 0x0a, 0x16,
+ 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x75,
0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x18, 0x67,
- 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x5f,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x53, 0x75,
+ 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12,
+ 0x2b, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72,
+ 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x0a,
+ 0x64, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x42,
+ 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa6, 0x05, 0x0a, 0x08,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x67, 0x65, 0x74, 0x5f,
+ 0x72, 0x65, 0x73, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70,
+ 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x07, 0x67, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f,
+ 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70,
+ 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x4d, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x12, 0x67,
+ 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x6d, 0x52, 0x65, 0x73,
+ 0x70, 0x12, 0x45, 0x0a, 0x12, 0x67, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x10, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x29, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x5f,
+ 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70,
+ 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x07, 0x73, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x12, 0x29, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x41, 0x64, 0x64, 0x52,
+ 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x07, 0x61, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x12, 0x32,
+ 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x12, 0x35, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65,
+ 0x73, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x32, 0x0a, 0x0b, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f,
+ 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x48,
+ 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12, 0x5e, 0x0a,
+ 0x1b, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70,
+ 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x73,
+ 0x70, 0x48, 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65,
+ 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x12, 0x38, 0x0a,
+ 0x0d, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3e, 0x0a, 0x0f, 0x64, 0x65, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x13, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x64, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x5f,
0x74, 0x79, 0x70, 0x65, 0x22, 0xd2, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19,
0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07,
0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72,
@@ -5539,25 +6343,120 @@ var file_usp_msg_1_2_proto_rawDesc = []byte{
0x6e, 0x22, 0x35, 0x0a, 0x0a, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x12,
0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x42, 0x0b, 0x5a, 0x09, 0x2e, 0x2f, 0x75, 0x73,
- 0x70, 0x2d, 0x6d, 0x73, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x94, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70,
+ 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x3b, 0x0a, 0x09, 0x72, 0x65,
+ 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
+ 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x72,
+ 0x65, 0x67, 0x50, 0x61, 0x74, 0x68, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22,
+ 0x8b, 0x05, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70,
+ 0x12, 0x5e, 0x0a, 0x17, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x26, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50,
+ 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x15, 0x72, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73,
+ 0x1a, 0x9a, 0x04, 0x0a, 0x14, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50,
+ 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68,
+ 0x12, 0x57, 0x0a, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0a, 0x6f,
+ 0x70, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x81, 0x03, 0x0a, 0x0f, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x6c, 0x0a,
+ 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65,
+ 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x48, 0x00, 0x52, 0x0b,
+ 0x6f, 0x70, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x6c, 0x0a, 0x0c, 0x6f,
+ 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x47, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x70, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50,
+ 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x70,
+ 0x65, 0x72, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x1a, 0x46, 0x0a, 0x10, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x19, 0x0a,
+ 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x52,
+ 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f,
+ 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73,
+ 0x67, 0x1a, 0x3b, 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
+ 0x72, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
+ 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x42, 0x0d,
+ 0x0a, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x22, 0x0a,
+ 0x0a, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70,
+ 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68,
+ 0x73, 0x22, 0xa7, 0x05, 0x0a, 0x0e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x70, 0x12, 0x66, 0x0a, 0x19, 0x64, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
+ 0x65, 0x72, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65,
+ 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x52, 0x17, 0x64, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65,
+ 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0xac, 0x04, 0x0a,
+ 0x16, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74,
+ 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x12, 0x5b,
+ 0x0a, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69,
+ 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73,
+ 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e,
+ 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x8d, 0x03, 0x0a, 0x0f,
+ 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x70, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72, 0x65, 0x67,
+ 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75,
+ 0x72, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
+ 0x65, 0x12, 0x70, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x75, 0x73, 0x70, 0x2e, 0x44, 0x65,
+ 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x44, 0x65, 0x72,
+ 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x53, 0x75, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x1a, 0x46, 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63,
+ 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f,
+ 0x64, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x3f, 0x0a, 0x10, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12,
+ 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x72, 0x65,
+ 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x50, 0x61, 0x74, 0x68, 0x42, 0x0d, 0x0a, 0x0b,
+ 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0b, 0x5a, 0x09, 0x2e,
+ 0x2f, 0x75, 0x73, 0x70, 0x2d, 0x6d, 0x73, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
- file_usp_msg_1_2_proto_rawDescOnce sync.Once
- file_usp_msg_1_2_proto_rawDescData = file_usp_msg_1_2_proto_rawDesc
+ file_usp_msg_1_3_proto_rawDescOnce sync.Once
+ file_usp_msg_1_3_proto_rawDescData = file_usp_msg_1_3_proto_rawDesc
)
-func file_usp_msg_1_2_proto_rawDescGZIP() []byte {
- file_usp_msg_1_2_proto_rawDescOnce.Do(func() {
- file_usp_msg_1_2_proto_rawDescData = protoimpl.X.CompressGZIP(file_usp_msg_1_2_proto_rawDescData)
+func file_usp_msg_1_3_proto_rawDescGZIP() []byte {
+ file_usp_msg_1_3_proto_rawDescOnce.Do(func() {
+ file_usp_msg_1_3_proto_rawDescData = protoimpl.X.CompressGZIP(file_usp_msg_1_3_proto_rawDescData)
})
- return file_usp_msg_1_2_proto_rawDescData
+ return file_usp_msg_1_3_proto_rawDescData
}
-var file_usp_msg_1_2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
-var file_usp_msg_1_2_proto_msgTypes = make([]protoimpl.MessageInfo, 75)
-var file_usp_msg_1_2_proto_goTypes = []interface{}{
+var file_usp_msg_1_3_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_usp_msg_1_3_proto_msgTypes = make([]protoimpl.MessageInfo, 88)
+var file_usp_msg_1_3_proto_goTypes = []interface{}{
(Header_MsgType)(0), // 0: usp.Header.MsgType
(GetSupportedDMResp_ParamAccessType)(0), // 1: usp.GetSupportedDMResp.ParamAccessType
(GetSupportedDMResp_ObjAccessType)(0), // 2: usp.GetSupportedDMResp.ObjAccessType
@@ -5588,59 +6487,72 @@ var file_usp_msg_1_2_proto_goTypes = []interface{}{
(*OperateResp)(nil), // 27: usp.OperateResp
(*Notify)(nil), // 28: usp.Notify
(*NotifyResp)(nil), // 29: usp.NotifyResp
- (*Error_ParamError)(nil), // 30: usp.Error.ParamError
- (*GetResp_RequestedPathResult)(nil), // 31: usp.GetResp.RequestedPathResult
- (*GetResp_ResolvedPathResult)(nil), // 32: usp.GetResp.ResolvedPathResult
- nil, // 33: usp.GetResp.ResolvedPathResult.ResultParamsEntry
- (*GetSupportedDMResp_RequestedObjectResult)(nil), // 34: usp.GetSupportedDMResp.RequestedObjectResult
- (*GetSupportedDMResp_SupportedObjectResult)(nil), // 35: usp.GetSupportedDMResp.SupportedObjectResult
- (*GetSupportedDMResp_SupportedParamResult)(nil), // 36: usp.GetSupportedDMResp.SupportedParamResult
- (*GetSupportedDMResp_SupportedCommandResult)(nil), // 37: usp.GetSupportedDMResp.SupportedCommandResult
- (*GetSupportedDMResp_SupportedEventResult)(nil), // 38: usp.GetSupportedDMResp.SupportedEventResult
- (*GetInstancesResp_RequestedPathResult)(nil), // 39: usp.GetInstancesResp.RequestedPathResult
- (*GetInstancesResp_CurrInstance)(nil), // 40: usp.GetInstancesResp.CurrInstance
- nil, // 41: usp.GetInstancesResp.CurrInstance.UniqueKeysEntry
- (*Add_CreateObject)(nil), // 42: usp.Add.CreateObject
- (*Add_CreateParamSetting)(nil), // 43: usp.Add.CreateParamSetting
- (*AddResp_CreatedObjectResult)(nil), // 44: usp.AddResp.CreatedObjectResult
- (*AddResp_ParameterError)(nil), // 45: usp.AddResp.ParameterError
- (*AddResp_CreatedObjectResult_OperationStatus)(nil), // 46: usp.AddResp.CreatedObjectResult.OperationStatus
- (*AddResp_CreatedObjectResult_OperationStatus_OperationFailure)(nil), // 47: usp.AddResp.CreatedObjectResult.OperationStatus.OperationFailure
- (*AddResp_CreatedObjectResult_OperationStatus_OperationSuccess)(nil), // 48: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess
- nil, // 49: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.UniqueKeysEntry
- (*DeleteResp_DeletedObjectResult)(nil), // 50: usp.DeleteResp.DeletedObjectResult
- (*DeleteResp_UnaffectedPathError)(nil), // 51: usp.DeleteResp.UnaffectedPathError
- (*DeleteResp_DeletedObjectResult_OperationStatus)(nil), // 52: usp.DeleteResp.DeletedObjectResult.OperationStatus
- (*DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure)(nil), // 53: usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationFailure
- (*DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess)(nil), // 54: usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationSuccess
- (*Set_UpdateObject)(nil), // 55: usp.Set.UpdateObject
- (*Set_UpdateParamSetting)(nil), // 56: usp.Set.UpdateParamSetting
- (*SetResp_UpdatedObjectResult)(nil), // 57: usp.SetResp.UpdatedObjectResult
- (*SetResp_UpdatedInstanceFailure)(nil), // 58: usp.SetResp.UpdatedInstanceFailure
- (*SetResp_UpdatedInstanceResult)(nil), // 59: usp.SetResp.UpdatedInstanceResult
- (*SetResp_ParameterError)(nil), // 60: usp.SetResp.ParameterError
- (*SetResp_UpdatedObjectResult_OperationStatus)(nil), // 61: usp.SetResp.UpdatedObjectResult.OperationStatus
- (*SetResp_UpdatedObjectResult_OperationStatus_OperationFailure)(nil), // 62: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationFailure
- (*SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess)(nil), // 63: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationSuccess
- nil, // 64: usp.SetResp.UpdatedInstanceResult.UpdatedParamsEntry
- nil, // 65: usp.Operate.InputArgsEntry
- (*OperateResp_OperationResult)(nil), // 66: usp.OperateResp.OperationResult
- (*OperateResp_OperationResult_OutputArgs)(nil), // 67: usp.OperateResp.OperationResult.OutputArgs
- (*OperateResp_OperationResult_CommandFailure)(nil), // 68: usp.OperateResp.OperationResult.CommandFailure
- nil, // 69: usp.OperateResp.OperationResult.OutputArgs.OutputArgsEntry
- (*Notify_Event)(nil), // 70: usp.Notify.Event
- (*Notify_ValueChange)(nil), // 71: usp.Notify.ValueChange
- (*Notify_ObjectCreation)(nil), // 72: usp.Notify.ObjectCreation
- (*Notify_ObjectDeletion)(nil), // 73: usp.Notify.ObjectDeletion
- (*Notify_OperationComplete)(nil), // 74: usp.Notify.OperationComplete
- (*Notify_OnBoardRequest)(nil), // 75: usp.Notify.OnBoardRequest
- nil, // 76: usp.Notify.Event.ParamsEntry
- nil, // 77: usp.Notify.ObjectCreation.UniqueKeysEntry
- (*Notify_OperationComplete_OutputArgs)(nil), // 78: usp.Notify.OperationComplete.OutputArgs
- (*Notify_OperationComplete_CommandFailure)(nil), // 79: usp.Notify.OperationComplete.CommandFailure
- nil, // 80: usp.Notify.OperationComplete.OutputArgs.OutputArgsEntry
+ (*Register)(nil), // 30: usp.Register
+ (*RegisterResp)(nil), // 31: usp.RegisterResp
+ (*Deregister)(nil), // 32: usp.Deregister
+ (*DeregisterResp)(nil), // 33: usp.DeregisterResp
+ (*Error_ParamError)(nil), // 34: usp.Error.ParamError
+ (*GetResp_RequestedPathResult)(nil), // 35: usp.GetResp.RequestedPathResult
+ (*GetResp_ResolvedPathResult)(nil), // 36: usp.GetResp.ResolvedPathResult
+ nil, // 37: usp.GetResp.ResolvedPathResult.ResultParamsEntry
+ (*GetSupportedDMResp_RequestedObjectResult)(nil), // 38: usp.GetSupportedDMResp.RequestedObjectResult
+ (*GetSupportedDMResp_SupportedObjectResult)(nil), // 39: usp.GetSupportedDMResp.SupportedObjectResult
+ (*GetSupportedDMResp_SupportedParamResult)(nil), // 40: usp.GetSupportedDMResp.SupportedParamResult
+ (*GetSupportedDMResp_SupportedCommandResult)(nil), // 41: usp.GetSupportedDMResp.SupportedCommandResult
+ (*GetSupportedDMResp_SupportedEventResult)(nil), // 42: usp.GetSupportedDMResp.SupportedEventResult
+ (*GetInstancesResp_RequestedPathResult)(nil), // 43: usp.GetInstancesResp.RequestedPathResult
+ (*GetInstancesResp_CurrInstance)(nil), // 44: usp.GetInstancesResp.CurrInstance
+ nil, // 45: usp.GetInstancesResp.CurrInstance.UniqueKeysEntry
+ (*Add_CreateObject)(nil), // 46: usp.Add.CreateObject
+ (*Add_CreateParamSetting)(nil), // 47: usp.Add.CreateParamSetting
+ (*AddResp_CreatedObjectResult)(nil), // 48: usp.AddResp.CreatedObjectResult
+ (*AddResp_ParameterError)(nil), // 49: usp.AddResp.ParameterError
+ (*AddResp_CreatedObjectResult_OperationStatus)(nil), // 50: usp.AddResp.CreatedObjectResult.OperationStatus
+ (*AddResp_CreatedObjectResult_OperationStatus_OperationFailure)(nil), // 51: usp.AddResp.CreatedObjectResult.OperationStatus.OperationFailure
+ (*AddResp_CreatedObjectResult_OperationStatus_OperationSuccess)(nil), // 52: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess
+ nil, // 53: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.UniqueKeysEntry
+ (*DeleteResp_DeletedObjectResult)(nil), // 54: usp.DeleteResp.DeletedObjectResult
+ (*DeleteResp_UnaffectedPathError)(nil), // 55: usp.DeleteResp.UnaffectedPathError
+ (*DeleteResp_DeletedObjectResult_OperationStatus)(nil), // 56: usp.DeleteResp.DeletedObjectResult.OperationStatus
+ (*DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure)(nil), // 57: usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationFailure
+ (*DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess)(nil), // 58: usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationSuccess
+ (*Set_UpdateObject)(nil), // 59: usp.Set.UpdateObject
+ (*Set_UpdateParamSetting)(nil), // 60: usp.Set.UpdateParamSetting
+ (*SetResp_UpdatedObjectResult)(nil), // 61: usp.SetResp.UpdatedObjectResult
+ (*SetResp_UpdatedInstanceFailure)(nil), // 62: usp.SetResp.UpdatedInstanceFailure
+ (*SetResp_UpdatedInstanceResult)(nil), // 63: usp.SetResp.UpdatedInstanceResult
+ (*SetResp_ParameterError)(nil), // 64: usp.SetResp.ParameterError
+ (*SetResp_UpdatedObjectResult_OperationStatus)(nil), // 65: usp.SetResp.UpdatedObjectResult.OperationStatus
+ (*SetResp_UpdatedObjectResult_OperationStatus_OperationFailure)(nil), // 66: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationFailure
+ (*SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess)(nil), // 67: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationSuccess
+ nil, // 68: usp.SetResp.UpdatedInstanceResult.UpdatedParamsEntry
+ nil, // 69: usp.Operate.InputArgsEntry
+ (*OperateResp_OperationResult)(nil), // 70: usp.OperateResp.OperationResult
+ (*OperateResp_OperationResult_OutputArgs)(nil), // 71: usp.OperateResp.OperationResult.OutputArgs
+ (*OperateResp_OperationResult_CommandFailure)(nil), // 72: usp.OperateResp.OperationResult.CommandFailure
+ nil, // 73: usp.OperateResp.OperationResult.OutputArgs.OutputArgsEntry
+ (*Notify_Event)(nil), // 74: usp.Notify.Event
+ (*Notify_ValueChange)(nil), // 75: usp.Notify.ValueChange
+ (*Notify_ObjectCreation)(nil), // 76: usp.Notify.ObjectCreation
+ (*Notify_ObjectDeletion)(nil), // 77: usp.Notify.ObjectDeletion
+ (*Notify_OperationComplete)(nil), // 78: usp.Notify.OperationComplete
+ (*Notify_OnBoardRequest)(nil), // 79: usp.Notify.OnBoardRequest
+ nil, // 80: usp.Notify.Event.ParamsEntry
+ nil, // 81: usp.Notify.ObjectCreation.UniqueKeysEntry
+ (*Notify_OperationComplete_OutputArgs)(nil), // 82: usp.Notify.OperationComplete.OutputArgs
+ (*Notify_OperationComplete_CommandFailure)(nil), // 83: usp.Notify.OperationComplete.CommandFailure
+ nil, // 84: usp.Notify.OperationComplete.OutputArgs.OutputArgsEntry
+ (*Register_RegistrationPath)(nil), // 85: usp.Register.RegistrationPath
+ (*RegisterResp_RegisteredPathResult)(nil), // 86: usp.RegisterResp.RegisteredPathResult
+ (*RegisterResp_RegisteredPathResult_OperationStatus)(nil), // 87: usp.RegisterResp.RegisteredPathResult.OperationStatus
+ (*RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure)(nil), // 88: usp.RegisterResp.RegisteredPathResult.OperationStatus.OperationFailure
+ (*RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess)(nil), // 89: usp.RegisterResp.RegisteredPathResult.OperationStatus.OperationSuccess
+ (*DeregisterResp_DeregisteredPathResult)(nil), // 90: usp.DeregisterResp.DeregisteredPathResult
+ (*DeregisterResp_DeregisteredPathResult_OperationStatus)(nil), // 91: usp.DeregisterResp.DeregisteredPathResult.OperationStatus
+ (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure)(nil), // 92: usp.DeregisterResp.DeregisteredPathResult.OperationStatus.OperationFailure
+ (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess)(nil), // 93: usp.DeregisterResp.DeregisteredPathResult.OperationStatus.OperationSuccess
}
-var file_usp_msg_1_2_proto_depIdxs = []int32{
+var file_usp_msg_1_3_proto_depIdxs = []int32{
7, // 0: usp.Msg.header:type_name -> usp.Header
8, // 1: usp.Msg.body:type_name -> usp.Body
0, // 2: usp.Header.msg_type:type_name -> usp.Header.MsgType
@@ -5656,86 +6568,99 @@ var file_usp_msg_1_2_proto_depIdxs = []int32{
26, // 12: usp.Request.operate:type_name -> usp.Operate
28, // 13: usp.Request.notify:type_name -> usp.Notify
18, // 14: usp.Request.get_supported_protocol:type_name -> usp.GetSupportedProtocol
- 13, // 15: usp.Response.get_resp:type_name -> usp.GetResp
- 15, // 16: usp.Response.get_supported_dm_resp:type_name -> usp.GetSupportedDMResp
- 17, // 17: usp.Response.get_instances_resp:type_name -> usp.GetInstancesResp
- 25, // 18: usp.Response.set_resp:type_name -> usp.SetResp
- 21, // 19: usp.Response.add_resp:type_name -> usp.AddResp
- 23, // 20: usp.Response.delete_resp:type_name -> usp.DeleteResp
- 27, // 21: usp.Response.operate_resp:type_name -> usp.OperateResp
- 29, // 22: usp.Response.notify_resp:type_name -> usp.NotifyResp
- 19, // 23: usp.Response.get_supported_protocol_resp:type_name -> usp.GetSupportedProtocolResp
- 30, // 24: usp.Error.param_errs:type_name -> usp.Error.ParamError
- 31, // 25: usp.GetResp.req_path_results:type_name -> usp.GetResp.RequestedPathResult
- 34, // 26: usp.GetSupportedDMResp.req_obj_results:type_name -> usp.GetSupportedDMResp.RequestedObjectResult
- 39, // 27: usp.GetInstancesResp.req_path_results:type_name -> usp.GetInstancesResp.RequestedPathResult
- 42, // 28: usp.Add.create_objs:type_name -> usp.Add.CreateObject
- 44, // 29: usp.AddResp.created_obj_results:type_name -> usp.AddResp.CreatedObjectResult
- 50, // 30: usp.DeleteResp.deleted_obj_results:type_name -> usp.DeleteResp.DeletedObjectResult
- 55, // 31: usp.Set.update_objs:type_name -> usp.Set.UpdateObject
- 57, // 32: usp.SetResp.updated_obj_results:type_name -> usp.SetResp.UpdatedObjectResult
- 65, // 33: usp.Operate.input_args:type_name -> usp.Operate.InputArgsEntry
- 66, // 34: usp.OperateResp.operation_results:type_name -> usp.OperateResp.OperationResult
- 70, // 35: usp.Notify.event:type_name -> usp.Notify.Event
- 71, // 36: usp.Notify.value_change:type_name -> usp.Notify.ValueChange
- 72, // 37: usp.Notify.obj_creation:type_name -> usp.Notify.ObjectCreation
- 73, // 38: usp.Notify.obj_deletion:type_name -> usp.Notify.ObjectDeletion
- 74, // 39: usp.Notify.oper_complete:type_name -> usp.Notify.OperationComplete
- 75, // 40: usp.Notify.on_board_req:type_name -> usp.Notify.OnBoardRequest
- 32, // 41: usp.GetResp.RequestedPathResult.resolved_path_results:type_name -> usp.GetResp.ResolvedPathResult
- 33, // 42: usp.GetResp.ResolvedPathResult.result_params:type_name -> usp.GetResp.ResolvedPathResult.ResultParamsEntry
- 35, // 43: usp.GetSupportedDMResp.RequestedObjectResult.supported_objs:type_name -> usp.GetSupportedDMResp.SupportedObjectResult
- 2, // 44: usp.GetSupportedDMResp.SupportedObjectResult.access:type_name -> usp.GetSupportedDMResp.ObjAccessType
- 37, // 45: usp.GetSupportedDMResp.SupportedObjectResult.supported_commands:type_name -> usp.GetSupportedDMResp.SupportedCommandResult
- 38, // 46: usp.GetSupportedDMResp.SupportedObjectResult.supported_events:type_name -> usp.GetSupportedDMResp.SupportedEventResult
- 36, // 47: usp.GetSupportedDMResp.SupportedObjectResult.supported_params:type_name -> usp.GetSupportedDMResp.SupportedParamResult
- 1, // 48: usp.GetSupportedDMResp.SupportedParamResult.access:type_name -> usp.GetSupportedDMResp.ParamAccessType
- 3, // 49: usp.GetSupportedDMResp.SupportedParamResult.value_type:type_name -> usp.GetSupportedDMResp.ParamValueType
- 4, // 50: usp.GetSupportedDMResp.SupportedParamResult.value_change:type_name -> usp.GetSupportedDMResp.ValueChangeType
- 5, // 51: usp.GetSupportedDMResp.SupportedCommandResult.command_type:type_name -> usp.GetSupportedDMResp.CmdType
- 40, // 52: usp.GetInstancesResp.RequestedPathResult.curr_insts:type_name -> usp.GetInstancesResp.CurrInstance
- 41, // 53: usp.GetInstancesResp.CurrInstance.unique_keys:type_name -> usp.GetInstancesResp.CurrInstance.UniqueKeysEntry
- 43, // 54: usp.Add.CreateObject.param_settings:type_name -> usp.Add.CreateParamSetting
- 46, // 55: usp.AddResp.CreatedObjectResult.oper_status:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus
- 47, // 56: usp.AddResp.CreatedObjectResult.OperationStatus.oper_failure:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus.OperationFailure
- 48, // 57: usp.AddResp.CreatedObjectResult.OperationStatus.oper_success:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess
- 45, // 58: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.param_errs:type_name -> usp.AddResp.ParameterError
- 49, // 59: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.unique_keys:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.UniqueKeysEntry
- 52, // 60: usp.DeleteResp.DeletedObjectResult.oper_status:type_name -> usp.DeleteResp.DeletedObjectResult.OperationStatus
- 53, // 61: usp.DeleteResp.DeletedObjectResult.OperationStatus.oper_failure:type_name -> usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationFailure
- 54, // 62: usp.DeleteResp.DeletedObjectResult.OperationStatus.oper_success:type_name -> usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationSuccess
- 51, // 63: usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationSuccess.unaffected_path_errs:type_name -> usp.DeleteResp.UnaffectedPathError
- 56, // 64: usp.Set.UpdateObject.param_settings:type_name -> usp.Set.UpdateParamSetting
- 61, // 65: usp.SetResp.UpdatedObjectResult.oper_status:type_name -> usp.SetResp.UpdatedObjectResult.OperationStatus
- 60, // 66: usp.SetResp.UpdatedInstanceFailure.param_errs:type_name -> usp.SetResp.ParameterError
- 60, // 67: usp.SetResp.UpdatedInstanceResult.param_errs:type_name -> usp.SetResp.ParameterError
- 64, // 68: usp.SetResp.UpdatedInstanceResult.updated_params:type_name -> usp.SetResp.UpdatedInstanceResult.UpdatedParamsEntry
- 62, // 69: usp.SetResp.UpdatedObjectResult.OperationStatus.oper_failure:type_name -> usp.SetResp.UpdatedObjectResult.OperationStatus.OperationFailure
- 63, // 70: usp.SetResp.UpdatedObjectResult.OperationStatus.oper_success:type_name -> usp.SetResp.UpdatedObjectResult.OperationStatus.OperationSuccess
- 58, // 71: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationFailure.updated_inst_failures:type_name -> usp.SetResp.UpdatedInstanceFailure
- 59, // 72: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationSuccess.updated_inst_results:type_name -> usp.SetResp.UpdatedInstanceResult
- 67, // 73: usp.OperateResp.OperationResult.req_output_args:type_name -> usp.OperateResp.OperationResult.OutputArgs
- 68, // 74: usp.OperateResp.OperationResult.cmd_failure:type_name -> usp.OperateResp.OperationResult.CommandFailure
- 69, // 75: usp.OperateResp.OperationResult.OutputArgs.output_args:type_name -> usp.OperateResp.OperationResult.OutputArgs.OutputArgsEntry
- 76, // 76: usp.Notify.Event.params:type_name -> usp.Notify.Event.ParamsEntry
- 77, // 77: usp.Notify.ObjectCreation.unique_keys:type_name -> usp.Notify.ObjectCreation.UniqueKeysEntry
- 78, // 78: usp.Notify.OperationComplete.req_output_args:type_name -> usp.Notify.OperationComplete.OutputArgs
- 79, // 79: usp.Notify.OperationComplete.cmd_failure:type_name -> usp.Notify.OperationComplete.CommandFailure
- 80, // 80: usp.Notify.OperationComplete.OutputArgs.output_args:type_name -> usp.Notify.OperationComplete.OutputArgs.OutputArgsEntry
- 81, // [81:81] is the sub-list for method output_type
- 81, // [81:81] is the sub-list for method input_type
- 81, // [81:81] is the sub-list for extension type_name
- 81, // [81:81] is the sub-list for extension extendee
- 0, // [0:81] is the sub-list for field type_name
+ 30, // 15: usp.Request.register:type_name -> usp.Register
+ 32, // 16: usp.Request.deregister:type_name -> usp.Deregister
+ 13, // 17: usp.Response.get_resp:type_name -> usp.GetResp
+ 15, // 18: usp.Response.get_supported_dm_resp:type_name -> usp.GetSupportedDMResp
+ 17, // 19: usp.Response.get_instances_resp:type_name -> usp.GetInstancesResp
+ 25, // 20: usp.Response.set_resp:type_name -> usp.SetResp
+ 21, // 21: usp.Response.add_resp:type_name -> usp.AddResp
+ 23, // 22: usp.Response.delete_resp:type_name -> usp.DeleteResp
+ 27, // 23: usp.Response.operate_resp:type_name -> usp.OperateResp
+ 29, // 24: usp.Response.notify_resp:type_name -> usp.NotifyResp
+ 19, // 25: usp.Response.get_supported_protocol_resp:type_name -> usp.GetSupportedProtocolResp
+ 31, // 26: usp.Response.register_resp:type_name -> usp.RegisterResp
+ 33, // 27: usp.Response.deregister_resp:type_name -> usp.DeregisterResp
+ 34, // 28: usp.Error.param_errs:type_name -> usp.Error.ParamError
+ 35, // 29: usp.GetResp.req_path_results:type_name -> usp.GetResp.RequestedPathResult
+ 38, // 30: usp.GetSupportedDMResp.req_obj_results:type_name -> usp.GetSupportedDMResp.RequestedObjectResult
+ 43, // 31: usp.GetInstancesResp.req_path_results:type_name -> usp.GetInstancesResp.RequestedPathResult
+ 46, // 32: usp.Add.create_objs:type_name -> usp.Add.CreateObject
+ 48, // 33: usp.AddResp.created_obj_results:type_name -> usp.AddResp.CreatedObjectResult
+ 54, // 34: usp.DeleteResp.deleted_obj_results:type_name -> usp.DeleteResp.DeletedObjectResult
+ 59, // 35: usp.Set.update_objs:type_name -> usp.Set.UpdateObject
+ 61, // 36: usp.SetResp.updated_obj_results:type_name -> usp.SetResp.UpdatedObjectResult
+ 69, // 37: usp.Operate.input_args:type_name -> usp.Operate.InputArgsEntry
+ 70, // 38: usp.OperateResp.operation_results:type_name -> usp.OperateResp.OperationResult
+ 74, // 39: usp.Notify.event:type_name -> usp.Notify.Event
+ 75, // 40: usp.Notify.value_change:type_name -> usp.Notify.ValueChange
+ 76, // 41: usp.Notify.obj_creation:type_name -> usp.Notify.ObjectCreation
+ 77, // 42: usp.Notify.obj_deletion:type_name -> usp.Notify.ObjectDeletion
+ 78, // 43: usp.Notify.oper_complete:type_name -> usp.Notify.OperationComplete
+ 79, // 44: usp.Notify.on_board_req:type_name -> usp.Notify.OnBoardRequest
+ 85, // 45: usp.Register.reg_paths:type_name -> usp.Register.RegistrationPath
+ 86, // 46: usp.RegisterResp.registered_path_results:type_name -> usp.RegisterResp.RegisteredPathResult
+ 90, // 47: usp.DeregisterResp.deregistered_path_results:type_name -> usp.DeregisterResp.DeregisteredPathResult
+ 36, // 48: usp.GetResp.RequestedPathResult.resolved_path_results:type_name -> usp.GetResp.ResolvedPathResult
+ 37, // 49: usp.GetResp.ResolvedPathResult.result_params:type_name -> usp.GetResp.ResolvedPathResult.ResultParamsEntry
+ 39, // 50: usp.GetSupportedDMResp.RequestedObjectResult.supported_objs:type_name -> usp.GetSupportedDMResp.SupportedObjectResult
+ 2, // 51: usp.GetSupportedDMResp.SupportedObjectResult.access:type_name -> usp.GetSupportedDMResp.ObjAccessType
+ 41, // 52: usp.GetSupportedDMResp.SupportedObjectResult.supported_commands:type_name -> usp.GetSupportedDMResp.SupportedCommandResult
+ 42, // 53: usp.GetSupportedDMResp.SupportedObjectResult.supported_events:type_name -> usp.GetSupportedDMResp.SupportedEventResult
+ 40, // 54: usp.GetSupportedDMResp.SupportedObjectResult.supported_params:type_name -> usp.GetSupportedDMResp.SupportedParamResult
+ 1, // 55: usp.GetSupportedDMResp.SupportedParamResult.access:type_name -> usp.GetSupportedDMResp.ParamAccessType
+ 3, // 56: usp.GetSupportedDMResp.SupportedParamResult.value_type:type_name -> usp.GetSupportedDMResp.ParamValueType
+ 4, // 57: usp.GetSupportedDMResp.SupportedParamResult.value_change:type_name -> usp.GetSupportedDMResp.ValueChangeType
+ 5, // 58: usp.GetSupportedDMResp.SupportedCommandResult.command_type:type_name -> usp.GetSupportedDMResp.CmdType
+ 44, // 59: usp.GetInstancesResp.RequestedPathResult.curr_insts:type_name -> usp.GetInstancesResp.CurrInstance
+ 45, // 60: usp.GetInstancesResp.CurrInstance.unique_keys:type_name -> usp.GetInstancesResp.CurrInstance.UniqueKeysEntry
+ 47, // 61: usp.Add.CreateObject.param_settings:type_name -> usp.Add.CreateParamSetting
+ 50, // 62: usp.AddResp.CreatedObjectResult.oper_status:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus
+ 51, // 63: usp.AddResp.CreatedObjectResult.OperationStatus.oper_failure:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus.OperationFailure
+ 52, // 64: usp.AddResp.CreatedObjectResult.OperationStatus.oper_success:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess
+ 49, // 65: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.param_errs:type_name -> usp.AddResp.ParameterError
+ 53, // 66: usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.unique_keys:type_name -> usp.AddResp.CreatedObjectResult.OperationStatus.OperationSuccess.UniqueKeysEntry
+ 56, // 67: usp.DeleteResp.DeletedObjectResult.oper_status:type_name -> usp.DeleteResp.DeletedObjectResult.OperationStatus
+ 57, // 68: usp.DeleteResp.DeletedObjectResult.OperationStatus.oper_failure:type_name -> usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationFailure
+ 58, // 69: usp.DeleteResp.DeletedObjectResult.OperationStatus.oper_success:type_name -> usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationSuccess
+ 55, // 70: usp.DeleteResp.DeletedObjectResult.OperationStatus.OperationSuccess.unaffected_path_errs:type_name -> usp.DeleteResp.UnaffectedPathError
+ 60, // 71: usp.Set.UpdateObject.param_settings:type_name -> usp.Set.UpdateParamSetting
+ 65, // 72: usp.SetResp.UpdatedObjectResult.oper_status:type_name -> usp.SetResp.UpdatedObjectResult.OperationStatus
+ 64, // 73: usp.SetResp.UpdatedInstanceFailure.param_errs:type_name -> usp.SetResp.ParameterError
+ 64, // 74: usp.SetResp.UpdatedInstanceResult.param_errs:type_name -> usp.SetResp.ParameterError
+ 68, // 75: usp.SetResp.UpdatedInstanceResult.updated_params:type_name -> usp.SetResp.UpdatedInstanceResult.UpdatedParamsEntry
+ 66, // 76: usp.SetResp.UpdatedObjectResult.OperationStatus.oper_failure:type_name -> usp.SetResp.UpdatedObjectResult.OperationStatus.OperationFailure
+ 67, // 77: usp.SetResp.UpdatedObjectResult.OperationStatus.oper_success:type_name -> usp.SetResp.UpdatedObjectResult.OperationStatus.OperationSuccess
+ 62, // 78: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationFailure.updated_inst_failures:type_name -> usp.SetResp.UpdatedInstanceFailure
+ 63, // 79: usp.SetResp.UpdatedObjectResult.OperationStatus.OperationSuccess.updated_inst_results:type_name -> usp.SetResp.UpdatedInstanceResult
+ 71, // 80: usp.OperateResp.OperationResult.req_output_args:type_name -> usp.OperateResp.OperationResult.OutputArgs
+ 72, // 81: usp.OperateResp.OperationResult.cmd_failure:type_name -> usp.OperateResp.OperationResult.CommandFailure
+ 73, // 82: usp.OperateResp.OperationResult.OutputArgs.output_args:type_name -> usp.OperateResp.OperationResult.OutputArgs.OutputArgsEntry
+ 80, // 83: usp.Notify.Event.params:type_name -> usp.Notify.Event.ParamsEntry
+ 81, // 84: usp.Notify.ObjectCreation.unique_keys:type_name -> usp.Notify.ObjectCreation.UniqueKeysEntry
+ 82, // 85: usp.Notify.OperationComplete.req_output_args:type_name -> usp.Notify.OperationComplete.OutputArgs
+ 83, // 86: usp.Notify.OperationComplete.cmd_failure:type_name -> usp.Notify.OperationComplete.CommandFailure
+ 84, // 87: usp.Notify.OperationComplete.OutputArgs.output_args:type_name -> usp.Notify.OperationComplete.OutputArgs.OutputArgsEntry
+ 87, // 88: usp.RegisterResp.RegisteredPathResult.oper_status:type_name -> usp.RegisterResp.RegisteredPathResult.OperationStatus
+ 88, // 89: usp.RegisterResp.RegisteredPathResult.OperationStatus.oper_failure:type_name -> usp.RegisterResp.RegisteredPathResult.OperationStatus.OperationFailure
+ 89, // 90: usp.RegisterResp.RegisteredPathResult.OperationStatus.oper_success:type_name -> usp.RegisterResp.RegisteredPathResult.OperationStatus.OperationSuccess
+ 91, // 91: usp.DeregisterResp.DeregisteredPathResult.oper_status:type_name -> usp.DeregisterResp.DeregisteredPathResult.OperationStatus
+ 92, // 92: usp.DeregisterResp.DeregisteredPathResult.OperationStatus.oper_failure:type_name -> usp.DeregisterResp.DeregisteredPathResult.OperationStatus.OperationFailure
+ 93, // 93: usp.DeregisterResp.DeregisteredPathResult.OperationStatus.oper_success:type_name -> usp.DeregisterResp.DeregisteredPathResult.OperationStatus.OperationSuccess
+ 94, // [94:94] is the sub-list for method output_type
+ 94, // [94:94] is the sub-list for method input_type
+ 94, // [94:94] is the sub-list for extension type_name
+ 94, // [94:94] is the sub-list for extension extendee
+ 0, // [0:94] is the sub-list for field type_name
}
-func init() { file_usp_msg_1_2_proto_init() }
-func file_usp_msg_1_2_proto_init() {
- if File_usp_msg_1_2_proto != nil {
+func init() { file_usp_msg_1_3_proto_init() }
+func file_usp_msg_1_3_proto_init() {
+ if File_usp_msg_1_3_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
- file_usp_msg_1_2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Msg); i {
case 0:
return &v.state
@@ -5747,7 +6672,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Header); i {
case 0:
return &v.state
@@ -5759,7 +6684,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Body); i {
case 0:
return &v.state
@@ -5771,7 +6696,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Request); i {
case 0:
return &v.state
@@ -5783,7 +6708,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Response); i {
case 0:
return &v.state
@@ -5795,7 +6720,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error); i {
case 0:
return &v.state
@@ -5807,7 +6732,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Get); i {
case 0:
return &v.state
@@ -5819,7 +6744,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetResp); i {
case 0:
return &v.state
@@ -5831,7 +6756,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDM); i {
case 0:
return &v.state
@@ -5843,7 +6768,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDMResp); i {
case 0:
return &v.state
@@ -5855,7 +6780,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetInstances); i {
case 0:
return &v.state
@@ -5867,7 +6792,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetInstancesResp); i {
case 0:
return &v.state
@@ -5879,7 +6804,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedProtocol); i {
case 0:
return &v.state
@@ -5891,7 +6816,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedProtocolResp); i {
case 0:
return &v.state
@@ -5903,7 +6828,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Add); i {
case 0:
return &v.state
@@ -5915,7 +6840,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AddResp); i {
case 0:
return &v.state
@@ -5927,7 +6852,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Delete); i {
case 0:
return &v.state
@@ -5939,7 +6864,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteResp); i {
case 0:
return &v.state
@@ -5951,7 +6876,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Set); i {
case 0:
return &v.state
@@ -5963,7 +6888,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp); i {
case 0:
return &v.state
@@ -5975,7 +6900,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Operate); i {
case 0:
return &v.state
@@ -5987,7 +6912,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OperateResp); i {
case 0:
return &v.state
@@ -5999,7 +6924,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify); i {
case 0:
return &v.state
@@ -6011,7 +6936,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NotifyResp); i {
case 0:
return &v.state
@@ -6023,7 +6948,55 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Register); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Deregister); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeregisterResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error_ParamError); i {
case 0:
return &v.state
@@ -6035,7 +7008,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetResp_RequestedPathResult); i {
case 0:
return &v.state
@@ -6047,7 +7020,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetResp_ResolvedPathResult); i {
case 0:
return &v.state
@@ -6059,7 +7032,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDMResp_RequestedObjectResult); i {
case 0:
return &v.state
@@ -6071,7 +7044,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDMResp_SupportedObjectResult); i {
case 0:
return &v.state
@@ -6083,7 +7056,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDMResp_SupportedParamResult); i {
case 0:
return &v.state
@@ -6095,7 +7068,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDMResp_SupportedCommandResult); i {
case 0:
return &v.state
@@ -6107,7 +7080,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetSupportedDMResp_SupportedEventResult); i {
case 0:
return &v.state
@@ -6119,7 +7092,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetInstancesResp_RequestedPathResult); i {
case 0:
return &v.state
@@ -6131,7 +7104,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetInstancesResp_CurrInstance); i {
case 0:
return &v.state
@@ -6143,7 +7116,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Add_CreateObject); i {
case 0:
return &v.state
@@ -6155,7 +7128,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Add_CreateParamSetting); i {
case 0:
return &v.state
@@ -6167,7 +7140,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AddResp_CreatedObjectResult); i {
case 0:
return &v.state
@@ -6179,7 +7152,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AddResp_ParameterError); i {
case 0:
return &v.state
@@ -6191,7 +7164,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AddResp_CreatedObjectResult_OperationStatus); i {
case 0:
return &v.state
@@ -6203,7 +7176,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AddResp_CreatedObjectResult_OperationStatus_OperationFailure); i {
case 0:
return &v.state
@@ -6215,7 +7188,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AddResp_CreatedObjectResult_OperationStatus_OperationSuccess); i {
case 0:
return &v.state
@@ -6227,7 +7200,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteResp_DeletedObjectResult); i {
case 0:
return &v.state
@@ -6239,7 +7212,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteResp_UnaffectedPathError); i {
case 0:
return &v.state
@@ -6251,7 +7224,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteResp_DeletedObjectResult_OperationStatus); i {
case 0:
return &v.state
@@ -6263,7 +7236,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteResp_DeletedObjectResult_OperationStatus_OperationFailure); i {
case 0:
return &v.state
@@ -6275,7 +7248,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DeleteResp_DeletedObjectResult_OperationStatus_OperationSuccess); i {
case 0:
return &v.state
@@ -6287,7 +7260,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Set_UpdateObject); i {
case 0:
return &v.state
@@ -6299,7 +7272,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Set_UpdateParamSetting); i {
case 0:
return &v.state
@@ -6311,7 +7284,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_UpdatedObjectResult); i {
case 0:
return &v.state
@@ -6323,7 +7296,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_UpdatedInstanceFailure); i {
case 0:
return &v.state
@@ -6335,7 +7308,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_UpdatedInstanceResult); i {
case 0:
return &v.state
@@ -6347,7 +7320,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_ParameterError); i {
case 0:
return &v.state
@@ -6359,7 +7332,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_UpdatedObjectResult_OperationStatus); i {
case 0:
return &v.state
@@ -6371,7 +7344,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_UpdatedObjectResult_OperationStatus_OperationFailure); i {
case 0:
return &v.state
@@ -6383,7 +7356,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetResp_UpdatedObjectResult_OperationStatus_OperationSuccess); i {
case 0:
return &v.state
@@ -6395,7 +7368,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OperateResp_OperationResult); i {
case 0:
return &v.state
@@ -6407,7 +7380,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OperateResp_OperationResult_OutputArgs); i {
case 0:
return &v.state
@@ -6419,7 +7392,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*OperateResp_OperationResult_CommandFailure); i {
case 0:
return &v.state
@@ -6431,7 +7404,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_Event); i {
case 0:
return &v.state
@@ -6443,7 +7416,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_ValueChange); i {
case 0:
return &v.state
@@ -6455,7 +7428,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_ObjectCreation); i {
case 0:
return &v.state
@@ -6467,7 +7440,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_ObjectDeletion); i {
case 0:
return &v.state
@@ -6479,7 +7452,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_OperationComplete); i {
case 0:
return &v.state
@@ -6491,7 +7464,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_OnBoardRequest); i {
case 0:
return &v.state
@@ -6503,7 +7476,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_OperationComplete_OutputArgs); i {
case 0:
return &v.state
@@ -6515,7 +7488,7 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
- file_usp_msg_1_2_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_msg_1_3_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Notify_OperationComplete_CommandFailure); i {
case 0:
return &v.state
@@ -6527,13 +7500,121 @@ func file_usp_msg_1_2_proto_init() {
return nil
}
}
+ file_usp_msg_1_3_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Register_RegistrationPath); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterResp_RegisteredPathResult); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterResp_RegisteredPathResult_OperationStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterResp_RegisteredPathResult_OperationStatus_OperationFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegisterResp_RegisteredPathResult_OperationStatus_OperationSuccess); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeregisterResp_DeregisteredPathResult); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeregisterResp_DeregisteredPathResult_OperationStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_msg_1_3_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DeregisterResp_DeregisteredPathResult_OperationStatus_OperationSuccess); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
- file_usp_msg_1_2_proto_msgTypes[2].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[2].OneofWrappers = []interface{}{
(*Body_Request)(nil),
(*Body_Response)(nil),
(*Body_Error)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[3].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[3].OneofWrappers = []interface{}{
(*Request_Get)(nil),
(*Request_GetSupportedDm)(nil),
(*Request_GetInstances)(nil),
@@ -6543,8 +7624,10 @@ func file_usp_msg_1_2_proto_init() {
(*Request_Operate)(nil),
(*Request_Notify)(nil),
(*Request_GetSupportedProtocol)(nil),
+ (*Request_Register)(nil),
+ (*Request_Deregister)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[4].OneofWrappers = []interface{}{
(*Response_GetResp)(nil),
(*Response_GetSupportedDmResp)(nil),
(*Response_GetInstancesResp)(nil),
@@ -6554,8 +7637,10 @@ func file_usp_msg_1_2_proto_init() {
(*Response_OperateResp)(nil),
(*Response_NotifyResp)(nil),
(*Response_GetSupportedProtocolResp)(nil),
+ (*Response_RegisterResp)(nil),
+ (*Response_DeregisterResp)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[22].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[22].OneofWrappers = []interface{}{
(*Notify_Event_)(nil),
(*Notify_ValueChange_)(nil),
(*Notify_ObjCreation)(nil),
@@ -6563,44 +7648,52 @@ func file_usp_msg_1_2_proto_init() {
(*Notify_OperComplete)(nil),
(*Notify_OnBoardReq)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[40].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[44].OneofWrappers = []interface{}{
(*AddResp_CreatedObjectResult_OperationStatus_OperFailure)(nil),
(*AddResp_CreatedObjectResult_OperationStatus_OperSuccess)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[46].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[50].OneofWrappers = []interface{}{
(*DeleteResp_DeletedObjectResult_OperationStatus_OperFailure)(nil),
(*DeleteResp_DeletedObjectResult_OperationStatus_OperSuccess)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[55].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[59].OneofWrappers = []interface{}{
(*SetResp_UpdatedObjectResult_OperationStatus_OperFailure)(nil),
(*SetResp_UpdatedObjectResult_OperationStatus_OperSuccess)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[60].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[64].OneofWrappers = []interface{}{
(*OperateResp_OperationResult_ReqObjPath)(nil),
(*OperateResp_OperationResult_ReqOutputArgs)(nil),
(*OperateResp_OperationResult_CmdFailure)(nil),
}
- file_usp_msg_1_2_proto_msgTypes[68].OneofWrappers = []interface{}{
+ file_usp_msg_1_3_proto_msgTypes[72].OneofWrappers = []interface{}{
(*Notify_OperationComplete_ReqOutputArgs)(nil),
(*Notify_OperationComplete_CmdFailure)(nil),
}
+ file_usp_msg_1_3_proto_msgTypes[81].OneofWrappers = []interface{}{
+ (*RegisterResp_RegisteredPathResult_OperationStatus_OperFailure)(nil),
+ (*RegisterResp_RegisteredPathResult_OperationStatus_OperSuccess)(nil),
+ }
+ file_usp_msg_1_3_proto_msgTypes[85].OneofWrappers = []interface{}{
+ (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperFailure)(nil),
+ (*DeregisterResp_DeregisteredPathResult_OperationStatus_OperSuccess)(nil),
+ }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_usp_msg_1_2_proto_rawDesc,
+ RawDescriptor: file_usp_msg_1_3_proto_rawDesc,
NumEnums: 6,
- NumMessages: 75,
+ NumMessages: 88,
NumExtensions: 0,
NumServices: 0,
},
- GoTypes: file_usp_msg_1_2_proto_goTypes,
- DependencyIndexes: file_usp_msg_1_2_proto_depIdxs,
- EnumInfos: file_usp_msg_1_2_proto_enumTypes,
- MessageInfos: file_usp_msg_1_2_proto_msgTypes,
+ GoTypes: file_usp_msg_1_3_proto_goTypes,
+ DependencyIndexes: file_usp_msg_1_3_proto_depIdxs,
+ EnumInfos: file_usp_msg_1_3_proto_enumTypes,
+ MessageInfos: file_usp_msg_1_3_proto_msgTypes,
}.Build()
- File_usp_msg_1_2_proto = out.File
- file_usp_msg_1_2_proto_rawDesc = nil
- file_usp_msg_1_2_proto_goTypes = nil
- file_usp_msg_1_2_proto_depIdxs = nil
+ File_usp_msg_1_3_proto = out.File
+ file_usp_msg_1_3_proto_rawDesc = nil
+ file_usp_msg_1_3_proto_goTypes = nil
+ file_usp_msg_1_3_proto_depIdxs = nil
}
diff --git a/backend/services/mtp/ws-adapter/internal/usp/usp_msg/usp-msg-1-2.proto b/backend/services/controller/internal/usp/usp_msg/usp-msg-1-3.proto
old mode 100755
new mode 100644
similarity index 88%
rename from backend/services/mtp/ws-adapter/internal/usp/usp_msg/usp-msg-1-2.proto
rename to backend/services/controller/internal/usp/usp_msg/usp-msg-1-3.proto
index 7572e7a..8cbfb14
--- a/backend/services/mtp/ws-adapter/internal/usp/usp_msg/usp-msg-1-2.proto
+++ b/backend/services/controller/internal/usp/usp_msg/usp-msg-1-3.proto
@@ -3,7 +3,7 @@ syntax = "proto3";
//**************************************************************************
// TR-369 USP Message Protocol Buffer Schema
//
-// Copyright (c) 2017-2018, Broadband Forum
+// Copyright (c) 2017-2022, Broadband Forum
//
// The undersigned members have elected to grant the copyright to
// their contributed material used in this software:
@@ -47,13 +47,8 @@ syntax = "proto3";
// Any moral rights which are necessary to exercise under the above
// license grant are also deemed granted under this license.
//
-// | Version | Name | Date |
-// | TR-369 1.0.0 | User Services Platform | APR, 2018 |
-// | TR-369 1.0.1 | User Services Platform | JUN, 2018 |
-// | TR-369 1.0.2 | User Services Platform | OCT, 2018 |
-// | TR-369 1.1 | User Services Platform | SEP, 2019 |
-//
-// BBF software release registry: http://www.broadband-forum.org/software
+// BBF software release registry:
+// https://www.broadband-forum.org/software-releases
//**************************************************************************
package usp;
@@ -90,6 +85,10 @@ message Header {
NOTIFY_RESP = 16;
GET_SUPPORTED_PROTO = 17;
GET_SUPPORTED_PROTO_RESP = 18;
+ REGISTER = 19;
+ REGISTER_RESP = 20;
+ DEREGISTER = 21;
+ DEREGISTER_RESP = 22;
}
}
@@ -114,6 +113,8 @@ message Request {
Operate operate = 7;
Notify notify = 8;
GetSupportedProtocol get_supported_protocol = 9;
+ Register register = 10;
+ Deregister deregister = 11;
}
}
@@ -129,6 +130,8 @@ message Response {
OperateResp operate_resp = 7;
NotifyResp notify_resp = 8;
GetSupportedProtocolResp get_supported_protocol_resp = 9;
+ RegisterResp register_resp = 10;
+ DeregisterResp deregister_resp = 11;
}
}
@@ -528,3 +531,66 @@ message Notify {
message NotifyResp {
string subscription_id = 1;
}
+
+message Register {
+ bool allow_partial = 1;
+ repeated RegistrationPath reg_paths = 2;
+
+ message RegistrationPath {
+ string path = 1;
+ }
+}
+
+message RegisterResp {
+ repeated RegisteredPathResult registered_path_results = 1;
+
+ message RegisteredPathResult {
+ string requested_path = 1;
+ OperationStatus oper_status = 2;
+
+ message OperationStatus {
+ oneof oper_status {
+ OperationFailure oper_failure = 1;
+ OperationSuccess oper_success = 2;
+ }
+
+ message OperationFailure {
+ fixed32 err_code = 1;
+ string err_msg = 2;
+ }
+
+ message OperationSuccess {
+ string registered_path = 1;
+ }
+ }
+ }
+}
+
+message Deregister {
+ repeated string paths = 1;
+}
+
+message DeregisterResp {
+ repeated DeregisteredPathResult deregistered_path_results = 1;
+
+ message DeregisteredPathResult {
+ string requested_path = 1;
+ OperationStatus oper_status = 2;
+
+ message OperationStatus {
+ oneof oper_status {
+ OperationFailure oper_failure = 1;
+ OperationSuccess oper_success = 2;
+ }
+
+ message OperationFailure {
+ fixed32 err_code = 1;
+ string err_msg = 2;
+ }
+
+ message OperationSuccess {
+ repeated string deregistered_path = 1;
+ }
+ }
+ }
+}
diff --git a/backend/services/mtp/ws/internal/usp_record/usp-record-1-2.pb.go b/backend/services/controller/internal/usp/usp_record/usp-record-1-3.pb.go
old mode 100755
new mode 100644
similarity index 69%
rename from backend/services/mtp/ws/internal/usp_record/usp-record-1-2.pb.go
rename to backend/services/controller/internal/usp/usp_record/usp-record-1-3.pb.go
index 6d3e41b..b69a7b3
--- a/backend/services/mtp/ws/internal/usp_record/usp-record-1-2.pb.go
+++ b/backend/services/controller/internal/usp/usp_record/usp-record-1-3.pb.go
@@ -1,17 +1,17 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.28.1
+// protoc-gen-go v1.31.0
// protoc v3.21.12
-// source: usp-record-1-2.proto
+// source: usp-record-1-3.proto
//**************************************************************************
// TR-369 USP Record Protocol Buffer Schema
//
-// Copyright (c) 2017-2018, Broadband Forum
+// Copyright (c) 2017-2022, Broadband Forum
//
// The undersigned members have elected to grant the copyright to
// their contributed material used in this software:
-// Copyright (c) 2017-2018 ARRIS Enterprises, LLC.
+// Copyright (c) 2017-2022 ARRIS Enterprises, LLC.
//
// Redistribution and use in source and binary forms, with or
// without modification, are permitted provided that the following
@@ -51,14 +51,8 @@
// Any moral rights which are necessary to exercise under the above
// license grant are also deemed granted under this license.
//
-//
-// | Version | Name | Date |
-// | TR-369 1.0.0 | User Services Platform | APR, 2018 |
-// | TR-369 1.0.1 | User Services Platform | JUN, 2018 |
-// | TR-369 1.0.2 | User Services Platform | OCT, 2018 |
-// | TR-369 1.1 | User Services Platform | SEP, 2019 |
-//
-// BBF software release registry: http://www.broadband-forum.org/software
+// BBF software release registry:
+// https://www.broadband-forum.org/software-releases
//**************************************************************************
package usp_record
@@ -107,11 +101,11 @@ func (x Record_PayloadSecurity) String() string {
}
func (Record_PayloadSecurity) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_record_1_2_proto_enumTypes[0].Descriptor()
+ return file_usp_record_1_3_proto_enumTypes[0].Descriptor()
}
func (Record_PayloadSecurity) Type() protoreflect.EnumType {
- return &file_usp_record_1_2_proto_enumTypes[0]
+ return &file_usp_record_1_3_proto_enumTypes[0]
}
func (x Record_PayloadSecurity) Number() protoreflect.EnumNumber {
@@ -120,7 +114,7 @@ func (x Record_PayloadSecurity) Number() protoreflect.EnumNumber {
// Deprecated: Use Record_PayloadSecurity.Descriptor instead.
func (Record_PayloadSecurity) EnumDescriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{0, 0}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{0, 0}
}
type SessionContextRecord_PayloadSARState int32
@@ -159,11 +153,11 @@ func (x SessionContextRecord_PayloadSARState) String() string {
}
func (SessionContextRecord_PayloadSARState) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_record_1_2_proto_enumTypes[1].Descriptor()
+ return file_usp_record_1_3_proto_enumTypes[1].Descriptor()
}
func (SessionContextRecord_PayloadSARState) Type() protoreflect.EnumType {
- return &file_usp_record_1_2_proto_enumTypes[1]
+ return &file_usp_record_1_3_proto_enumTypes[1]
}
func (x SessionContextRecord_PayloadSARState) Number() protoreflect.EnumNumber {
@@ -172,7 +166,7 @@ func (x SessionContextRecord_PayloadSARState) Number() protoreflect.EnumNumber {
// Deprecated: Use SessionContextRecord_PayloadSARState.Descriptor instead.
func (SessionContextRecord_PayloadSARState) EnumDescriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{2, 0}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{2, 0}
}
type MQTTConnectRecord_MQTTVersion int32
@@ -205,11 +199,11 @@ func (x MQTTConnectRecord_MQTTVersion) String() string {
}
func (MQTTConnectRecord_MQTTVersion) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_record_1_2_proto_enumTypes[2].Descriptor()
+ return file_usp_record_1_3_proto_enumTypes[2].Descriptor()
}
func (MQTTConnectRecord_MQTTVersion) Type() protoreflect.EnumType {
- return &file_usp_record_1_2_proto_enumTypes[2]
+ return &file_usp_record_1_3_proto_enumTypes[2]
}
func (x MQTTConnectRecord_MQTTVersion) Number() protoreflect.EnumNumber {
@@ -218,7 +212,7 @@ func (x MQTTConnectRecord_MQTTVersion) Number() protoreflect.EnumNumber {
// Deprecated: Use MQTTConnectRecord_MQTTVersion.Descriptor instead.
func (MQTTConnectRecord_MQTTVersion) EnumDescriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{4, 0}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{4, 0}
}
type STOMPConnectRecord_STOMPVersion int32
@@ -248,11 +242,11 @@ func (x STOMPConnectRecord_STOMPVersion) String() string {
}
func (STOMPConnectRecord_STOMPVersion) Descriptor() protoreflect.EnumDescriptor {
- return file_usp_record_1_2_proto_enumTypes[3].Descriptor()
+ return file_usp_record_1_3_proto_enumTypes[3].Descriptor()
}
func (STOMPConnectRecord_STOMPVersion) Type() protoreflect.EnumType {
- return &file_usp_record_1_2_proto_enumTypes[3]
+ return &file_usp_record_1_3_proto_enumTypes[3]
}
func (x STOMPConnectRecord_STOMPVersion) Number() protoreflect.EnumNumber {
@@ -261,7 +255,7 @@ func (x STOMPConnectRecord_STOMPVersion) Number() protoreflect.EnumNumber {
// Deprecated: Use STOMPConnectRecord_STOMPVersion.Descriptor instead.
func (STOMPConnectRecord_STOMPVersion) EnumDescriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{5, 0}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{5, 0}
}
type Record struct {
@@ -283,13 +277,14 @@ type Record struct {
// *Record_MqttConnect
// *Record_StompConnect
// *Record_Disconnect
+ // *Record_UdsConnect
RecordType isRecord_RecordType `protobuf_oneof:"record_type"`
}
func (x *Record) Reset() {
*x = Record{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[0]
+ mi := &file_usp_record_1_3_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -302,7 +297,7 @@ func (x *Record) String() string {
func (*Record) ProtoMessage() {}
func (x *Record) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[0]
+ mi := &file_usp_record_1_3_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -315,7 +310,7 @@ func (x *Record) ProtoReflect() protoreflect.Message {
// Deprecated: Use Record.ProtoReflect.Descriptor instead.
func (*Record) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{0}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{0}
}
func (x *Record) GetVersion() string {
@@ -409,6 +404,13 @@ func (x *Record) GetDisconnect() *DisconnectRecord {
return nil
}
+func (x *Record) GetUdsConnect() *UDSConnectRecord {
+ if x, ok := x.GetRecordType().(*Record_UdsConnect); ok {
+ return x.UdsConnect
+ }
+ return nil
+}
+
type isRecord_RecordType interface {
isRecord_RecordType()
}
@@ -437,6 +439,10 @@ type Record_Disconnect struct {
Disconnect *DisconnectRecord `protobuf:"bytes,12,opt,name=disconnect,proto3,oneof"`
}
+type Record_UdsConnect struct {
+ UdsConnect *UDSConnectRecord `protobuf:"bytes,13,opt,name=uds_connect,json=udsConnect,proto3,oneof"`
+}
+
func (*Record_NoSessionContext) isRecord_RecordType() {}
func (*Record_SessionContext) isRecord_RecordType() {}
@@ -449,6 +455,8 @@ func (*Record_StompConnect) isRecord_RecordType() {}
func (*Record_Disconnect) isRecord_RecordType() {}
+func (*Record_UdsConnect) isRecord_RecordType() {}
+
type NoSessionContextRecord struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -460,7 +468,7 @@ type NoSessionContextRecord struct {
func (x *NoSessionContextRecord) Reset() {
*x = NoSessionContextRecord{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[1]
+ mi := &file_usp_record_1_3_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -473,7 +481,7 @@ func (x *NoSessionContextRecord) String() string {
func (*NoSessionContextRecord) ProtoMessage() {}
func (x *NoSessionContextRecord) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[1]
+ mi := &file_usp_record_1_3_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -486,7 +494,7 @@ func (x *NoSessionContextRecord) ProtoReflect() protoreflect.Message {
// Deprecated: Use NoSessionContextRecord.ProtoReflect.Descriptor instead.
func (*NoSessionContextRecord) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{1}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{1}
}
func (x *NoSessionContextRecord) GetPayload() []byte {
@@ -513,7 +521,7 @@ type SessionContextRecord struct {
func (x *SessionContextRecord) Reset() {
*x = SessionContextRecord{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[2]
+ mi := &file_usp_record_1_3_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -526,7 +534,7 @@ func (x *SessionContextRecord) String() string {
func (*SessionContextRecord) ProtoMessage() {}
func (x *SessionContextRecord) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[2]
+ mi := &file_usp_record_1_3_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -539,7 +547,7 @@ func (x *SessionContextRecord) ProtoReflect() protoreflect.Message {
// Deprecated: Use SessionContextRecord.ProtoReflect.Descriptor instead.
func (*SessionContextRecord) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{2}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{2}
}
func (x *SessionContextRecord) GetSessionId() uint64 {
@@ -600,7 +608,7 @@ type WebSocketConnectRecord struct {
func (x *WebSocketConnectRecord) Reset() {
*x = WebSocketConnectRecord{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[3]
+ mi := &file_usp_record_1_3_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -613,7 +621,7 @@ func (x *WebSocketConnectRecord) String() string {
func (*WebSocketConnectRecord) ProtoMessage() {}
func (x *WebSocketConnectRecord) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[3]
+ mi := &file_usp_record_1_3_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -626,7 +634,7 @@ func (x *WebSocketConnectRecord) ProtoReflect() protoreflect.Message {
// Deprecated: Use WebSocketConnectRecord.ProtoReflect.Descriptor instead.
func (*WebSocketConnectRecord) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{3}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{3}
}
type MQTTConnectRecord struct {
@@ -641,7 +649,7 @@ type MQTTConnectRecord struct {
func (x *MQTTConnectRecord) Reset() {
*x = MQTTConnectRecord{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[4]
+ mi := &file_usp_record_1_3_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -654,7 +662,7 @@ func (x *MQTTConnectRecord) String() string {
func (*MQTTConnectRecord) ProtoMessage() {}
func (x *MQTTConnectRecord) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[4]
+ mi := &file_usp_record_1_3_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -667,7 +675,7 @@ func (x *MQTTConnectRecord) ProtoReflect() protoreflect.Message {
// Deprecated: Use MQTTConnectRecord.ProtoReflect.Descriptor instead.
func (*MQTTConnectRecord) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{4}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{4}
}
func (x *MQTTConnectRecord) GetVersion() MQTTConnectRecord_MQTTVersion {
@@ -696,7 +704,7 @@ type STOMPConnectRecord struct {
func (x *STOMPConnectRecord) Reset() {
*x = STOMPConnectRecord{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[5]
+ mi := &file_usp_record_1_3_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -709,7 +717,7 @@ func (x *STOMPConnectRecord) String() string {
func (*STOMPConnectRecord) ProtoMessage() {}
func (x *STOMPConnectRecord) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[5]
+ mi := &file_usp_record_1_3_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -722,7 +730,7 @@ func (x *STOMPConnectRecord) ProtoReflect() protoreflect.Message {
// Deprecated: Use STOMPConnectRecord.ProtoReflect.Descriptor instead.
func (*STOMPConnectRecord) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{5}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{5}
}
func (x *STOMPConnectRecord) GetVersion() STOMPConnectRecord_STOMPVersion {
@@ -739,6 +747,44 @@ func (x *STOMPConnectRecord) GetSubscribedDestination() string {
return ""
}
+type UDSConnectRecord struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *UDSConnectRecord) Reset() {
+ *x = UDSConnectRecord{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_usp_record_1_3_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UDSConnectRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UDSConnectRecord) ProtoMessage() {}
+
+func (x *UDSConnectRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_usp_record_1_3_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UDSConnectRecord.ProtoReflect.Descriptor instead.
+func (*UDSConnectRecord) Descriptor() ([]byte, []int) {
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{6}
+}
+
type DisconnectRecord struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -751,7 +797,7 @@ type DisconnectRecord struct {
func (x *DisconnectRecord) Reset() {
*x = DisconnectRecord{}
if protoimpl.UnsafeEnabled {
- mi := &file_usp_record_1_2_proto_msgTypes[6]
+ mi := &file_usp_record_1_3_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -764,7 +810,7 @@ func (x *DisconnectRecord) String() string {
func (*DisconnectRecord) ProtoMessage() {}
func (x *DisconnectRecord) ProtoReflect() protoreflect.Message {
- mi := &file_usp_record_1_2_proto_msgTypes[6]
+ mi := &file_usp_record_1_3_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -777,7 +823,7 @@ func (x *DisconnectRecord) ProtoReflect() protoreflect.Message {
// Deprecated: Use DisconnectRecord.ProtoReflect.Descriptor instead.
func (*DisconnectRecord) Descriptor() ([]byte, []int) {
- return file_usp_record_1_2_proto_rawDescGZIP(), []int{6}
+ return file_usp_record_1_3_proto_rawDescGZIP(), []int{7}
}
func (x *DisconnectRecord) GetReason() string {
@@ -794,12 +840,12 @@ func (x *DisconnectRecord) GetReasonCode() uint32 {
return 0
}
-var File_usp_record_1_2_proto protoreflect.FileDescriptor
+var File_usp_record_1_3_proto protoreflect.FileDescriptor
-var file_usp_record_1_2_proto_rawDesc = []byte{
- 0x0a, 0x14, 0x75, 0x73, 0x70, 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2d, 0x31, 0x2d, 0x32,
+var file_usp_record_1_3_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x75, 0x73, 0x70, 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2d, 0x31, 0x2d, 0x33,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f,
- 0x72, 0x64, 0x22, 0xe0, 0x05, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x18, 0x0a,
+ 0x72, 0x64, 0x22, 0xa1, 0x06, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x18, 0x0a,
0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x69, 0x64,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x6f, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07,
@@ -841,88 +887,94 @@ var file_usp_record_1_2_proto_rawDesc = []byte{
0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x64,
- 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x50, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0d, 0x0a, 0x09,
- 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54,
- 0x4c, 0x53, 0x31, 0x32, 0x10, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x32, 0x0a, 0x16, 0x4e, 0x6f, 0x53, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12,
- 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbd, 0x03, 0x0a, 0x14, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x65, 0x63, 0x6f,
- 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49,
- 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65,
- 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x69,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65,
- 0x64, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69,
- 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x49, 0x64, 0x12, 0x5c, 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c,
- 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x61, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
- 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52,
- 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x41, 0x52,
- 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x61,
- 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x62, 0x0a, 0x14, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
- 0x64, 0x72, 0x65, 0x63, 0x5f, 0x73, 0x61, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06,
+ 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x3f, 0x0a, 0x0b, 0x75, 0x64, 0x73,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x55, 0x44, 0x53, 0x43,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x48, 0x00, 0x52, 0x0a,
+ 0x75, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x22, 0x2b, 0x0a, 0x0f, 0x50, 0x61,
+ 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0d, 0x0a,
+ 0x09, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05,
+ 0x54, 0x4c, 0x53, 0x31, 0x32, 0x10, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x32, 0x0a, 0x16, 0x4e, 0x6f, 0x53, 0x65, 0x73, 0x73,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbd, 0x03, 0x0a, 0x14, 0x53,
+ 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x65, 0x63,
+ 0x6f, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
+ 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+ 0x65, 0x64, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6d,
+ 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x65, 0x74,
+ 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x49, 0x64, 0x12, 0x5c, 0x0a, 0x11, 0x70, 0x61, 0x79,
+ 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x61, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72,
0x64, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74,
0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x41,
- 0x52, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x12, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x72,
- 0x65, 0x63, 0x53, 0x61, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61,
- 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x43, 0x0a, 0x0f, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53,
- 0x41, 0x52, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10,
- 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
- 0x49, 0x4e, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x43,
- 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x22, 0x18, 0x0a, 0x16, 0x57, 0x65, 0x62,
- 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x22, 0xa6, 0x01, 0x0a, 0x11, 0x4d, 0x51, 0x54, 0x54, 0x43, 0x6f, 0x6e, 0x6e,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x75, 0x73, 0x70,
- 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x51, 0x54, 0x54, 0x43, 0x6f, 0x6e, 0x6e,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x51, 0x54, 0x54, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29,
- 0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x70,
- 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72,
- 0x69, 0x62, 0x65, 0x64, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x21, 0x0a, 0x0b, 0x4d, 0x51, 0x54,
- 0x54, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x33, 0x5f, 0x31,
- 0x5f, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x35, 0x10, 0x01, 0x22, 0xac, 0x01, 0x0a,
- 0x12, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x12, 0x45, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72,
- 0x64, 0x2e, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x16, 0x73, 0x75,
- 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x75, 0x62, 0x73,
- 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x22, 0x18, 0x0a, 0x0c, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x56, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x22, 0x4b, 0x0a, 0x10, 0x44,
- 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12,
- 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x73, 0x6f,
- 0x6e, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x52, 0x0a, 0x72, 0x65,
- 0x61, 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x0e, 0x5a, 0x0c, 0x2e, 0x2f, 0x75, 0x73,
- 0x70, 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x52, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53,
+ 0x61, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x62, 0x0a, 0x14, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+ 0x61, 0x64, 0x72, 0x65, 0x63, 0x5f, 0x73, 0x61, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f,
+ 0x72, 0x64, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78,
+ 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53,
+ 0x41, 0x52, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x12, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
+ 0x72, 0x65, 0x63, 0x53, 0x61, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70,
+ 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61,
+ 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x43, 0x0a, 0x0f, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
+ 0x53, 0x41, 0x52, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45,
+ 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x01, 0x12, 0x0d, 0x0a,
+ 0x09, 0x49, 0x4e, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08,
+ 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x22, 0x18, 0x0a, 0x16, 0x57, 0x65,
+ 0x62, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x22, 0xa6, 0x01, 0x0a, 0x11, 0x4d, 0x51, 0x54, 0x54, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x75, 0x73,
+ 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x51, 0x54, 0x54, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4d, 0x51, 0x54, 0x54, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x29, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x5f, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63,
+ 0x72, 0x69, 0x62, 0x65, 0x64, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x21, 0x0a, 0x0b, 0x4d, 0x51,
+ 0x54, 0x54, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x33, 0x5f,
+ 0x31, 0x5f, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x35, 0x10, 0x01, 0x22, 0xac, 0x01,
+ 0x0a, 0x12, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x12, 0x45, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x75, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x63, 0x6f,
+ 0x72, 0x64, 0x2e, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x16, 0x73,
+ 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x75, 0x62,
+ 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0x18, 0x0a, 0x0c, 0x53, 0x54, 0x4f, 0x4d, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x56, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x22, 0x12, 0x0a, 0x10,
+ 0x55, 0x44, 0x53, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x22, 0x4b, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x63, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x07, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x0e, 0x5a,
+ 0x0c, 0x2e, 0x2f, 0x75, 0x73, 0x70, 0x2d, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
- file_usp_record_1_2_proto_rawDescOnce sync.Once
- file_usp_record_1_2_proto_rawDescData = file_usp_record_1_2_proto_rawDesc
+ file_usp_record_1_3_proto_rawDescOnce sync.Once
+ file_usp_record_1_3_proto_rawDescData = file_usp_record_1_3_proto_rawDesc
)
-func file_usp_record_1_2_proto_rawDescGZIP() []byte {
- file_usp_record_1_2_proto_rawDescOnce.Do(func() {
- file_usp_record_1_2_proto_rawDescData = protoimpl.X.CompressGZIP(file_usp_record_1_2_proto_rawDescData)
+func file_usp_record_1_3_proto_rawDescGZIP() []byte {
+ file_usp_record_1_3_proto_rawDescOnce.Do(func() {
+ file_usp_record_1_3_proto_rawDescData = protoimpl.X.CompressGZIP(file_usp_record_1_3_proto_rawDescData)
})
- return file_usp_record_1_2_proto_rawDescData
+ return file_usp_record_1_3_proto_rawDescData
}
-var file_usp_record_1_2_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
-var file_usp_record_1_2_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
-var file_usp_record_1_2_proto_goTypes = []interface{}{
+var file_usp_record_1_3_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_usp_record_1_3_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_usp_record_1_3_proto_goTypes = []interface{}{
(Record_PayloadSecurity)(0), // 0: usp_record.Record.PayloadSecurity
(SessionContextRecord_PayloadSARState)(0), // 1: usp_record.SessionContextRecord.PayloadSARState
(MQTTConnectRecord_MQTTVersion)(0), // 2: usp_record.MQTTConnectRecord.MQTTVersion
@@ -933,34 +985,36 @@ var file_usp_record_1_2_proto_goTypes = []interface{}{
(*WebSocketConnectRecord)(nil), // 7: usp_record.WebSocketConnectRecord
(*MQTTConnectRecord)(nil), // 8: usp_record.MQTTConnectRecord
(*STOMPConnectRecord)(nil), // 9: usp_record.STOMPConnectRecord
- (*DisconnectRecord)(nil), // 10: usp_record.DisconnectRecord
+ (*UDSConnectRecord)(nil), // 10: usp_record.UDSConnectRecord
+ (*DisconnectRecord)(nil), // 11: usp_record.DisconnectRecord
}
-var file_usp_record_1_2_proto_depIdxs = []int32{
+var file_usp_record_1_3_proto_depIdxs = []int32{
0, // 0: usp_record.Record.payload_security:type_name -> usp_record.Record.PayloadSecurity
5, // 1: usp_record.Record.no_session_context:type_name -> usp_record.NoSessionContextRecord
6, // 2: usp_record.Record.session_context:type_name -> usp_record.SessionContextRecord
7, // 3: usp_record.Record.websocket_connect:type_name -> usp_record.WebSocketConnectRecord
8, // 4: usp_record.Record.mqtt_connect:type_name -> usp_record.MQTTConnectRecord
9, // 5: usp_record.Record.stomp_connect:type_name -> usp_record.STOMPConnectRecord
- 10, // 6: usp_record.Record.disconnect:type_name -> usp_record.DisconnectRecord
- 1, // 7: usp_record.SessionContextRecord.payload_sar_state:type_name -> usp_record.SessionContextRecord.PayloadSARState
- 1, // 8: usp_record.SessionContextRecord.payloadrec_sar_state:type_name -> usp_record.SessionContextRecord.PayloadSARState
- 2, // 9: usp_record.MQTTConnectRecord.version:type_name -> usp_record.MQTTConnectRecord.MQTTVersion
- 3, // 10: usp_record.STOMPConnectRecord.version:type_name -> usp_record.STOMPConnectRecord.STOMPVersion
- 11, // [11:11] is the sub-list for method output_type
- 11, // [11:11] is the sub-list for method input_type
- 11, // [11:11] is the sub-list for extension type_name
- 11, // [11:11] is the sub-list for extension extendee
- 0, // [0:11] is the sub-list for field type_name
+ 11, // 6: usp_record.Record.disconnect:type_name -> usp_record.DisconnectRecord
+ 10, // 7: usp_record.Record.uds_connect:type_name -> usp_record.UDSConnectRecord
+ 1, // 8: usp_record.SessionContextRecord.payload_sar_state:type_name -> usp_record.SessionContextRecord.PayloadSARState
+ 1, // 9: usp_record.SessionContextRecord.payloadrec_sar_state:type_name -> usp_record.SessionContextRecord.PayloadSARState
+ 2, // 10: usp_record.MQTTConnectRecord.version:type_name -> usp_record.MQTTConnectRecord.MQTTVersion
+ 3, // 11: usp_record.STOMPConnectRecord.version:type_name -> usp_record.STOMPConnectRecord.STOMPVersion
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
}
-func init() { file_usp_record_1_2_proto_init() }
-func file_usp_record_1_2_proto_init() {
- if File_usp_record_1_2_proto != nil {
+func init() { file_usp_record_1_3_proto_init() }
+func file_usp_record_1_3_proto_init() {
+ if File_usp_record_1_3_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
- file_usp_record_1_2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Record); i {
case 0:
return &v.state
@@ -972,7 +1026,7 @@ func file_usp_record_1_2_proto_init() {
return nil
}
}
- file_usp_record_1_2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NoSessionContextRecord); i {
case 0:
return &v.state
@@ -984,7 +1038,7 @@ func file_usp_record_1_2_proto_init() {
return nil
}
}
- file_usp_record_1_2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SessionContextRecord); i {
case 0:
return &v.state
@@ -996,7 +1050,7 @@ func file_usp_record_1_2_proto_init() {
return nil
}
}
- file_usp_record_1_2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*WebSocketConnectRecord); i {
case 0:
return &v.state
@@ -1008,7 +1062,7 @@ func file_usp_record_1_2_proto_init() {
return nil
}
}
- file_usp_record_1_2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MQTTConnectRecord); i {
case 0:
return &v.state
@@ -1020,7 +1074,7 @@ func file_usp_record_1_2_proto_init() {
return nil
}
}
- file_usp_record_1_2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*STOMPConnectRecord); i {
case 0:
return &v.state
@@ -1032,7 +1086,19 @@ func file_usp_record_1_2_proto_init() {
return nil
}
}
- file_usp_record_1_2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_usp_record_1_3_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UDSConnectRecord); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_usp_record_1_3_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DisconnectRecord); i {
case 0:
return &v.state
@@ -1045,31 +1111,32 @@ func file_usp_record_1_2_proto_init() {
}
}
}
- file_usp_record_1_2_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_usp_record_1_3_proto_msgTypes[0].OneofWrappers = []interface{}{
(*Record_NoSessionContext)(nil),
(*Record_SessionContext)(nil),
(*Record_WebsocketConnect)(nil),
(*Record_MqttConnect)(nil),
(*Record_StompConnect)(nil),
(*Record_Disconnect)(nil),
+ (*Record_UdsConnect)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_usp_record_1_2_proto_rawDesc,
+ RawDescriptor: file_usp_record_1_3_proto_rawDesc,
NumEnums: 4,
- NumMessages: 7,
+ NumMessages: 8,
NumExtensions: 0,
NumServices: 0,
},
- GoTypes: file_usp_record_1_2_proto_goTypes,
- DependencyIndexes: file_usp_record_1_2_proto_depIdxs,
- EnumInfos: file_usp_record_1_2_proto_enumTypes,
- MessageInfos: file_usp_record_1_2_proto_msgTypes,
+ GoTypes: file_usp_record_1_3_proto_goTypes,
+ DependencyIndexes: file_usp_record_1_3_proto_depIdxs,
+ EnumInfos: file_usp_record_1_3_proto_enumTypes,
+ MessageInfos: file_usp_record_1_3_proto_msgTypes,
}.Build()
- File_usp_record_1_2_proto = out.File
- file_usp_record_1_2_proto_rawDesc = nil
- file_usp_record_1_2_proto_goTypes = nil
- file_usp_record_1_2_proto_depIdxs = nil
+ File_usp_record_1_3_proto = out.File
+ file_usp_record_1_3_proto_rawDesc = nil
+ file_usp_record_1_3_proto_goTypes = nil
+ file_usp_record_1_3_proto_depIdxs = nil
}
diff --git a/backend/services/controller/internal/usp/usp_record/usp-record-1-2.proto b/backend/services/controller/internal/usp/usp_record/usp-record-1-3.proto
old mode 100755
new mode 100644
similarity index 88%
rename from backend/services/controller/internal/usp/usp_record/usp-record-1-2.proto
rename to backend/services/controller/internal/usp/usp_record/usp-record-1-3.proto
index 430ca96..03c6a9e
--- a/backend/services/controller/internal/usp/usp_record/usp-record-1-2.proto
+++ b/backend/services/controller/internal/usp/usp_record/usp-record-1-3.proto
@@ -3,11 +3,11 @@ syntax = "proto3";
//**************************************************************************
// TR-369 USP Record Protocol Buffer Schema
//
-// Copyright (c) 2017-2018, Broadband Forum
+// Copyright (c) 2017-2022, Broadband Forum
//
// The undersigned members have elected to grant the copyright to
// their contributed material used in this software:
-// Copyright (c) 2017-2018 ARRIS Enterprises, LLC.
+// Copyright (c) 2017-2022 ARRIS Enterprises, LLC.
//
// Redistribution and use in source and binary forms, with or
// without modification, are permitted provided that the following
@@ -47,14 +47,8 @@ syntax = "proto3";
// Any moral rights which are necessary to exercise under the above
// license grant are also deemed granted under this license.
//
-//
-// | Version | Name | Date |
-// | TR-369 1.0.0 | User Services Platform | APR, 2018 |
-// | TR-369 1.0.1 | User Services Platform | JUN, 2018 |
-// | TR-369 1.0.2 | User Services Platform | OCT, 2018 |
-// | TR-369 1.1 | User Services Platform | SEP, 2019 |
-//
-// BBF software release registry: http://www.broadband-forum.org/software
+// BBF software release registry:
+// https://www.broadband-forum.org/software-releases
//**************************************************************************
package usp_record;
@@ -76,6 +70,7 @@ message Record {
MQTTConnectRecord mqtt_connect = 10;
STOMPConnectRecord stomp_connect = 11;
DisconnectRecord disconnect = 12;
+ UDSConnectRecord uds_connect = 13;
}
enum PayloadSecurity {
@@ -128,6 +123,10 @@ message STOMPConnectRecord {
}
}
+message UDSConnectRecord {
+ // An empty message
+}
+
message DisconnectRecord {
string reason = 1;
fixed32 reason_code = 2;
diff --git a/backend/services/mtp/adapter/internal/db/device.go b/backend/services/mtp/adapter/internal/db/device.go
index c78ac6a..50ef7e8 100644
--- a/backend/services/mtp/adapter/internal/db/device.go
+++ b/backend/services/mtp/adapter/internal/db/device.go
@@ -4,6 +4,7 @@ import (
"log"
"go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
@@ -41,6 +42,18 @@ type Device struct {
Cwmp Status
}
+type DevicesList struct {
+ Devices []Device `json:"devices" bson:"documents"`
+ Total int64 `json:"total" bson:"totalCount"`
+}
+
+type FilterOptions struct {
+ Models []string `json:"models"`
+ ProductClasses []string `json:"productClasses"`
+ Vendors []string `json:"vendors"`
+ Versions []string `json:"versions"`
+}
+
func (d *Database) CreateDevice(device Device) error {
var result bson.M
var deviceExistent Device
@@ -100,32 +113,86 @@ func (d *Database) CreateDevice(device Device) error {
}
return err
}
-func (d *Database) RetrieveDevices(filter bson.A) ([]Device, error) {
+func (d *Database) RetrieveDevices(filter bson.A) (*DevicesList, error) {
- var results []Device
+ var results []DevicesList
cursor, err := d.devices.Aggregate(d.ctx, filter)
if err != nil {
- return results, err
+ return nil, err
}
-
if cursor.Err() != nil {
- return results, cursor.Err()
+ return nil, cursor.Err()
+ }
+ defer cursor.Close(d.ctx)
+ if err := cursor.All(d.ctx, &results); err != nil {
+ log.Println(err)
+ return nil, err
}
- for cursor.Next(d.ctx) {
- var device Device
+ //log.Printf("results: %++v", results)
- err := cursor.Decode(&device)
- if err != nil {
- log.Println("Error to decode device info fields")
- continue
- }
+ if len(results) == 0 { return &DevicesList{Devices: []Device{}}, nil }; return &results[0], nil
+}
- results = append(results, device)
+func (d *Database) RetrieveDeviceFilterOptions() (FilterOptions, error) {
+ filter := bson.A{
+ bson.D{
+ {"$group",
+ bson.D{
+ {"_id", primitive.Null{}},
+ {"vendors", bson.D{{"$addToSet", "$vendor"}}},
+ {"versions", bson.D{{"$addToSet", "$version"}}},
+ {"productClasses", bson.D{{"$addToSet", "$productclass"}}},
+ {"models", bson.D{{"$addToSet", "$model"}}},
+ },
+ },
+ },
+ bson.D{
+ {"$project",
+ bson.D{
+ {"_id", 0},
+ {"vendors", 1},
+ {"versions", 1},
+ {"productClasses", 1},
+ {"models", 1},
+ },
+ },
+ },
}
- return results, err
+ var results []FilterOptions
+ cursor, err := d.devices.Aggregate(d.ctx, filter)
+ if err != nil {
+ log.Println(err)
+ return FilterOptions{}, err
+ }
+ defer cursor.Close(d.ctx)
+
+ if err := cursor.All(d.ctx, &results); err != nil {
+ log.Println(err)
+ return FilterOptions{}, err
+ }
+
+ if len(results) > 0 {
+ return results[0], nil
+ } else {
+ return FilterOptions{
+ Models: []string{},
+ ProductClasses: []string{},
+ Vendors: []string{},
+ Versions: []string{},
+ }, nil
+ }
+}
+
+func (d *Database) DeleteDevices(filter bson.D) (int64, error) {
+
+ result, err := d.devices.DeleteMany(d.ctx, filter)
+ if err != nil {
+ log.Println(err); return 0, err
+ }
+ return result.DeletedCount, nil
}
func (d *Database) RetrieveDevice(sn string) (Device, error) {
diff --git a/backend/services/mtp/adapter/internal/reqs/reqs.go b/backend/services/mtp/adapter/internal/reqs/reqs.go
index 3da0fa9..6a0260f 100644
--- a/backend/services/mtp/adapter/internal/reqs/reqs.go
+++ b/backend/services/mtp/adapter/internal/reqs/reqs.go
@@ -52,18 +52,135 @@ func StartRequestsListener(ctx context.Context, nc *nats.Conn, db db.Database) {
nc.QueueSubscribe(local.ADAPTER_SUBJECT+"devices.retrieve", local.ADAPTER_QUEUE, func(msg *nats.Msg) {
- var filter bson.A
+ var criteria map[string]interface{}
- err := json.Unmarshal(msg.Data, &filter)
+ err := json.Unmarshal(msg.Data, &criteria)
if err != nil {
respondMsg(msg.Respond, 500, err.Error())
}
+ //log.Println(criteria)
+ propertiesFilter := bson.D{{}}
+
+ vendorFilter := criteria["vendor"]
+ if vendorFilter != nil {
+ log.Println("Vendor filter", vendorFilter)
+ propertiesFilter = append(propertiesFilter, bson.E{Key: "vendor", Value: vendorFilter})
+ }
+
+ versionFilter := criteria["version"]
+ if versionFilter != nil {
+ log.Println("Version filter", versionFilter)
+ propertiesFilter = append(propertiesFilter, bson.E{Key: "version", Value: versionFilter})
+ }
+
+ typeFilter := criteria["productClass"]
+ if typeFilter != nil {
+ log.Println("Type filter", typeFilter)
+ propertiesFilter = append(propertiesFilter, bson.E{Key: "productclass", Value: typeFilter})
+ }
+
+ aliasFilter := criteria["alias"]
+ if aliasFilter != nil {
+ log.Println("Alias filter", aliasFilter)
+ propertiesFilter = append(propertiesFilter, bson.E{Key: "alias", Value: aliasFilter})
+ }
+
+ modelFilter := criteria["model"]
+ if modelFilter != nil {
+ log.Println("Model filter", modelFilter)
+ propertiesFilter = append(propertiesFilter, bson.E{Key: "model", Value: modelFilter})
+ }
+
+ statusFilter := criteria["status"]
+ if statusFilter != nil {
+ log.Println("Status filter", statusFilter)
+ propertiesFilter = append(propertiesFilter, bson.E{Key: "status", Value: statusFilter})
+ }
+
+ filter := bson.A{
+ bson.D{
+ {"$match",
+ propertiesFilter,
+ },
+ },
+ bson.D{
+ {"$facet",
+ bson.D{
+ {"totalCount",
+ bson.A{
+ bson.D{{"$count", "count"}},
+ },
+ },
+ {"documents",
+ bson.A{
+ bson.D{{"$sort", bson.D{{"status", criteria["status_order"]}}}},
+ bson.D{{"$skip", criteria["skip"]}},
+ bson.D{{"$limit", criteria["limit"]}},
+ },
+ },
+ },
+ },
+ },
+ bson.D{
+ {"$project",
+ bson.D{
+ {"totalCount",
+ bson.D{
+ {"$arrayElemAt",
+ bson.A{
+ "$totalCount.count",
+ 0,
+ },
+ },
+ },
+ },
+ {"documents", 1},
+ },
+ },
+ },
+ }
+
devicesList, err := db.RetrieveDevices(filter)
if err != nil {
respondMsg(msg.Respond, 500, err.Error())
}
- respondMsg(msg.Respond, 200, devicesList)
+ if err == nil { respondMsg(msg.Respond, 200, devicesList) }
+ })
+
+ nc.QueueSubscribe(local.ADAPTER_SUBJECT+"devices.delete", local.ADAPTER_QUEUE, func(msg *nats.Msg) {
+
+ var serialNumbersList []string
+
+ err := json.Unmarshal(msg.Data, &serialNumbersList)
+ if err != nil {
+ respondMsg(msg.Respond, 500, err.Error()); return
+ }
+
+ var criteria bson.A
+
+ for _, sn := range serialNumbersList {
+ criteria = append(criteria, bson.D{{"sn", sn}})
+ }
+
+ // Create the filter with the $or operator
+ filter := bson.D{
+ {"$or", criteria},
+ }
+
+ deletedCount, err := db.DeleteDevices(filter)
+ if err != nil {
+ respondMsg(msg.Respond, 500, err.Error()); return
+ }
+ respondMsg(msg.Respond, 200, deletedCount)
+ })
+
+ nc.QueueSubscribe(local.ADAPTER_SUBJECT+"devices.filterOptions", local.ADAPTER_QUEUE, func(msg *nats.Msg) {
+ result, err := db.RetrieveDeviceFilterOptions()
+ if err != nil {
+ respondMsg(msg.Respond, 500, err.Error()); return
+ }
+ respondMsg(msg.Respond, 200, result)
})
nc.QueueSubscribe(local.ADAPTER_SUBJECT+"devices.class", local.ADAPTER_QUEUE, func(msg *nats.Msg) {
diff --git a/backend/services/mtp/stomp-adapter/internal/bridge/bridge.go b/backend/services/mtp/stomp-adapter/internal/bridge/bridge.go
index 6a5dbd7..06adfcc 100644
--- a/backend/services/mtp/stomp-adapter/internal/bridge/bridge.go
+++ b/backend/services/mtp/stomp-adapter/internal/bridge/bridge.go
@@ -8,10 +8,10 @@ import (
"strings"
"time"
- "github.com/go-stomp/stomp/v3"
- "github.com/go-stomp/stomp/v3/frame"
"github.com/nats-io/nats.go"
"github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/config"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
"golang.org/x/sys/unix"
)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/.gitattributes b/backend/services/mtp/stomp-adapter/internal/stomp/.gitattributes
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/.gitattributes
rename to backend/services/mtp/stomp-adapter/internal/stomp/.gitattributes
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/.gitignore b/backend/services/mtp/stomp-adapter/internal/stomp/.gitignore
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/.gitignore
rename to backend/services/mtp/stomp-adapter/internal/stomp/.gitignore
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/AUTHORS.md b/backend/services/mtp/stomp-adapter/internal/stomp/AUTHORS.md
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/AUTHORS.md
rename to backend/services/mtp/stomp-adapter/internal/stomp/AUTHORS.md
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/LICENSE.txt b/backend/services/mtp/stomp-adapter/internal/stomp/LICENSE.txt
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/LICENSE.txt
rename to backend/services/mtp/stomp-adapter/internal/stomp/LICENSE.txt
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/README.md b/backend/services/mtp/stomp-adapter/internal/stomp/README.md
similarity index 62%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/README.md
rename to backend/services/mtp/stomp-adapter/internal/stomp/README.md
index ed606e8..82e57df 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/README.md
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/README.md
@@ -2,8 +2,8 @@
Go language implementation of a STOMP client library.
-
-[](https://pkg.go.dev/github.com/go-stomp/stomp/v3)
+
+[](https://pkg.go.dev/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3)
Features:
@@ -15,10 +15,10 @@ Features:
## Usage Instructions
```
-go get github.com/go-stomp/stomp/v3
+go get github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3
```
-For API documentation, see https://pkg.go.dev/github.com/go-stomp/stomp/v3
+For API documentation, see https://pkg.go.dev/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3
Breaking changes between this previous version and the current version are
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/ack.go b/backend/services/mtp/stomp-adapter/internal/stomp/ack.go
similarity index 92%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/ack.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/ack.go
index ede59c0..401bacb 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/ack.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/ack.go
@@ -1,8 +1,6 @@
package stomp
-import (
- "github.com/go-stomp/stomp/v3/frame"
-)
+import "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
// The AckMode type is an enumeration of the acknowledgement modes for a
// STOMP subscription.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/breaking_changes.md b/backend/services/mtp/stomp-adapter/internal/stomp/breaking_changes.md
similarity index 70%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/breaking_changes.md
rename to backend/services/mtp/stomp-adapter/internal/stomp/breaking_changes.md
index b60209f..a05943a 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/breaking_changes.md
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/breaking_changes.md
@@ -15,7 +15,7 @@ The API it's stable the only breaking change is the import path.
Version 3:
```go
import (
- "github.com/go-stomp/stomp/v3"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp/v3"
)
```
@@ -37,7 +37,7 @@ import (
Version 2:
```go
import (
- "github.com/go-stomp/stomp"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp"
)
```
@@ -64,11 +64,11 @@ package, and the types moved are not needed in normal usage of the `stomp` packa
Version 2 of the stomp library makes use of functional options to provide a clean, flexible way
of specifying options in the following API calls:
-* [Dial()](http://godoc.org/github.com/go-stomp/stomp#Dial)
-* [Connect()](http://godoc.org/github.com/go-stomp/stomp#Connect)
-* [Conn.Send()](http://godoc.org/github.com/go-stomp/stomp#Conn.Send)
-* [Transaction.Send()](http://godoc.org/github.com/go-stomp/stomp#Transaction.Send)
-* [Conn.Subscribe()](http://godoc.org/github.com/go-stomp/stomp#Conn.Subscribe)
+* [Dial()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Dial)
+* [Connect()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Connect)
+* [Conn.Send()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Conn.Send)
+* [Transaction.Send()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Transaction.Send)
+* [Conn.Subscribe()](http://godoc.org/github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp#Conn.Subscribe)
The idea for this comes from Dave Cheney's very excellent blog post,
[Functional Options for Friendly APIs](http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis).
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/conn.go b/backend/services/mtp/stomp-adapter/internal/stomp/conn.go
similarity index 98%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/conn.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/conn.go
index 2abbeb5..020661d 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/conn.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/conn.go
@@ -8,7 +8,7 @@ import (
"sync"
"time"
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// Default time span to add to read/write heart-beat timeouts
@@ -396,16 +396,19 @@ func processLoop(c *Conn, writer *frame.Writer) {
}
sendFrame = false
} else {
- id, _ := req.Frame.Header.Contains(frame.Id)
+ // id, _ := req.Frame.Header.Contains(frame.Id)
// is this trying to be too clever -- add a receipt
// header so that when the server responds with a
// RECEIPT frame, the corresponding channel will be closed
- req.Frame.Header.Set(frame.Receipt, id)
+ // req.Frame.Header.Set(frame.Receipt, id)
+
+ // don't wait for a receipt frame from the server, just send the unsubscribe frame and go on
}
}
// frame to send, if enabled
if sendFrame {
+ //log.Println("Sending frame", req.Frame.Command)
err := writer.Write(req.Frame)
if err != nil {
sendError(channels, err)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/conn_options.go b/backend/services/mtp/stomp-adapter/internal/stomp/conn_options.go
similarity index 98%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/conn_options.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/conn_options.go
index 1422034..e972002 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/conn_options.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/conn_options.go
@@ -5,8 +5,8 @@ import (
"strings"
"time"
- "github.com/go-stomp/stomp/v3/frame"
- "github.com/go-stomp/stomp/v3/internal/log"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/internal/log"
)
// ConnOptions is an opaque structure used to collection options
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/errors.go b/backend/services/mtp/stomp-adapter/internal/stomp/errors.go
similarity index 95%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/errors.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/errors.go
index 913ca90..e941aec 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/errors.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/errors.go
@@ -1,7 +1,7 @@
package stomp
import (
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// Error values
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/ack.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/ack.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/ack.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/ack.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/command.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/command.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/command.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/command.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/encode.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/encode.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/encode.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/encode.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/errors.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/errors.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/errors.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/errors.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/frame.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/frame.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/frame.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/frame.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/header.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/header.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/header.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/header.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/heartbeat.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/heartbeat.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/heartbeat.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/heartbeat.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/reader.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/reader.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/reader.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/reader.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/writer.go b/backend/services/mtp/stomp-adapter/internal/stomp/frame/writer.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/frame/writer.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/frame/writer.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/id.go b/backend/services/mtp/stomp-adapter/internal/stomp/id.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/id.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/id.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/internal/log/stdlogger.go b/backend/services/mtp/stomp-adapter/internal/stomp/internal/log/stdlogger.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/internal/log/stdlogger.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/internal/log/stdlogger.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/logger.go b/backend/services/mtp/stomp-adapter/internal/stomp/logger.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/logger.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/logger.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/message.go b/backend/services/mtp/stomp-adapter/internal/stomp/message.go
similarity index 95%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/message.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/message.go
index e53f042..d752dd4 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/message.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/message.go
@@ -2,7 +2,8 @@ package stomp
import (
"io"
- "github.com/go-stomp/stomp/v3/frame"
+
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// A Message represents a message received from the STOMP server.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/send_options.go b/backend/services/mtp/stomp-adapter/internal/stomp/send_options.go
similarity index 94%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/send_options.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/send_options.go
index bd81b00..043155b 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/send_options.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/send_options.go
@@ -1,7 +1,7 @@
package stomp
import (
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// SendOpt contains options for for the Conn.Send and Transaction.Send functions.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/stomp.go b/backend/services/mtp/stomp-adapter/internal/stomp/stomp.go
similarity index 93%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/stomp.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/stomp.go
index bbe0863..b91625a 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/stomp.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/stomp.go
@@ -20,7 +20,6 @@ Disconnect method. This will perform a graceful shutdown sequence as specified i
Source code and other details for the project are available at GitHub:
- https://github.com/go-stomp/stomp
-
+ https://github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/go-stomp/stomp
*/
package stomp
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/subscribe_options.go b/backend/services/mtp/stomp-adapter/internal/stomp/subscribe_options.go
similarity index 92%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/subscribe_options.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/subscribe_options.go
index e5a5b18..4531645 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/subscribe_options.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/subscribe_options.go
@@ -1,7 +1,7 @@
package stomp
import (
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// SubscribeOpt contains options for for the Conn.Subscribe function.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/subscription.go b/backend/services/mtp/stomp-adapter/internal/stomp/subscription.go
similarity index 90%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/subscription.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/subscription.go
index 8e0fb63..a7128d5 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/subscription.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/subscription.go
@@ -7,7 +7,7 @@ import (
"sync/atomic"
"time"
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
const (
@@ -97,17 +97,21 @@ func (s *Subscription) Unsubscribe(opts ...func(*frame.Frame) error) error {
// We don't want to interfere with `s.C` since we might be "stealing"
// MESSAGEs or ERRORs from another goroutine, so use a sync.Cond to
// wait for the terminal state transition instead.
- s.closeMutex.Lock()
- for atomic.LoadInt32(&s.state) != subStateClosed {
- err = waitWithTimeout(s.closeCond, s.unsubscribeReceiptTimeout)
- if err != nil && errors.Is(err, &ErrUnsubscribeReceiptTimeout) {
- msg := s.subscriptionErrorMessage("channel unsubscribe receipt timeout")
- s.C <- msg
- return err
- }
- }
- s.closeMutex.Unlock()
- return err
+ // s.closeMutex.Lock()
+ // for atomic.LoadInt32(&s.state) != subStateClosed {
+ // err = waitWithTimeout(s.closeCond, s.unsubscribeReceiptTimeout)
+ // if err != nil && errors.Is(err, &ErrUnsubscribeReceiptTimeout) {
+ // msg := s.subscriptionErrorMessage("channel unsubscribe receipt timeout")
+ // s.C <- msg
+ // return err
+ // }
+ // }
+ // s.closeMutex.Unlock()
+ s.closeCond.L.Lock()
+ s.closeChannel(nil)
+ s.closeCond.L.Unlock()
+
+ return nil
}
func waitWithTimeout(cond *sync.Cond, timeout time.Duration) error {
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/transaction.go b/backend/services/mtp/stomp-adapter/internal/stomp/transaction.go
similarity index 98%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/transaction.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/transaction.go
index 0a8398f..65a6a29 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/transaction.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/transaction.go
@@ -1,7 +1,7 @@
package stomp
import (
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// A Transaction applies to the sending of messages to the STOMP server,
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/validator.go b/backend/services/mtp/stomp-adapter/internal/stomp/validator.go
similarity index 81%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/validator.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/validator.go
index 8e64a2c..954a73e 100644
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/validator.go
+++ b/backend/services/mtp/stomp-adapter/internal/stomp/validator.go
@@ -1,7 +1,7 @@
package stomp
import (
- "github.com/go-stomp/stomp/v3/frame"
+ "github.com/oktopUSP/oktopus/backend/services/mtp/stomp-adapter/internal/stomp/frame"
)
// Validator is an interface for validating STOMP frames.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/version.go b/backend/services/mtp/stomp-adapter/internal/stomp/version.go
similarity index 100%
rename from backend/services/mtp/stomp-adapter/vendor/github.com/go-stomp/stomp/v3/version.go
rename to backend/services/mtp/stomp-adapter/internal/stomp/version.go
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/.gitignore b/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/.gitignore
deleted file mode 100644
index e43b0f9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.DS_Store
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/LICENCE b/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/LICENCE
deleted file mode 100644
index e7ddd51..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/LICENCE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2013 John Barton
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/README.md b/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/README.md
deleted file mode 100644
index bfbe66a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/README.md
+++ /dev/null
@@ -1,202 +0,0 @@
-# GoDotEnv  [](https://goreportcard.com/report/github.com/joho/godotenv)
-
-A Go (golang) port of the Ruby [dotenv](https://github.com/bkeepers/dotenv) project (which loads env vars from a .env file).
-
-From the original Library:
-
-> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables.
->
-> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped.
-
-It can be used as a library (for loading in env for your own daemons etc.) or as a bin command.
-
-There is test coverage and CI for both linuxish and Windows environments, but I make no guarantees about the bin version working on Windows.
-
-## Installation
-
-As a library
-
-```shell
-go get github.com/joho/godotenv
-```
-
-or if you want to use it as a bin command
-
-go >= 1.17
-```shell
-go install github.com/joho/godotenv/cmd/godotenv@latest
-```
-
-go < 1.17
-```shell
-go get github.com/joho/godotenv/cmd/godotenv
-```
-
-## Usage
-
-Add your application configuration to your `.env` file in the root of your project:
-
-```shell
-S3_BUCKET=YOURS3BUCKET
-SECRET_KEY=YOURSECRETKEYGOESHERE
-```
-
-Then in your Go app you can do something like
-
-```go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/joho/godotenv"
-)
-
-func main() {
- err := godotenv.Load()
- if err != nil {
- log.Fatal("Error loading .env file")
- }
-
- s3Bucket := os.Getenv("S3_BUCKET")
- secretKey := os.Getenv("SECRET_KEY")
-
- // now do something with s3 or whatever
-}
-```
-
-If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import
-
-```go
-import _ "github.com/joho/godotenv/autoload"
-```
-
-While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit
-
-```go
-godotenv.Load("somerandomfile")
-godotenv.Load("filenumberone.env", "filenumbertwo.env")
-```
-
-If you want to be really fancy with your env file you can do comments and exports (below is a valid env file)
-
-```shell
-# I am a comment and that is OK
-SOME_VAR=someval
-FOO=BAR # comments at line end are OK too
-export BAR=BAZ
-```
-
-Or finally you can do YAML(ish) style
-
-```yaml
-FOO: bar
-BAR: baz
-```
-
-as a final aside, if you don't want godotenv munging your env you can just get a map back instead
-
-```go
-var myEnv map[string]string
-myEnv, err := godotenv.Read()
-
-s3Bucket := myEnv["S3_BUCKET"]
-```
-
-... or from an `io.Reader` instead of a local file
-
-```go
-reader := getRemoteFile()
-myEnv, err := godotenv.Parse(reader)
-```
-
-... or from a `string` if you so desire
-
-```go
-content := getRemoteFileContent()
-myEnv, err := godotenv.Unmarshal(content)
-```
-
-### Precedence & Conventions
-
-Existing envs take precedence of envs that are loaded later.
-
-The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use)
-for managing multiple environments (i.e. development, test, production)
-is to create an env named `{YOURAPP}_ENV` and load envs in this order:
-
-```go
-env := os.Getenv("FOO_ENV")
-if "" == env {
- env = "development"
-}
-
-godotenv.Load(".env." + env + ".local")
-if "test" != env {
- godotenv.Load(".env.local")
-}
-godotenv.Load(".env." + env)
-godotenv.Load() // The Original .env
-```
-
-If you need to, you can also use `godotenv.Overload()` to defy this convention
-and overwrite existing envs instead of only supplanting them. Use with caution.
-
-### Command Mode
-
-Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH`
-
-```
-godotenv -f /some/path/to/.env some_command with some args
-```
-
-If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD`
-
-By default, it won't override existing environment variables; you can do that with the `-o` flag.
-
-### Writing Env Files
-
-Godotenv can also write a map representing the environment to a correctly-formatted and escaped file
-
-```go
-env, err := godotenv.Unmarshal("KEY=value")
-err := godotenv.Write(env, "./.env")
-```
-
-... or to a string
-
-```go
-env, err := godotenv.Unmarshal("KEY=value")
-content, err := godotenv.Marshal(env)
-```
-
-## Contributing
-
-Contributions are welcome, but with some caveats.
-
-This library has been declared feature complete (see [#182](https://github.com/joho/godotenv/issues/182) for background) and will not be accepting issues or pull requests adding new functionality or breaking the library API.
-
-Contributions would be gladly accepted that:
-
-* bring this library's parsing into closer compatibility with the mainline dotenv implementations, in particular [Ruby's dotenv](https://github.com/bkeepers/dotenv) and [Node.js' dotenv](https://github.com/motdotla/dotenv)
-* keep the library up to date with the go ecosystem (ie CI bumps, documentation changes, changes in the core libraries)
-* bug fixes for use cases that pertain to the library's purpose of easing development of codebases deployed into twelve factor environments
-
-*code changes without tests and references to peer dotenv implementations will not be accepted*
-
-1. Fork it
-2. Create your feature branch (`git checkout -b my-new-feature`)
-3. Commit your changes (`git commit -am 'Added some feature'`)
-4. Push to the branch (`git push origin my-new-feature`)
-5. Create new Pull Request
-
-## Releases
-
-Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`.
-
-Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1`
-
-## Who?
-
-The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/godotenv.go b/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/godotenv.go
deleted file mode 100644
index 61b0ebb..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/godotenv.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
-//
-// Examples/readme can be found on the GitHub page at https://github.com/joho/godotenv
-//
-// The TL;DR is that you make a .env file that looks something like
-//
-// SOME_ENV_VAR=somevalue
-//
-// and then in your go code you can call
-//
-// godotenv.Load()
-//
-// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
-package godotenv
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "os/exec"
- "sort"
- "strconv"
- "strings"
-)
-
-const doubleQuoteSpecialChars = "\\\n\r\"!$`"
-
-// Parse reads an env file from io.Reader, returning a map of keys and values.
-func Parse(r io.Reader) (map[string]string, error) {
- var buf bytes.Buffer
- _, err := io.Copy(&buf, r)
- if err != nil {
- return nil, err
- }
-
- return UnmarshalBytes(buf.Bytes())
-}
-
-// Load will read your env file(s) and load them into ENV for this process.
-//
-// Call this function as close as possible to the start of your program (ideally in main).
-//
-// If you call Load without any args it will default to loading .env in the current path.
-//
-// You can otherwise tell it which files to load (there can be more than one) like:
-//
-// godotenv.Load("fileone", "filetwo")
-//
-// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults.
-func Load(filenames ...string) (err error) {
- filenames = filenamesOrDefault(filenames)
-
- for _, filename := range filenames {
- err = loadFile(filename, false)
- if err != nil {
- return // return early on a spazout
- }
- }
- return
-}
-
-// Overload will read your env file(s) and load them into ENV for this process.
-//
-// Call this function as close as possible to the start of your program (ideally in main).
-//
-// If you call Overload without any args it will default to loading .env in the current path.
-//
-// You can otherwise tell it which files to load (there can be more than one) like:
-//
-// godotenv.Overload("fileone", "filetwo")
-//
-// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
-func Overload(filenames ...string) (err error) {
- filenames = filenamesOrDefault(filenames)
-
- for _, filename := range filenames {
- err = loadFile(filename, true)
- if err != nil {
- return // return early on a spazout
- }
- }
- return
-}
-
-// Read all env (with same file loading semantics as Load) but return values as
-// a map rather than automatically writing values into env
-func Read(filenames ...string) (envMap map[string]string, err error) {
- filenames = filenamesOrDefault(filenames)
- envMap = make(map[string]string)
-
- for _, filename := range filenames {
- individualEnvMap, individualErr := readFile(filename)
-
- if individualErr != nil {
- err = individualErr
- return // return early on a spazout
- }
-
- for key, value := range individualEnvMap {
- envMap[key] = value
- }
- }
-
- return
-}
-
-// Unmarshal reads an env file from a string, returning a map of keys and values.
-func Unmarshal(str string) (envMap map[string]string, err error) {
- return UnmarshalBytes([]byte(str))
-}
-
-// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values.
-func UnmarshalBytes(src []byte) (map[string]string, error) {
- out := make(map[string]string)
- err := parseBytes(src, out)
-
- return out, err
-}
-
-// Exec loads env vars from the specified filenames (empty map falls back to default)
-// then executes the cmd specified.
-//
-// Simply hooks up os.Stdin/err/out to the command and calls Run().
-//
-// If you want more fine grained control over your command it's recommended
-// that you use `Load()`, `Overload()` or `Read()` and the `os/exec` package yourself.
-func Exec(filenames []string, cmd string, cmdArgs []string, overload bool) error {
- op := Load
- if overload {
- op = Overload
- }
- if err := op(filenames...); err != nil {
- return err
- }
-
- command := exec.Command(cmd, cmdArgs...)
- command.Stdin = os.Stdin
- command.Stdout = os.Stdout
- command.Stderr = os.Stderr
- return command.Run()
-}
-
-// Write serializes the given environment and writes it to a file.
-func Write(envMap map[string]string, filename string) error {
- content, err := Marshal(envMap)
- if err != nil {
- return err
- }
- file, err := os.Create(filename)
- if err != nil {
- return err
- }
- defer file.Close()
- _, err = file.WriteString(content + "\n")
- if err != nil {
- return err
- }
- return file.Sync()
-}
-
-// Marshal outputs the given environment as a dotenv-formatted environment file.
-// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped.
-func Marshal(envMap map[string]string) (string, error) {
- lines := make([]string, 0, len(envMap))
- for k, v := range envMap {
- if d, err := strconv.Atoi(v); err == nil {
- lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
- } else {
- lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v)))
- }
- }
- sort.Strings(lines)
- return strings.Join(lines, "\n"), nil
-}
-
-func filenamesOrDefault(filenames []string) []string {
- if len(filenames) == 0 {
- return []string{".env"}
- }
- return filenames
-}
-
-func loadFile(filename string, overload bool) error {
- envMap, err := readFile(filename)
- if err != nil {
- return err
- }
-
- currentEnv := map[string]bool{}
- rawEnv := os.Environ()
- for _, rawEnvLine := range rawEnv {
- key := strings.Split(rawEnvLine, "=")[0]
- currentEnv[key] = true
- }
-
- for key, value := range envMap {
- if !currentEnv[key] || overload {
- _ = os.Setenv(key, value)
- }
- }
-
- return nil
-}
-
-func readFile(filename string) (envMap map[string]string, err error) {
- file, err := os.Open(filename)
- if err != nil {
- return
- }
- defer file.Close()
-
- return Parse(file)
-}
-
-func doubleQuoteEscape(line string) string {
- for _, c := range doubleQuoteSpecialChars {
- toReplace := "\\" + string(c)
- if c == '\n' {
- toReplace = `\n`
- }
- if c == '\r' {
- toReplace = `\r`
- }
- line = strings.Replace(line, string(c), toReplace, -1)
- }
- return line
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/parser.go b/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/parser.go
deleted file mode 100644
index cc709af..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/joho/godotenv/parser.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package godotenv
-
-import (
- "bytes"
- "errors"
- "fmt"
- "regexp"
- "strings"
- "unicode"
-)
-
-const (
- charComment = '#'
- prefixSingleQuote = '\''
- prefixDoubleQuote = '"'
-
- exportPrefix = "export"
-)
-
-func parseBytes(src []byte, out map[string]string) error {
- src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
- cutset := src
- for {
- cutset = getStatementStart(cutset)
- if cutset == nil {
- // reached end of file
- break
- }
-
- key, left, err := locateKeyName(cutset)
- if err != nil {
- return err
- }
-
- value, left, err := extractVarValue(left, out)
- if err != nil {
- return err
- }
-
- out[key] = value
- cutset = left
- }
-
- return nil
-}
-
-// getStatementPosition returns position of statement begin.
-//
-// It skips any comment line or non-whitespace character.
-func getStatementStart(src []byte) []byte {
- pos := indexOfNonSpaceChar(src)
- if pos == -1 {
- return nil
- }
-
- src = src[pos:]
- if src[0] != charComment {
- return src
- }
-
- // skip comment section
- pos = bytes.IndexFunc(src, isCharFunc('\n'))
- if pos == -1 {
- return nil
- }
-
- return getStatementStart(src[pos:])
-}
-
-// locateKeyName locates and parses key name and returns rest of slice
-func locateKeyName(src []byte) (key string, cutset []byte, err error) {
- // trim "export" and space at beginning
- src = bytes.TrimLeftFunc(src, isSpace)
- if bytes.HasPrefix(src, []byte(exportPrefix)) {
- trimmed := bytes.TrimPrefix(src, []byte(exportPrefix))
- if bytes.IndexFunc(trimmed, isSpace) == 0 {
- src = bytes.TrimLeftFunc(trimmed, isSpace)
- }
- }
-
- // locate key name end and validate it in single loop
- offset := 0
-loop:
- for i, char := range src {
- rchar := rune(char)
- if isSpace(rchar) {
- continue
- }
-
- switch char {
- case '=', ':':
- // library also supports yaml-style value declaration
- key = string(src[0:i])
- offset = i + 1
- break loop
- case '_':
- default:
- // variable name should match [A-Za-z0-9_.]
- if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) || rchar == '.' {
- continue
- }
-
- return "", nil, fmt.Errorf(
- `unexpected character %q in variable name near %q`,
- string(char), string(src))
- }
- }
-
- if len(src) == 0 {
- return "", nil, errors.New("zero length string")
- }
-
- // trim whitespace
- key = strings.TrimRightFunc(key, unicode.IsSpace)
- cutset = bytes.TrimLeftFunc(src[offset:], isSpace)
- return key, cutset, nil
-}
-
-// extractVarValue extracts variable value and returns rest of slice
-func extractVarValue(src []byte, vars map[string]string) (value string, rest []byte, err error) {
- quote, hasPrefix := hasQuotePrefix(src)
- if !hasPrefix {
- // unquoted value - read until end of line
- endOfLine := bytes.IndexFunc(src, isLineEnd)
-
- // Hit EOF without a trailing newline
- if endOfLine == -1 {
- endOfLine = len(src)
-
- if endOfLine == 0 {
- return "", nil, nil
- }
- }
-
- // Convert line to rune away to do accurate countback of runes
- line := []rune(string(src[0:endOfLine]))
-
- // Assume end of line is end of var
- endOfVar := len(line)
- if endOfVar == 0 {
- return "", src[endOfLine:], nil
- }
-
- // Work backwards to check if the line ends in whitespace then
- // a comment (ie asdasd # some comment)
- for i := endOfVar - 1; i >= 0; i-- {
- if line[i] == charComment && i > 0 {
- if isSpace(line[i-1]) {
- endOfVar = i
- break
- }
- }
- }
-
- trimmed := strings.TrimFunc(string(line[0:endOfVar]), isSpace)
-
- return expandVariables(trimmed, vars), src[endOfLine:], nil
- }
-
- // lookup quoted string terminator
- for i := 1; i < len(src); i++ {
- if char := src[i]; char != quote {
- continue
- }
-
- // skip escaped quote symbol (\" or \', depends on quote)
- if prevChar := src[i-1]; prevChar == '\\' {
- continue
- }
-
- // trim quotes
- trimFunc := isCharFunc(rune(quote))
- value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc))
- if quote == prefixDoubleQuote {
- // unescape newlines for double quote (this is compat feature)
- // and expand environment variables
- value = expandVariables(expandEscapes(value), vars)
- }
-
- return value, src[i+1:], nil
- }
-
- // return formatted error if quoted string is not terminated
- valEndIndex := bytes.IndexFunc(src, isCharFunc('\n'))
- if valEndIndex == -1 {
- valEndIndex = len(src)
- }
-
- return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex])
-}
-
-func expandEscapes(str string) string {
- out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string {
- c := strings.TrimPrefix(match, `\`)
- switch c {
- case "n":
- return "\n"
- case "r":
- return "\r"
- default:
- return match
- }
- })
- return unescapeCharsRegex.ReplaceAllString(out, "$1")
-}
-
-func indexOfNonSpaceChar(src []byte) int {
- return bytes.IndexFunc(src, func(r rune) bool {
- return !unicode.IsSpace(r)
- })
-}
-
-// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character
-func hasQuotePrefix(src []byte) (prefix byte, isQuored bool) {
- if len(src) == 0 {
- return 0, false
- }
-
- switch prefix := src[0]; prefix {
- case prefixDoubleQuote, prefixSingleQuote:
- return prefix, true
- default:
- return 0, false
- }
-}
-
-func isCharFunc(char rune) func(rune) bool {
- return func(v rune) bool {
- return v == char
- }
-}
-
-// isSpace reports whether the rune is a space character but not line break character
-//
-// this differs from unicode.IsSpace, which also applies line break as space
-func isSpace(r rune) bool {
- switch r {
- case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0:
- return true
- }
- return false
-}
-
-func isLineEnd(r rune) bool {
- if r == '\n' || r == '\r' {
- return true
- }
- return false
-}
-
-var (
- escapeRegex = regexp.MustCompile(`\\.`)
- expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
- unescapeCharsRegex = regexp.MustCompile(`\\([^$])`)
-)
-
-func expandVariables(v string, m map[string]string) string {
- return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string {
- submatch := expandVarRegex.FindStringSubmatch(s)
-
- if submatch == nil {
- return s
- }
- if submatch[1] == "\\" || submatch[2] == "(" {
- return submatch[0][1:]
- } else if submatch[4] != "" {
- return m[submatch[4]]
- }
- return s
- })
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/LICENSE b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/LICENSE
deleted file mode 100644
index 87d5574..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/LICENSE
+++ /dev/null
@@ -1,304 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2019 Klaus Post. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-------------------
-
-Files: gzhttp/*
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2016-2017 The New York Times Company
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-------------------
-
-Files: s2/cmd/internal/readahead/*
-
-The MIT License (MIT)
-
-Copyright (c) 2015 Klaus Post
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
----------------------
-Files: snappy/*
-Files: internal/snapref/*
-
-Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
------------------
-
-Files: s2/cmd/internal/filepathx/*
-
-Copyright 2016 The filepathx Authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/deflate.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/deflate.go
deleted file mode 100644
index 66d1657..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/deflate.go
+++ /dev/null
@@ -1,1017 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Copyright (c) 2015 Klaus Post
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
-)
-
-const (
- NoCompression = 0
- BestSpeed = 1
- BestCompression = 9
- DefaultCompression = -1
-
- // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
- // entropy encoding. This mode is useful in compressing data that has
- // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
- // that lacks an entropy encoder. Compression gains are achieved when
- // certain bytes in the input stream occur more frequently than others.
- //
- // Note that HuffmanOnly produces a compressed output that is
- // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
- // continue to be able to decompress this output.
- HuffmanOnly = -2
- ConstantCompression = HuffmanOnly // compatibility alias.
-
- logWindowSize = 15
- windowSize = 1 << logWindowSize
- windowMask = windowSize - 1
- logMaxOffsetSize = 15 // Standard DEFLATE
- minMatchLength = 4 // The smallest match that the compressor looks for
- maxMatchLength = 258 // The longest match for the compressor
- minOffsetSize = 1 // The shortest offset that makes any sense
-
- // The maximum number of tokens we will encode at the time.
- // Smaller sizes usually creates less optimal blocks.
- // Bigger can make context switching slow.
- // We use this for levels 7-9, so we make it big.
- maxFlateBlockTokens = 1 << 15
- maxStoreBlockSize = 65535
- hashBits = 17 // After 17 performance degrades
- hashSize = 1 << hashBits
- hashMask = (1 << hashBits) - 1
- hashShift = (hashBits + minMatchLength - 1) / minMatchLength
- maxHashOffset = 1 << 28
-
- skipNever = math.MaxInt32
-
- debugDeflate = false
-)
-
-type compressionLevel struct {
- good, lazy, nice, chain, fastSkipHashing, level int
-}
-
-// Compression levels have been rebalanced from zlib deflate defaults
-// to give a bigger spread in speed and compression.
-// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
-var levels = []compressionLevel{
- {}, // 0
- // Level 1-6 uses specialized algorithm - values not used
- {0, 0, 0, 0, 0, 1},
- {0, 0, 0, 0, 0, 2},
- {0, 0, 0, 0, 0, 3},
- {0, 0, 0, 0, 0, 4},
- {0, 0, 0, 0, 0, 5},
- {0, 0, 0, 0, 0, 6},
- // Levels 7-9 use increasingly more lazy matching
- // and increasingly stringent conditions for "good enough".
- {8, 12, 16, 24, skipNever, 7},
- {16, 30, 40, 64, skipNever, 8},
- {32, 258, 258, 1024, skipNever, 9},
-}
-
-// advancedState contains state for the advanced levels, with bigger hash tables, etc.
-type advancedState struct {
- // deflate state
- length int
- offset int
- maxInsertIndex int
- chainHead int
- hashOffset int
-
- ii uint16 // position of last match, intended to overflow to reset.
-
- // input window: unprocessed data is window[index:windowEnd]
- index int
- hashMatch [maxMatchLength + minMatchLength]uint32
-
- // Input hash chains
- // hashHead[hashValue] contains the largest inputIndex with the specified hash value
- // If hashHead[hashValue] is within the current window, then
- // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
- // with the same hash value.
- hashHead [hashSize]uint32
- hashPrev [windowSize]uint32
-}
-
-type compressor struct {
- compressionLevel
-
- h *huffmanEncoder
- w *huffmanBitWriter
-
- // compression algorithm
- fill func(*compressor, []byte) int // copy data to window
- step func(*compressor) // process window
-
- window []byte
- windowEnd int
- blockStart int // window index where current tokens start
- err error
-
- // queued output tokens
- tokens tokens
- fast fastEnc
- state *advancedState
-
- sync bool // requesting flush
- byteAvailable bool // if true, still need to process window[index-1].
-}
-
-func (d *compressor) fillDeflate(b []byte) int {
- s := d.state
- if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
- // shift the window by windowSize
- //copy(d.window[:], d.window[windowSize:2*windowSize])
- *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
- s.index -= windowSize
- d.windowEnd -= windowSize
- if d.blockStart >= windowSize {
- d.blockStart -= windowSize
- } else {
- d.blockStart = math.MaxInt32
- }
- s.hashOffset += windowSize
- if s.hashOffset > maxHashOffset {
- delta := s.hashOffset - 1
- s.hashOffset -= delta
- s.chainHead -= delta
- // Iterate over slices instead of arrays to avoid copying
- // the entire table onto the stack (Issue #18625).
- for i, v := range s.hashPrev[:] {
- if int(v) > delta {
- s.hashPrev[i] = uint32(int(v) - delta)
- } else {
- s.hashPrev[i] = 0
- }
- }
- for i, v := range s.hashHead[:] {
- if int(v) > delta {
- s.hashHead[i] = uint32(int(v) - delta)
- } else {
- s.hashHead[i] = 0
- }
- }
- }
- }
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
- if index > 0 || eof {
- var window []byte
- if d.blockStart <= index {
- window = d.window[d.blockStart:index]
- }
- d.blockStart = index
- //d.w.writeBlock(tok, eof, window)
- d.w.writeBlockDynamic(tok, eof, window, d.sync)
- return d.w.err
- }
- return nil
-}
-
-// writeBlockSkip writes the current block and uses the number of tokens
-// to determine if the block should be stored on no matches, or
-// only huffman encoded.
-func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
- if index > 0 || eof {
- if d.blockStart <= index {
- window := d.window[d.blockStart:index]
- // If we removed less than a 64th of all literals
- // we huffman compress the block.
- if int(tok.n) > len(window)-int(tok.n>>6) {
- d.w.writeBlockHuff(eof, window, d.sync)
- } else {
- // Write a dynamic huffman block.
- d.w.writeBlockDynamic(tok, eof, window, d.sync)
- }
- } else {
- d.w.writeBlock(tok, eof, nil)
- }
- d.blockStart = index
- return d.w.err
- }
- return nil
-}
-
-// fillWindow will fill the current window with the supplied
-// dictionary and calculate all hashes.
-// This is much faster than doing a full encode.
-// Should only be used after a start/reset.
-func (d *compressor) fillWindow(b []byte) {
- // Do not fill window if we are in store-only or huffman mode.
- if d.level <= 0 && d.level > -MinCustomWindowSize {
- return
- }
- if d.fast != nil {
- // encode the last data, but discard the result
- if len(b) > maxMatchOffset {
- b = b[len(b)-maxMatchOffset:]
- }
- d.fast.Encode(&d.tokens, b)
- d.tokens.Reset()
- return
- }
- s := d.state
- // If we are given too much, cut it.
- if len(b) > windowSize {
- b = b[len(b)-windowSize:]
- }
- // Add all to window.
- n := copy(d.window[d.windowEnd:], b)
-
- // Calculate 256 hashes at the time (more L1 cache hits)
- loops := (n + 256 - minMatchLength) / 256
- for j := 0; j < loops; j++ {
- startindex := j * 256
- end := startindex + 256 + minMatchLength - 1
- if end > n {
- end = n
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
-
- if dstSize <= 0 {
- continue
- }
-
- dst := s.hashMatch[:dstSize]
- bulkHash4(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- }
- // Update window information.
- d.windowEnd += n
- s.index = n
-}
-
-// Try to find a match starting at index whose length is greater than prevSize.
-// We only look at chainCount possibilities before giving up.
-// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
- minMatchLook := maxMatchLength
- if lookahead < minMatchLook {
- minMatchLook = lookahead
- }
-
- win := d.window[0 : pos+minMatchLook]
-
- // We quit when we get a match that's at least nice long
- nice := len(win) - pos
- if d.nice < nice {
- nice = d.nice
- }
-
- // If we've got a match that's good enough, only look in 1/4 the chain.
- tries := d.chain
- length = minMatchLength - 1
-
- wEnd := win[pos+length]
- wPos := win[pos:]
- minIndex := pos - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
- offset = 0
-
- if d.chain < 100 {
- for i := prevHead; tries > 0; tries-- {
- if wEnd == win[i+length] {
- n := matchLen(win[i:i+minMatchLook], wPos)
- if n > length {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
- }
- wEnd = win[pos+n]
- }
- }
- if i <= minIndex {
- // hashPrev[i & windowMask] has already been overwritten, so stop now.
- break
- }
- i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex {
- break
- }
- }
- return
- }
-
- // Minimum gain to accept a match.
- cGain := 4
-
- // Some like it higher (CSV), some like it lower (JSON)
- const baseCost = 3
- // Base is 4 bytes at with an additional cost.
- // Matches must be better than this.
-
- for i := prevHead; tries > 0; tries-- {
- if wEnd == win[i+length] {
- n := matchLen(win[i:i+minMatchLook], wPos)
- if n > length {
- // Calculate gain. Estimate
- newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
-
- //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
- if newGain > cGain {
- length = n
- offset = pos - i
- cGain = newGain
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
- }
- wEnd = win[pos+n]
- }
- }
- }
- if i <= minIndex {
- // hashPrev[i & windowMask] has already been overwritten, so stop now.
- break
- }
- i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex {
- break
- }
- }
- return
-}
-
-func (d *compressor) writeStoredBlock(buf []byte) error {
- if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
- return d.w.err
- }
- d.w.writeBytes(buf)
- return d.w.err
-}
-
-// hash4 returns a hash representation of the first 4 bytes
-// of the supplied slice.
-// The caller must ensure that len(b) >= 4.
-func hash4(b []byte) uint32 {
- return hash4u(binary.LittleEndian.Uint32(b), hashBits)
-}
-
-// hash4 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4u(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> (32 - h)
-}
-
-// bulkHash4 will compute hashes using the same
-// algorithm as hash4
-func bulkHash4(b []byte, dst []uint32) {
- if len(b) < 4 {
- return
- }
- hb := binary.LittleEndian.Uint32(b)
-
- dst[0] = hash4u(hb, hashBits)
- end := len(b) - 4 + 1
- for i := 1; i < end; i++ {
- hb = (hb >> 8) | uint32(b[i+3])<<24
- dst[i] = hash4u(hb, hashBits)
- }
-}
-
-func (d *compressor) initDeflate() {
- d.window = make([]byte, 2*windowSize)
- d.byteAvailable = false
- d.err = nil
- if d.state == nil {
- return
- }
- s := d.state
- s.index = 0
- s.hashOffset = 1
- s.length = minMatchLength - 1
- s.offset = 0
- s.chainHead = -1
-}
-
-// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
-// meaning it always has lazy matching on.
-func (d *compressor) deflateLazy() {
- s := d.state
- // Sanity enables additional runtime tests.
- // It's intended to be used during development
- // to supplement the currently ad-hoc unit tests.
- const sanity = debugDeflate
-
- if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
- return
- }
- if d.windowEnd != s.index && d.chain > 100 {
- // Get literal huffman coder.
- if d.h == nil {
- d.h = newHuffmanEncoder(maxFlateBlockTokens)
- }
- var tmp [256]uint16
- for _, v := range d.window[s.index:d.windowEnd] {
- tmp[v]++
- }
- d.h.generate(tmp[:], 15)
- }
-
- s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
-
- for {
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- lookahead := d.windowEnd - s.index
- if lookahead < minMatchLength+maxMatchLength {
- if !d.sync {
- return
- }
- if sanity && s.index > d.windowEnd {
- panic("index > windowEnd")
- }
- if lookahead == 0 {
- // Flush current output block if any.
- if d.byteAvailable {
- // There is still one pending token that needs to be flushed
- d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- }
- if d.tokens.n > 0 {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- return
- }
- }
- if s.index < s.maxInsertIndex {
- // Update the hash
- hash := hash4(d.window[s.index:])
- ch := s.hashHead[hash]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[hash] = uint32(s.index + s.hashOffset)
- }
- prevLength := s.length
- prevOffset := s.offset
- s.length = minMatchLength - 1
- s.offset = 0
- minIndex := s.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
-
- if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
- s.length = newLength
- s.offset = newOffset
- }
- }
-
- if prevLength >= minMatchLength && s.length <= prevLength {
- // No better match, but check for better match at end...
- //
- // Skip forward a number of bytes.
- // Offset of 2 seems to yield best results. 3 is sometimes better.
- const checkOff = 2
-
- // Check all, except full length
- if prevLength < maxMatchLength-checkOff {
- prevIndex := s.index - 1
- if prevIndex+prevLength < s.maxInsertIndex {
- end := lookahead
- if lookahead > maxMatchLength+checkOff {
- end = maxMatchLength + checkOff
- }
- end += prevIndex
-
- // Hash at match end.
- h := hash4(d.window[prevIndex+prevLength:])
- ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
- if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
- length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
- // It seems like a pure length metric is best.
- if length > prevLength {
- prevLength = length
- prevOffset = prevIndex - ch2
-
- // Extend back...
- for i := checkOff - 1; i >= 0; i-- {
- if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
- // Emit tokens we "owe"
- for j := 0; j <= i; j++ {
- d.tokens.AddLiteral(d.window[prevIndex+j])
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- s.index++
- if s.index < s.maxInsertIndex {
- h := hash4(d.window[s.index:])
- ch := s.hashHead[h]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[h] = uint32(s.index + s.hashOffset)
- }
- }
- break
- } else {
- prevLength++
- }
- }
- } else if false {
- // Check one further ahead.
- // Only rarely better, disabled for now.
- prevIndex++
- h := hash4(d.window[prevIndex+prevLength:])
- ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
- if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
- length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
- // It seems like a pure length metric is best.
- if length > prevLength+checkOff {
- prevLength = length
- prevOffset = prevIndex - ch2
- prevIndex--
-
- // Extend back...
- for i := checkOff; i >= 0; i-- {
- if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
- // Emit tokens we "owe"
- for j := 0; j <= i; j++ {
- d.tokens.AddLiteral(d.window[prevIndex+j])
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- s.index++
- if s.index < s.maxInsertIndex {
- h := hash4(d.window[s.index:])
- ch := s.hashHead[h]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[h] = uint32(s.index + s.hashOffset)
- }
- }
- break
- } else {
- prevLength++
- }
- }
- }
- }
- }
- }
- }
- }
- // There was a match at the previous step, and the current match is
- // not better. Output the previous match.
- d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
-
- // Insert in the hash table all strings up to the end of the match.
- // index and index-1 are already inserted. If there is not enough
- // lookahead, the last two strings are not inserted into the hash
- // table.
- newIndex := s.index + prevLength - 1
- // Calculate missing hashes
- end := newIndex
- if end > s.maxInsertIndex {
- end = s.maxInsertIndex
- }
- end += minMatchLength - 1
- startindex := s.index + 1
- if startindex > s.maxInsertIndex {
- startindex = s.maxInsertIndex
- }
- tocheck := d.window[startindex:end]
- dstSize := len(tocheck) - minMatchLength + 1
- if dstSize > 0 {
- dst := s.hashMatch[:dstSize]
- bulkHash4(tocheck, dst)
- var newH uint32
- for i, val := range dst {
- di := i + startindex
- newH = val & hashMask
- // Get previous value with the same hash.
- // Our chain should point to the previous value.
- s.hashPrev[di&windowMask] = s.hashHead[newH]
- // Set the head of the hash chain to us.
- s.hashHead[newH] = uint32(di + s.hashOffset)
- }
- }
-
- s.index = newIndex
- d.byteAvailable = false
- s.length = minMatchLength - 1
- if d.tokens.n == maxFlateBlockTokens {
- // The block includes the current character
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- s.ii = 0
- } else {
- // Reset, if we got a match this run.
- if s.length >= minMatchLength {
- s.ii = 0
- }
- // We have a byte waiting. Emit it.
- if d.byteAvailable {
- s.ii++
- d.tokens.AddLiteral(d.window[s.index-1])
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- s.index++
-
- // If we have a long run of no matches, skip additional bytes
- // Resets when s.ii overflows after 64KB.
- if n := int(s.ii) - d.chain; n > 0 {
- n = 1 + int(n>>6)
- for j := 0; j < n; j++ {
- if s.index >= d.windowEnd-1 {
- break
- }
- d.tokens.AddLiteral(d.window[s.index-1])
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- // Index...
- if s.index < s.maxInsertIndex {
- h := hash4(d.window[s.index:])
- ch := s.hashHead[h]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[h] = uint32(s.index + s.hashOffset)
- }
- s.index++
- }
- // Flush last byte
- d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- }
- } else {
- s.index++
- d.byteAvailable = true
- }
- }
- }
-}
-
-func (d *compressor) store() {
- if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- d.windowEnd = 0
- }
-}
-
-// fillWindow will fill the buffer with data for huffman-only compression.
-// The number of bytes copied is returned.
-func (d *compressor) fillBlock(b []byte) int {
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-// storeHuff will compress and store the currently added data,
-// if enough has been accumulated or we at the end of the stream.
-// Any error that occurred will be in d.err
-func (d *compressor) storeHuff() {
- if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
- return
- }
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- d.windowEnd = 0
-}
-
-// storeFast will compress and store the currently added data,
-// if enough has been accumulated or we at the end of the stream.
-// Any error that occurred will be in d.err
-func (d *compressor) storeFast() {
- // We only compress if we have maxStoreBlockSize.
- if d.windowEnd < len(d.window) {
- if !d.sync {
- return
- }
- // Handle extremely small sizes.
- if d.windowEnd < 128 {
- if d.windowEnd == 0 {
- return
- }
- if d.windowEnd <= 32 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- } else {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
- d.err = d.w.err
- }
- d.tokens.Reset()
- d.windowEnd = 0
- d.fast.Reset()
- return
- }
- }
-
- d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
- // If we made zero matches, store the block as is.
- if d.tokens.n == 0 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- // If we removed less than 1/16th, huffman compress the block.
- } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- } else {
- d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- }
- d.tokens.Reset()
- d.windowEnd = 0
-}
-
-// write will add input byte to the stream.
-// Unless an error occurs all bytes will be consumed.
-func (d *compressor) write(b []byte) (n int, err error) {
- if d.err != nil {
- return 0, d.err
- }
- n = len(b)
- for len(b) > 0 {
- if d.windowEnd == len(d.window) || d.sync {
- d.step(d)
- }
- b = b[d.fill(d, b):]
- if d.err != nil {
- return 0, d.err
- }
- }
- return n, d.err
-}
-
-func (d *compressor) syncFlush() error {
- d.sync = true
- if d.err != nil {
- return d.err
- }
- d.step(d)
- if d.err == nil {
- d.w.writeStoredHeader(0, false)
- d.w.flush()
- d.err = d.w.err
- }
- d.sync = false
- return d.err
-}
-
-func (d *compressor) init(w io.Writer, level int) (err error) {
- d.w = newHuffmanBitWriter(w)
-
- switch {
- case level == NoCompression:
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).store
- case level == ConstantCompression:
- d.w.logNewTablePenalty = 10
- d.window = make([]byte, 32<<10)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeHuff
- case level == DefaultCompression:
- level = 5
- fallthrough
- case level >= 1 && level <= 6:
- d.w.logNewTablePenalty = 7
- d.fast = newFastEnc(level)
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeFast
- case 7 <= level && level <= 9:
- d.w.logNewTablePenalty = 8
- d.state = &advancedState{}
- d.compressionLevel = levels[level]
- d.initDeflate()
- d.fill = (*compressor).fillDeflate
- d.step = (*compressor).deflateLazy
- case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
- d.w.logNewTablePenalty = 7
- d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
- d.window = make([]byte, maxStoreBlockSize)
- d.fill = (*compressor).fillBlock
- d.step = (*compressor).storeFast
- default:
- return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
- }
- d.level = level
- return nil
-}
-
-// reset the state of the compressor.
-func (d *compressor) reset(w io.Writer) {
- d.w.reset(w)
- d.sync = false
- d.err = nil
- // We only need to reset a few things for Snappy.
- if d.fast != nil {
- d.fast.Reset()
- d.windowEnd = 0
- d.tokens.Reset()
- return
- }
- switch d.compressionLevel.chain {
- case 0:
- // level was NoCompression or ConstantCompresssion.
- d.windowEnd = 0
- default:
- s := d.state
- s.chainHead = -1
- for i := range s.hashHead {
- s.hashHead[i] = 0
- }
- for i := range s.hashPrev {
- s.hashPrev[i] = 0
- }
- s.hashOffset = 1
- s.index, d.windowEnd = 0, 0
- d.blockStart, d.byteAvailable = 0, false
- d.tokens.Reset()
- s.length = minMatchLength - 1
- s.offset = 0
- s.ii = 0
- s.maxInsertIndex = 0
- }
-}
-
-func (d *compressor) close() error {
- if d.err != nil {
- return d.err
- }
- d.sync = true
- d.step(d)
- if d.err != nil {
- return d.err
- }
- if d.w.writeStoredHeader(0, true); d.w.err != nil {
- return d.w.err
- }
- d.w.flush()
- d.w.reset(nil)
- return d.w.err
-}
-
-// NewWriter returns a new Writer compressing data at the given level.
-// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
-// higher levels typically run slower but compress more.
-// Level 0 (NoCompression) does not attempt any compression; it only adds the
-// necessary DEFLATE framing.
-// Level -1 (DefaultCompression) uses the default compression level.
-// Level -2 (ConstantCompression) will use Huffman compression only, giving
-// a very fast compression for all types of input, but sacrificing considerable
-// compression efficiency.
-//
-// If level is in the range [-2, 9] then the error returned will be nil.
-// Otherwise the error returned will be non-nil.
-func NewWriter(w io.Writer, level int) (*Writer, error) {
- var dw Writer
- if err := dw.d.init(w, level); err != nil {
- return nil, err
- }
- return &dw, nil
-}
-
-// NewWriterDict is like NewWriter but initializes the new
-// Writer with a preset dictionary. The returned Writer behaves
-// as if the dictionary had been written to it without producing
-// any compressed output. The compressed data written to w
-// can only be decompressed by a Reader initialized with the
-// same dictionary.
-func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
- zw, err := NewWriter(w, level)
- if err != nil {
- return nil, err
- }
- zw.d.fillWindow(dict)
- zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
- return zw, err
-}
-
-// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
-const MinCustomWindowSize = 32
-
-// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
-const MaxCustomWindowSize = windowSize
-
-// NewWriterWindow returns a new Writer compressing data with a custom window size.
-// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
-func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
- if windowSize < MinCustomWindowSize {
- return nil, errors.New("flate: requested window size less than MinWindowSize")
- }
- if windowSize > MaxCustomWindowSize {
- return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
- }
- var dw Writer
- if err := dw.d.init(w, -windowSize); err != nil {
- return nil, err
- }
- return &dw, nil
-}
-
-// A Writer takes data written to it and writes the compressed
-// form of that data to an underlying writer (see NewWriter).
-type Writer struct {
- d compressor
- dict []byte
-}
-
-// Write writes data to w, which will eventually write the
-// compressed form of data to its underlying writer.
-func (w *Writer) Write(data []byte) (n int, err error) {
- return w.d.write(data)
-}
-
-// Flush flushes any pending data to the underlying writer.
-// It is useful mainly in compressed network protocols, to ensure that
-// a remote reader has enough data to reconstruct a packet.
-// Flush does not return until the data has been written.
-// Calling Flush when there is no pending data still causes the Writer
-// to emit a sync marker of at least 4 bytes.
-// If the underlying writer returns an error, Flush returns that error.
-//
-// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
-func (w *Writer) Flush() error {
- // For more about flushing:
- // http://www.bolet.org/~pornin/deflate-flush.html
- return w.d.syncFlush()
-}
-
-// Close flushes and closes the writer.
-func (w *Writer) Close() error {
- return w.d.close()
-}
-
-// Reset discards the writer's state and makes it equivalent to
-// the result of NewWriter or NewWriterDict called with dst
-// and w's level and dictionary.
-func (w *Writer) Reset(dst io.Writer) {
- if len(w.dict) > 0 {
- // w was created with NewWriterDict
- w.d.reset(dst)
- if dst != nil {
- w.d.fillWindow(w.dict)
- }
- } else {
- // w was created with NewWriter
- w.d.reset(dst)
- }
-}
-
-// ResetDict discards the writer's state and makes it equivalent to
-// the result of NewWriter or NewWriterDict called with dst
-// and w's level, but sets a specific dictionary.
-func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
- w.dict = dict
- w.d.reset(dst)
- w.d.fillWindow(w.dict)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/dict_decoder.go
deleted file mode 100644
index bb36351..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/dict_decoder.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
-// LZ77 decompresses data through sequences of two forms of commands:
-//
-// - Literal insertions: Runs of one or more symbols are inserted into the data
-// stream as is. This is accomplished through the writeByte method for a
-// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
-// Any valid stream must start with a literal insertion if no preset dictionary
-// is used.
-//
-// - Backward copies: Runs of one or more symbols are copied from previously
-// emitted data. Backward copies come as the tuple (dist, length) where dist
-// determines how far back in the stream to copy from and length determines how
-// many bytes to copy. Note that it is valid for the length to be greater than
-// the distance. Since LZ77 uses forward copies, that situation is used to
-// perform a form of run-length encoding on repeated runs of symbols.
-// The writeCopy and tryWriteCopy are used to implement this command.
-//
-// For performance reasons, this implementation performs little to no sanity
-// checks about the arguments. As such, the invariants documented for each
-// method call must be respected.
-type dictDecoder struct {
- hist []byte // Sliding window history
-
- // Invariant: 0 <= rdPos <= wrPos <= len(hist)
- wrPos int // Current output position in buffer
- rdPos int // Have emitted hist[:rdPos] already
- full bool // Has a full window length been written yet?
-}
-
-// init initializes dictDecoder to have a sliding window dictionary of the given
-// size. If a preset dict is provided, it will initialize the dictionary with
-// the contents of dict.
-func (dd *dictDecoder) init(size int, dict []byte) {
- *dd = dictDecoder{hist: dd.hist}
-
- if cap(dd.hist) < size {
- dd.hist = make([]byte, size)
- }
- dd.hist = dd.hist[:size]
-
- if len(dict) > len(dd.hist) {
- dict = dict[len(dict)-len(dd.hist):]
- }
- dd.wrPos = copy(dd.hist, dict)
- if dd.wrPos == len(dd.hist) {
- dd.wrPos = 0
- dd.full = true
- }
- dd.rdPos = dd.wrPos
-}
-
-// histSize reports the total amount of historical data in the dictionary.
-func (dd *dictDecoder) histSize() int {
- if dd.full {
- return len(dd.hist)
- }
- return dd.wrPos
-}
-
-// availRead reports the number of bytes that can be flushed by readFlush.
-func (dd *dictDecoder) availRead() int {
- return dd.wrPos - dd.rdPos
-}
-
-// availWrite reports the available amount of output buffer space.
-func (dd *dictDecoder) availWrite() int {
- return len(dd.hist) - dd.wrPos
-}
-
-// writeSlice returns a slice of the available buffer to write data to.
-//
-// This invariant will be kept: len(s) <= availWrite()
-func (dd *dictDecoder) writeSlice() []byte {
- return dd.hist[dd.wrPos:]
-}
-
-// writeMark advances the writer pointer by cnt.
-//
-// This invariant must be kept: 0 <= cnt <= availWrite()
-func (dd *dictDecoder) writeMark(cnt int) {
- dd.wrPos += cnt
-}
-
-// writeByte writes a single byte to the dictionary.
-//
-// This invariant must be kept: 0 < availWrite()
-func (dd *dictDecoder) writeByte(c byte) {
- dd.hist[dd.wrPos] = c
- dd.wrPos++
-}
-
-// writeCopy copies a string at a given (dist, length) to the output.
-// This returns the number of bytes copied and may be less than the requested
-// length if the available space in the output buffer is too small.
-//
-// This invariant must be kept: 0 < dist <= histSize()
-func (dd *dictDecoder) writeCopy(dist, length int) int {
- dstBase := dd.wrPos
- dstPos := dstBase
- srcPos := dstPos - dist
- endPos := dstPos + length
- if endPos > len(dd.hist) {
- endPos = len(dd.hist)
- }
-
- // Copy non-overlapping section after destination position.
- //
- // This section is non-overlapping in that the copy length for this section
- // is always less than or equal to the backwards distance. This can occur
- // if a distance refers to data that wraps-around in the buffer.
- // Thus, a backwards copy is performed here; that is, the exact bytes in
- // the source prior to the copy is placed in the destination.
- if srcPos < 0 {
- srcPos += len(dd.hist)
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
- srcPos = 0
- }
-
- // Copy possibly overlapping section before destination position.
- //
- // This section can overlap if the copy length for this section is larger
- // than the backwards distance. This is allowed by LZ77 so that repeated
- // strings can be succinctly represented using (dist, length) pairs.
- // Thus, a forwards copy is performed here; that is, the bytes copied is
- // possibly dependent on the resulting bytes in the destination as the copy
- // progresses along. This is functionally equivalent to the following:
- //
- // for i := 0; i < endPos-dstPos; i++ {
- // dd.hist[dstPos+i] = dd.hist[srcPos+i]
- // }
- // dstPos = endPos
- //
- for dstPos < endPos {
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
- }
-
- dd.wrPos = dstPos
- return dstPos - dstBase
-}
-
-// tryWriteCopy tries to copy a string at a given (distance, length) to the
-// output. This specialized version is optimized for short distances.
-//
-// This method is designed to be inlined for performance reasons.
-//
-// This invariant must be kept: 0 < dist <= histSize()
-func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
- dstPos := dd.wrPos
- endPos := dstPos + length
- if dstPos < dist || endPos > len(dd.hist) {
- return 0
- }
- dstBase := dstPos
- srcPos := dstPos - dist
-
- // Copy possibly overlapping section before destination position.
-loop:
- dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
- if dstPos < endPos {
- goto loop // Avoid for-loop so that this function can be inlined
- }
-
- dd.wrPos = dstPos
- return dstPos - dstBase
-}
-
-// readFlush returns a slice of the historical buffer that is ready to be
-// emitted to the user. The data returned by readFlush must be fully consumed
-// before calling any other dictDecoder methods.
-func (dd *dictDecoder) readFlush() []byte {
- toRead := dd.hist[dd.rdPos:dd.wrPos]
- dd.rdPos = dd.wrPos
- if dd.wrPos == len(dd.hist) {
- dd.wrPos, dd.rdPos = 0, 0
- dd.full = true
- }
- return toRead
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/fast_encoder.go
deleted file mode 100644
index c8124b5..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Modified for deflate by Klaus Post (c) 2015.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-type fastEnc interface {
- Encode(dst *tokens, src []byte)
- Reset()
-}
-
-func newFastEnc(level int) fastEnc {
- switch level {
- case 1:
- return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 2:
- return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 3:
- return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 4:
- return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 5:
- return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
- case 6:
- return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
- default:
- panic("invalid level specified")
- }
-}
-
-const (
- tableBits = 15 // Bits used in the table
- tableSize = 1 << tableBits // Size of the table
- tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
- baseMatchOffset = 1 // The smallest match offset
- baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
- maxMatchOffset = 1 << 15 // The largest match offset
-
- bTableBits = 17 // Bits used in the big tables
- bTableSize = 1 << bTableBits // Size of the table
- allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
- bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
-)
-
-const (
- prime3bytes = 506832829
- prime4bytes = 2654435761
- prime5bytes = 889523592379
- prime6bytes = 227718039650203
- prime7bytes = 58295818150454627
- prime8bytes = 0xcf1bbcdcb7a56463
-)
-
-func load3232(b []byte, i int32) uint32 {
- return binary.LittleEndian.Uint32(b[i:])
-}
-
-func load6432(b []byte, i int32) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
-}
-
-type tableEntry struct {
- offset int32
-}
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastGen struct {
- hist []byte
- cur int32
-}
-
-func (e *fastGen) addBlock(src []byte) int32 {
- // check if we have space already
- if len(e.hist)+len(src) > cap(e.hist) {
- if cap(e.hist) == 0 {
- e.hist = make([]byte, 0, allocHistory)
- } else {
- if cap(e.hist) < maxMatchOffset*2 {
- panic("unexpected buffer size")
- }
- // Move down
- offset := int32(len(e.hist)) - maxMatchOffset
- // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
- *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
- e.cur += offset
- e.hist = e.hist[:maxMatchOffset]
- }
- }
- s := int32(len(e.hist))
- e.hist = append(e.hist, src...)
- return s
-}
-
-type tableEntryPrev struct {
- Cur tableEntry
- Prev tableEntry
-}
-
-// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash7(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
-}
-
-// hashLen returns a hash of the lowest mls bytes of with length output bits.
-// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
-// length should always be < 32.
-// Preferably length and mls should be a constant for inlining.
-func hashLen(u uint64, length, mls uint8) uint32 {
- switch mls {
- case 3:
- return (uint32(u<<8) * prime3bytes) >> (32 - length)
- case 5:
- return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
- case 6:
- return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
- case 7:
- return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
- case 8:
- return uint32((u * prime8bytes) >> (64 - length))
- default:
- return (uint32(u) * prime4bytes) >> (32 - length)
- }
-}
-
-// matchlen will return the match length between offsets and t in src.
-// The maximum length returned is maxMatchLength - 4.
-// It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
- if debugDecode {
- if t >= s {
- panic(fmt.Sprint("t >=s:", t, s))
- }
- if int(s) >= len(src) {
- panic(fmt.Sprint("s >= len(src):", s, len(src)))
- }
- if t < 0 {
- panic(fmt.Sprint("t < 0:", t))
- }
- if s-t > maxMatchOffset {
- panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
- }
- }
- s1 := int(s) + maxMatchLength - 4
- if s1 > len(src) {
- s1 = len(src)
- }
-
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:s1], src[t:]))
-}
-
-// matchlenLong will return the match length between offsets and t in src.
-// It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
- if debugDeflate {
- if t >= s {
- panic(fmt.Sprint("t >=s:", t, s))
- }
- if int(s) >= len(src) {
- panic(fmt.Sprint("s >= len(src):", s, len(src)))
- }
- if t < 0 {
- panic(fmt.Sprint("t < 0:", t))
- }
- if s-t > maxMatchOffset {
- panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
- }
- }
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
-// Reset the encoding table.
-func (e *fastGen) Reset() {
- if cap(e.hist) < allocHistory {
- e.hist = make([]byte, 0, allocHistory)
- }
- // We offset current position so everything will be out of reach.
- // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
- if e.cur <= bufferReset {
- e.cur += maxMatchOffset + int32(len(e.hist))
- }
- e.hist = e.hist[:0]
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
deleted file mode 100644
index f70594c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ /dev/null
@@ -1,1182 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "math"
-)
-
-const (
- // The largest offset code.
- offsetCodeCount = 30
-
- // The special code used to mark the end of a block.
- endBlockMarker = 256
-
- // The first length code.
- lengthCodesStart = 257
-
- // The number of codegen codes.
- codegenCodeCount = 19
- badCode = 255
-
- // maxPredefinedTokens is the maximum number of tokens
- // where we check if fixed size is smaller.
- maxPredefinedTokens = 250
-
- // bufferFlushSize indicates the buffer size
- // after which bytes are flushed to the writer.
- // Should preferably be a multiple of 6, since
- // we accumulate 6 bytes between writes to the buffer.
- bufferFlushSize = 246
-)
-
-// Minimum length code that emits bits.
-const lengthExtraBitsMinCode = 8
-
-// The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]uint8{
- /* 257 */ 0, 0, 0,
- /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
- /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
- /* 280 */ 4, 5, 5, 5, 5, 0,
-}
-
-// The length indicated by length code X - LENGTH_CODES_START.
-var lengthBase = [32]uint8{
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
- 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
- 64, 80, 96, 112, 128, 160, 192, 224, 255,
-}
-
-// Minimum offset code that emits bits.
-const offsetExtraBitsMinCode = 4
-
-// offset code word extra bits.
-var offsetExtraBits = [32]int8{
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
- 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
- 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
- /* extended window */
- 14, 14,
-}
-
-var offsetCombined = [32]uint32{}
-
-func init() {
- var offsetBase = [32]uint32{
- /* normal deflate */
- 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
- 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
- 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
- 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
- 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
- 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
-
- /* extended window */
- 0x008000, 0x00c000,
- }
-
- for i := range offsetCombined[:] {
- // Don't use extended window values...
- if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
- continue
- }
- offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
- }
-}
-
-// The odd order in which the codegen code sizes are written.
-var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-type huffmanBitWriter struct {
- // writer is the underlying writer.
- // Do not use it directly; use the write method, which ensures
- // that Write errors are sticky.
- writer io.Writer
-
- // Data waiting to be written is bytes[0:nbytes]
- // and then the low nbits of bits.
- bits uint64
- nbits uint8
- nbytes uint8
- lastHuffMan bool
- literalEncoding *huffmanEncoder
- tmpLitEncoding *huffmanEncoder
- offsetEncoding *huffmanEncoder
- codegenEncoding *huffmanEncoder
- err error
- lastHeader int
- // Set between 0 (reused block can be up to 2x the size)
- logNewTablePenalty uint
- bytes [256 + 8]byte
- literalFreq [lengthCodesStart + 32]uint16
- offsetFreq [32]uint16
- codegenFreq [codegenCodeCount]uint16
-
- // codegen must have an extra space for the final symbol.
- codegen [literalCount + offsetCodeCount + 1]uint8
-}
-
-// Huffman reuse.
-//
-// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
-//
-// This is controlled by several variables:
-//
-// If lastHeader is non-zero the Huffman table can be reused.
-// This also indicates that a Huffman table has been generated that can output all
-// possible symbols.
-// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated
-// an EOB with the previous table must be written.
-//
-// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
-//
-// An incoming block estimates the output size of a new table using a 'fresh' by calculating the
-// optimal size and adding a penalty in 'logNewTablePenalty'.
-// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
-// is slower both for compression and decompression.
-
-func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
- return &huffmanBitWriter{
- writer: w,
- literalEncoding: newHuffmanEncoder(literalCount),
- tmpLitEncoding: newHuffmanEncoder(literalCount),
- codegenEncoding: newHuffmanEncoder(codegenCodeCount),
- offsetEncoding: newHuffmanEncoder(offsetCodeCount),
- }
-}
-
-func (w *huffmanBitWriter) reset(writer io.Writer) {
- w.writer = writer
- w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
- w.lastHeader = 0
- w.lastHuffMan = false
-}
-
-func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
- a := t.offHist[:offsetCodeCount]
- b := w.offsetEncoding.codes
- b = b[:len(a)]
- for i, v := range a {
- if v != 0 && b[i].zero() {
- return false
- }
- }
-
- a = t.extraHist[:literalCount-256]
- b = w.literalEncoding.codes[256:literalCount]
- b = b[:len(a)]
- for i, v := range a {
- if v != 0 && b[i].zero() {
- return false
- }
- }
-
- a = t.litHist[:256]
- b = w.literalEncoding.codes[:len(a)]
- for i, v := range a {
- if v != 0 && b[i].zero() {
- return false
- }
- }
- return true
-}
-
-func (w *huffmanBitWriter) flush() {
- if w.err != nil {
- w.nbits = 0
- return
- }
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
- n := w.nbytes
- for w.nbits != 0 {
- w.bytes[n] = byte(w.bits)
- w.bits >>= 8
- if w.nbits > 8 { // Avoid underflow
- w.nbits -= 8
- } else {
- w.nbits = 0
- }
- n++
- }
- w.bits = 0
- w.write(w.bytes[:n])
- w.nbytes = 0
-}
-
-func (w *huffmanBitWriter) write(b []byte) {
- if w.err != nil {
- return
- }
- _, w.err = w.writer.Write(b)
-}
-
-func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
- w.bits |= uint64(b) << (w.nbits & 63)
- w.nbits += nb
- if w.nbits >= 48 {
- w.writeOutBits()
- }
-}
-
-func (w *huffmanBitWriter) writeBytes(bytes []byte) {
- if w.err != nil {
- return
- }
- n := w.nbytes
- if w.nbits&7 != 0 {
- w.err = InternalError("writeBytes with unfinished bits")
- return
- }
- for w.nbits != 0 {
- w.bytes[n] = byte(w.bits)
- w.bits >>= 8
- w.nbits -= 8
- n++
- }
- if n != 0 {
- w.write(w.bytes[:n])
- }
- w.nbytes = 0
- w.write(bytes)
-}
-
-// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
-// the literal and offset lengths arrays (which are concatenated into a single
-// array). This method generates that run-length encoding.
-//
-// The result is written into the codegen array, and the frequencies
-// of each code is written into the codegenFreq array.
-// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
-// information. Code badCode is an end marker
-//
-// numLiterals The number of literals in literalEncoding
-// numOffsets The number of offsets in offsetEncoding
-// litenc, offenc The literal and offset encoder to use
-func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
- for i := range w.codegenFreq {
- w.codegenFreq[i] = 0
- }
- // Note that we are using codegen both as a temporary variable for holding
- // a copy of the frequencies, and as the place where we put the result.
- // This is fine because the output is always shorter than the input used
- // so far.
- codegen := w.codegen[:] // cache
- // Copy the concatenated code sizes to codegen. Put a marker at the end.
- cgnl := codegen[:numLiterals]
- for i := range cgnl {
- cgnl[i] = litEnc.codes[i].len()
- }
-
- cgnl = codegen[numLiterals : numLiterals+numOffsets]
- for i := range cgnl {
- cgnl[i] = offEnc.codes[i].len()
- }
- codegen[numLiterals+numOffsets] = badCode
-
- size := codegen[0]
- count := 1
- outIndex := 0
- for inIndex := 1; size != badCode; inIndex++ {
- // INVARIANT: We have seen "count" copies of size that have not yet
- // had output generated for them.
- nextSize := codegen[inIndex]
- if nextSize == size {
- count++
- continue
- }
- // We need to generate codegen indicating "count" of size.
- if size != 0 {
- codegen[outIndex] = size
- outIndex++
- w.codegenFreq[size]++
- count--
- for count >= 3 {
- n := 6
- if n > count {
- n = count
- }
- codegen[outIndex] = 16
- outIndex++
- codegen[outIndex] = uint8(n - 3)
- outIndex++
- w.codegenFreq[16]++
- count -= n
- }
- } else {
- for count >= 11 {
- n := 138
- if n > count {
- n = count
- }
- codegen[outIndex] = 18
- outIndex++
- codegen[outIndex] = uint8(n - 11)
- outIndex++
- w.codegenFreq[18]++
- count -= n
- }
- if count >= 3 {
- // count >= 3 && count <= 10
- codegen[outIndex] = 17
- outIndex++
- codegen[outIndex] = uint8(count - 3)
- outIndex++
- w.codegenFreq[17]++
- count = 0
- }
- }
- count--
- for ; count >= 0; count-- {
- codegen[outIndex] = size
- outIndex++
- w.codegenFreq[size]++
- }
- // Set up invariant for next time through the loop.
- size = nextSize
- count = 1
- }
- // Marker indicating the end of the codegen.
- codegen[outIndex] = badCode
-}
-
-func (w *huffmanBitWriter) codegens() int {
- numCodegens := len(w.codegenFreq)
- for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
- numCodegens--
- }
- return numCodegens
-}
-
-func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
- numCodegens = len(w.codegenFreq)
- for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
- numCodegens--
- }
- return 3 + 5 + 5 + 4 + (3 * numCodegens) +
- w.codegenEncoding.bitLength(w.codegenFreq[:]) +
- int(w.codegenFreq[16])*2 +
- int(w.codegenFreq[17])*3 +
- int(w.codegenFreq[18])*7, numCodegens
-}
-
-// dynamicSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
- size = litEnc.bitLength(w.literalFreq[:]) +
- offEnc.bitLength(w.offsetFreq[:])
- return size
-}
-
-// dynamicSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
- header, numCodegens := w.headerSize()
- size = header +
- litEnc.bitLength(w.literalFreq[:]) +
- offEnc.bitLength(w.offsetFreq[:]) +
- extraBits
- return size, numCodegens
-}
-
-// extraBitSize will return the number of bits that will be written
-// as "extra" bits on matches.
-func (w *huffmanBitWriter) extraBitSize() int {
- total := 0
- for i, n := range w.literalFreq[257:literalCount] {
- total += int(n) * int(lengthExtraBits[i&31])
- }
- for i, n := range w.offsetFreq[:offsetCodeCount] {
- total += int(n) * int(offsetExtraBits[i&31])
- }
- return total
-}
-
-// fixedSize returns the size of dynamically encoded data in bits.
-func (w *huffmanBitWriter) fixedSize(extraBits int) int {
- return 3 +
- fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
- fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
- extraBits
-}
-
-// storedSize calculates the stored size, including header.
-// The function returns the size in bits and whether the block
-// fits inside a single block.
-func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
- if in == nil {
- return 0, false
- }
- if len(in) <= maxStoreBlockSize {
- return (len(in) + 5) * 8, true
- }
- return 0, false
-}
-
-func (w *huffmanBitWriter) writeCode(c hcode) {
- // The function does not get inlined if we "& 63" the shift.
- w.bits |= c.code64() << (w.nbits & 63)
- w.nbits += c.len()
- if w.nbits >= 48 {
- w.writeOutBits()
- }
-}
-
-// writeOutBits will write bits to the buffer.
-func (w *huffmanBitWriter) writeOutBits() {
- bits := w.bits
- w.bits >>= 48
- w.nbits -= 48
- n := w.nbytes
-
- // We over-write, but faster...
- binary.LittleEndian.PutUint64(w.bytes[n:], bits)
- n += 6
-
- if n >= bufferFlushSize {
- if w.err != nil {
- n = 0
- return
- }
- w.write(w.bytes[:n])
- n = 0
- }
-
- w.nbytes = n
-}
-
-// Write the header of a dynamic Huffman block to the output stream.
-//
-// numLiterals The number of literals specified in codegen
-// numOffsets The number of offsets specified in codegen
-// numCodegens The number of codegens used in codegen
-func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
- if w.err != nil {
- return
- }
- var firstBits int32 = 4
- if isEof {
- firstBits = 5
- }
- w.writeBits(firstBits, 3)
- w.writeBits(int32(numLiterals-257), 5)
- w.writeBits(int32(numOffsets-1), 5)
- w.writeBits(int32(numCodegens-4), 4)
-
- for i := 0; i < numCodegens; i++ {
- value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
- w.writeBits(int32(value), 3)
- }
-
- i := 0
- for {
- var codeWord = uint32(w.codegen[i])
- i++
- if codeWord == badCode {
- break
- }
- w.writeCode(w.codegenEncoding.codes[codeWord])
-
- switch codeWord {
- case 16:
- w.writeBits(int32(w.codegen[i]), 2)
- i++
- case 17:
- w.writeBits(int32(w.codegen[i]), 3)
- i++
- case 18:
- w.writeBits(int32(w.codegen[i]), 7)
- i++
- }
- }
-}
-
-// writeStoredHeader will write a stored header.
-// If the stored block is only used for EOF,
-// it is replaced with a fixed huffman block.
-func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
- if w.err != nil {
- return
- }
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
-
- // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
- if length == 0 && isEof {
- w.writeFixedHeader(isEof)
- // EOB: 7 bits, value: 0
- w.writeBits(0, 7)
- w.flush()
- return
- }
-
- var flag int32
- if isEof {
- flag = 1
- }
- w.writeBits(flag, 3)
- w.flush()
- w.writeBits(int32(length), 16)
- w.writeBits(int32(^uint16(length)), 16)
-}
-
-func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
- if w.err != nil {
- return
- }
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
-
- // Indicate that we are a fixed Huffman block
- var value int32 = 2
- if isEof {
- value = 3
- }
- w.writeBits(value, 3)
-}
-
-// writeBlock will write a block of tokens with the smallest encoding.
-// The original input can be supplied, and if the huffman encoded data
-// is larger than the original bytes, the data will be written as a
-// stored block.
-// If the input is nil, the tokens will always be Huffman encoded.
-func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
- if w.err != nil {
- return
- }
-
- tokens.AddEOB()
- if w.lastHeader > 0 {
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
- numLiterals, numOffsets := w.indexTokens(tokens, false)
- w.generate()
- var extraBits int
- storedSize, storable := w.storedSize(input)
- if storable {
- extraBits = w.extraBitSize()
- }
-
- // Figure out smallest code.
- // Fixed Huffman baseline.
- var literalEncoding = fixedLiteralEncoding
- var offsetEncoding = fixedOffsetEncoding
- var size = math.MaxInt32
- if tokens.n < maxPredefinedTokens {
- size = w.fixedSize(extraBits)
- }
-
- // Dynamic Huffman?
- var numCodegens int
-
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
-
- if dynamicSize < size {
- size = dynamicSize
- literalEncoding = w.literalEncoding
- offsetEncoding = w.offsetEncoding
- }
-
- // Stored bytes?
- if storable && storedSize <= size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- // Huffman.
- if literalEncoding == fixedLiteralEncoding {
- w.writeFixedHeader(eof)
- } else {
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- }
-
- // Write the tokens.
- w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
-}
-
-// writeBlockDynamic encodes a block using a dynamic Huffman table.
-// This should be used if the symbols used have a disproportionate
-// histogram distribution.
-// If input is supplied and the compression savings are below 1/16th of the
-// input size the block is stored.
-func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
- if w.err != nil {
- return
- }
-
- sync = sync || eof
- if sync {
- tokens.AddEOB()
- }
-
- // We cannot reuse pure huffman table, and must mark as EOF.
- if (w.lastHuffMan || eof) && w.lastHeader > 0 {
- // We will not try to reuse.
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- w.lastHuffMan = false
- }
-
- // fillReuse enables filling of empty values.
- // This will make encodings always reusable without testing.
- // However, this does not appear to benefit on most cases.
- const fillReuse = false
-
- // Check if we can reuse...
- if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- }
-
- numLiterals, numOffsets := w.indexTokens(tokens, !sync)
- extraBits := 0
- ssize, storable := w.storedSize(input)
-
- const usePrefs = true
- if storable || w.lastHeader > 0 {
- extraBits = w.extraBitSize()
- }
-
- var size int
-
- // Check if we should reuse.
- if w.lastHeader > 0 {
- // Estimate size for using a new table.
- // Use the previous header size as the best estimate.
- newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
-
- // The estimated size is calculated as an optimal table.
- // We add a penalty to make it more realistic and re-use a bit more.
- reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
-
- // Check if a new table is better.
- if newSize < reuseSize {
- // Write the EOB we owe.
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- size = newSize
- w.lastHeader = 0
- } else {
- size = reuseSize
- }
-
- if tokens.n < maxPredefinedTokens {
- if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
- // Check if we get a reasonable size decrease.
- if storable && ssize <= size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
- w.writeFixedHeader(eof)
- if !sync {
- tokens.AddEOB()
- }
- w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
- return
- }
- }
- // Check if we get a reasonable size decrease.
- if storable && ssize <= size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
- }
-
- // We want a new block/table
- if w.lastHeader == 0 {
- if fillReuse && !sync {
- w.fillTokens()
- numLiterals, numOffsets = maxNumLit, maxNumDist
- } else {
- w.literalFreq[endBlockMarker] = 1
- }
-
- w.generate()
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
-
- var numCodegens int
- if fillReuse && !sync {
- // Reindex for accurate size...
- w.indexTokens(tokens, true)
- }
- size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
-
- // Store predefined, if we don't get a reasonable improvement.
- if tokens.n < maxPredefinedTokens {
- if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
- // Store bytes, if we don't get an improvement.
- if storable && ssize <= preSize {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
- w.writeFixedHeader(eof)
- if !sync {
- tokens.AddEOB()
- }
- w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
- return
- }
- }
-
- if storable && ssize <= size {
- // Store bytes, if we don't get an improvement.
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- // Write Huffman table.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- if !sync {
- w.lastHeader, _ = w.headerSize()
- }
- w.lastHuffMan = false
- }
-
- if sync {
- w.lastHeader = 0
- }
- // Write the tokens.
- w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
-}
-
-func (w *huffmanBitWriter) fillTokens() {
- for i, v := range w.literalFreq[:literalCount] {
- if v == 0 {
- w.literalFreq[i] = 1
- }
- }
- for i, v := range w.offsetFreq[:offsetCodeCount] {
- if v == 0 {
- w.offsetFreq[i] = 1
- }
- }
-}
-
-// indexTokens indexes a slice of tokens, and updates
-// literalFreq and offsetFreq, and generates literalEncoding
-// and offsetEncoding.
-// The number of literal and offset tokens is returned.
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
- //copy(w.literalFreq[:], t.litHist[:])
- *(*[256]uint16)(w.literalFreq[:]) = t.litHist
- //copy(w.literalFreq[256:], t.extraHist[:])
- *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
- w.offsetFreq = t.offHist
-
- if t.n == 0 {
- return
- }
- if filled {
- return maxNumLit, maxNumDist
- }
- // get the number of literals
- numLiterals = len(w.literalFreq)
- for w.literalFreq[numLiterals-1] == 0 {
- numLiterals--
- }
- // get the number of offsets
- numOffsets = len(w.offsetFreq)
- for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
- numOffsets--
- }
- if numOffsets == 0 {
- // We haven't found a single match. If we want to go with the dynamic encoding,
- // we should count at least one offset to be sure that the offset huffman tree could be encoded.
- w.offsetFreq[0] = 1
- numOffsets = 1
- }
- return
-}
-
-func (w *huffmanBitWriter) generate() {
- w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
- w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
-}
-
-// writeTokens writes a slice of tokens to the output.
-// codes for literal and offset encoding must be supplied.
-func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
- if w.err != nil {
- return
- }
- if len(tokens) == 0 {
- return
- }
-
- // Only last token should be endBlockMarker.
- var deferEOB bool
- if tokens[len(tokens)-1] == endBlockMarker {
- tokens = tokens[:len(tokens)-1]
- deferEOB = true
- }
-
- // Create slices up to the next power of two to avoid bounds checks.
- lits := leCodes[:256]
- offs := oeCodes[:32]
- lengths := leCodes[lengthCodesStart:]
- lengths = lengths[:32]
-
- // Go 1.16 LOVES having these on stack.
- bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
-
- for _, t := range tokens {
- if t < 256 {
- //w.writeCode(lits[t.literal()])
- c := lits[t]
- bits |= c.code64() << (nbits & 63)
- nbits += c.len()
- if nbits >= 48 {
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
- bits >>= 48
- nbits -= 48
- nbytes += 6
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- }
- continue
- }
-
- // Write the length
- length := t.length()
- lengthCode := lengthCode(length) & 31
- if false {
- w.writeCode(lengths[lengthCode])
- } else {
- // inlined
- c := lengths[lengthCode]
- bits |= c.code64() << (nbits & 63)
- nbits += c.len()
- if nbits >= 48 {
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
- bits >>= 48
- nbits -= 48
- nbytes += 6
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- }
- }
-
- if lengthCode >= lengthExtraBitsMinCode {
- extraLengthBits := lengthExtraBits[lengthCode]
- //w.writeBits(extraLength, extraLengthBits)
- extraLength := int32(length - lengthBase[lengthCode])
- bits |= uint64(extraLength) << (nbits & 63)
- nbits += extraLengthBits
- if nbits >= 48 {
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
- bits >>= 48
- nbits -= 48
- nbytes += 6
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- }
- }
- // Write the offset
- offset := t.offset()
- offsetCode := (offset >> 16) & 31
- if false {
- w.writeCode(offs[offsetCode])
- } else {
- // inlined
- c := offs[offsetCode]
- bits |= c.code64() << (nbits & 63)
- nbits += c.len()
- if nbits >= 48 {
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
- bits >>= 48
- nbits -= 48
- nbytes += 6
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- }
- }
-
- if offsetCode >= offsetExtraBitsMinCode {
- offsetComb := offsetCombined[offsetCode]
- //w.writeBits(extraOffset, extraOffsetBits)
- bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
- nbits += uint8(offsetComb)
- if nbits >= 48 {
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
- bits >>= 48
- nbits -= 48
- nbytes += 6
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- }
- }
- }
- // Restore...
- w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
-
- if deferEOB {
- w.writeCode(leCodes[endBlockMarker])
- }
-}
-
-// huffOffset is a static offset encoder used for huffman only encoding.
-// It can be reused since we will not be encoding offset values.
-var huffOffset *huffmanEncoder
-
-func init() {
- w := newHuffmanBitWriter(nil)
- w.offsetFreq[0] = 1
- huffOffset = newHuffmanEncoder(offsetCodeCount)
- huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
-}
-
-// writeBlockHuff encodes a block of bytes as either
-// Huffman encoded literals or uncompressed bytes if the
-// results only gains very little from compression.
-func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
- if w.err != nil {
- return
- }
-
- // Clear histogram
- for i := range w.literalFreq[:] {
- w.literalFreq[i] = 0
- }
- if !w.lastHuffMan {
- for i := range w.offsetFreq[:] {
- w.offsetFreq[i] = 0
- }
- }
-
- const numLiterals = endBlockMarker + 1
- const numOffsets = 1
-
- // Add everything as literals
- // We have to estimate the header size.
- // Assume header is around 70 bytes:
- // https://stackoverflow.com/a/25454430
- const guessHeaderSizeBits = 70 * 8
- histogram(input, w.literalFreq[:numLiterals])
- ssize, storable := w.storedSize(input)
- if storable && len(input) > 1024 {
- // Quick check for incompressible content.
- abs := float64(0)
- avg := float64(len(input)) / 256
- max := float64(len(input) * 2)
- for _, v := range w.literalFreq[:256] {
- diff := float64(v) - avg
- abs += diff * diff
- if abs > max {
- break
- }
- }
- if abs < max {
- if debugDeflate {
- fmt.Println("stored", abs, "<", max)
- }
- // No chance we can compress this...
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
- }
- w.literalFreq[endBlockMarker] = 1
- w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
- estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
- if estBits < math.MaxInt32 {
- estBits += w.lastHeader
- if w.lastHeader == 0 {
- estBits += guessHeaderSizeBits
- }
- estBits += estBits >> w.logNewTablePenalty
- }
-
- // Store bytes, if we don't get a reasonable improvement.
- if storable && ssize <= estBits {
- if debugDeflate {
- fmt.Println("stored,", ssize, "<=", estBits)
- }
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
- return
- }
-
- if w.lastHeader > 0 {
- reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
-
- if estBits < reuseSize {
- if debugDeflate {
- fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
- }
- // We owe an EOB
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- } else if debugDeflate {
- fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
- }
- }
-
- count := 0
- if w.lastHeader == 0 {
- // Use the temp encoding, so swap.
- w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
- // Generate codegen and codegenFrequencies, which indicates how to encode
- // the literalEncoding and the offsetEncoding.
- w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
- w.codegenEncoding.generate(w.codegenFreq[:], 7)
- numCodegens := w.codegens()
-
- // Huffman.
- w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- w.lastHuffMan = true
- w.lastHeader, _ = w.headerSize()
- if debugDeflate {
- count += w.lastHeader
- fmt.Println("header:", count/8)
- }
- }
-
- encoding := w.literalEncoding.codes[:256]
- // Go 1.16 LOVES having these on stack. At least 1.5x the speed.
- bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
-
- if debugDeflate {
- count -= int(nbytes)*8 + int(nbits)
- }
- // Unroll, write 3 codes/loop.
- // Fastest number of unrolls.
- for len(input) > 3 {
- // We must have at least 48 bits free.
- if nbits >= 8 {
- n := nbits >> 3
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- bits >>= (n * 8) & 63
- nbits -= n * 8
- nbytes += n
- }
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- if debugDeflate {
- count += int(nbytes) * 8
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- a, b := encoding[input[0]], encoding[input[1]]
- bits |= a.code64() << (nbits & 63)
- bits |= b.code64() << ((nbits + a.len()) & 63)
- c := encoding[input[2]]
- nbits += b.len() + a.len()
- bits |= c.code64() << (nbits & 63)
- nbits += c.len()
- input = input[3:]
- }
-
- // Remaining...
- for _, t := range input {
- if nbits >= 48 {
- binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
- //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
- bits >>= 48
- nbits -= 48
- nbytes += 6
- if nbytes >= bufferFlushSize {
- if w.err != nil {
- nbytes = 0
- return
- }
- if debugDeflate {
- count += int(nbytes) * 8
- }
- _, w.err = w.writer.Write(w.bytes[:nbytes])
- nbytes = 0
- }
- }
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
- bits |= c.code64() << (nbits & 63)
-
- nbits += c.len()
- if debugDeflate {
- count += int(c.len())
- }
- }
- // Restore...
- w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
-
- if debugDeflate {
- nb := count + int(nbytes)*8 + int(nbits)
- fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
- }
- // Flush if needed to have space.
- if w.nbits >= 48 {
- w.writeOutBits()
- }
-
- if eof || sync {
- w.writeCode(w.literalEncoding.codes[endBlockMarker])
- w.lastHeader = 0
- w.lastHuffMan = false
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_code.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_code.go
deleted file mode 100644
index be7b58b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "math"
- "math/bits"
-)
-
-const (
- maxBitsLimit = 16
- // number of valid literals
- literalCount = 286
-)
-
-// hcode is a huffman code with a bit code and bit length.
-type hcode uint32
-
-func (h hcode) len() uint8 {
- return uint8(h)
-}
-
-func (h hcode) code64() uint64 {
- return uint64(h >> 8)
-}
-
-func (h hcode) zero() bool {
- return h == 0
-}
-
-type huffmanEncoder struct {
- codes []hcode
- bitCount [17]int32
-
- // Allocate a reusable buffer with the longest possible frequency table.
- // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
- // The largest of these is literalCount, so we allocate for that case.
- freqcache [literalCount + 1]literalNode
-}
-
-type literalNode struct {
- literal uint16
- freq uint16
-}
-
-// A levelInfo describes the state of the constructed tree for a given depth.
-type levelInfo struct {
- // Our level. for better printing
- level int32
-
- // The frequency of the last node at this level
- lastFreq int32
-
- // The frequency of the next character to add to this level
- nextCharFreq int32
-
- // The frequency of the next pair (from level below) to add to this level.
- // Only valid if the "needed" value of the next lower level is 0.
- nextPairFreq int32
-
- // The number of chains remaining to generate for this level before moving
- // up to the next level
- needed int32
-}
-
-// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint8) {
- *h = hcode(length) | (hcode(code) << 8)
-}
-
-func newhcode(code uint16, length uint8) hcode {
- return hcode(length) | (hcode(code) << 8)
-}
-
-func reverseBits(number uint16, bitLength byte) uint16 {
- return bits.Reverse16(number << ((16 - bitLength) & 15))
-}
-
-func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
-
-func newHuffmanEncoder(size int) *huffmanEncoder {
- // Make capacity to next power of two.
- c := uint(bits.Len32(uint32(size - 1)))
- return &huffmanEncoder{codes: make([]hcode, size, 1<
= 3
-// The cases of 0, 1, and 2 literals are handled by special case code.
-//
-// list An array of the literals with non-zero frequencies
-//
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
-//
-// maxBits The maximum number of bits that should be used to encode any literal.
-//
-// Must be less than 16.
-//
-// return An integer array in which array[i] indicates the number of literals
-//
-// that should be encoded in i bits.
-func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
- if maxBits >= maxBitsLimit {
- panic("flate: maxBits too large")
- }
- n := int32(len(list))
- list = list[0 : n+1]
- list[n] = maxNode()
-
- // The tree can't have greater depth than n - 1, no matter what. This
- // saves a little bit of work in some small cases
- if maxBits > n-1 {
- maxBits = n - 1
- }
-
- // Create information about each of the levels.
- // A bogus "Level 0" whose sole purpose is so that
- // level1.prev.needed==0. This makes level1.nextPairFreq
- // be a legitimate value that never gets chosen.
- var levels [maxBitsLimit]levelInfo
- // leafCounts[i] counts the number of literals at the left
- // of ancestors of the rightmost node at level i.
- // leafCounts[i][j] is the number of literals at the left
- // of the level j ancestor.
- var leafCounts [maxBitsLimit][maxBitsLimit]int32
-
- // Descending to only have 1 bounds check.
- l2f := int32(list[2].freq)
- l1f := int32(list[1].freq)
- l0f := int32(list[0].freq) + int32(list[1].freq)
-
- for level := int32(1); level <= maxBits; level++ {
- // For every level, the first two items are the first two characters.
- // We initialize the levels as if we had already figured this out.
- levels[level] = levelInfo{
- level: level,
- lastFreq: l1f,
- nextCharFreq: l2f,
- nextPairFreq: l0f,
- }
- leafCounts[level][level] = 2
- if level == 1 {
- levels[level].nextPairFreq = math.MaxInt32
- }
- }
-
- // We need a total of 2*n - 2 items at top level and have already generated 2.
- levels[maxBits].needed = 2*n - 4
-
- level := uint32(maxBits)
- for level < 16 {
- l := &levels[level]
- if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
- // We've run out of both leafs and pairs.
- // End all calculations for this level.
- // To make sure we never come back to this level or any lower level,
- // set nextPairFreq impossibly large.
- l.needed = 0
- levels[level+1].nextPairFreq = math.MaxInt32
- level++
- continue
- }
-
- prevFreq := l.lastFreq
- if l.nextCharFreq < l.nextPairFreq {
- // The next item on this row is a leaf node.
- n := leafCounts[level][level] + 1
- l.lastFreq = l.nextCharFreq
- // Lower leafCounts are the same of the previous node.
- leafCounts[level][level] = n
- e := list[n]
- if e.literal < math.MaxUint16 {
- l.nextCharFreq = int32(e.freq)
- } else {
- l.nextCharFreq = math.MaxInt32
- }
- } else {
- // The next item on this row is a pair from the previous row.
- // nextPairFreq isn't valid until we generate two
- // more values in the level below
- l.lastFreq = l.nextPairFreq
- // Take leaf counts from the lower level, except counts[level] remains the same.
- if true {
- save := leafCounts[level][level]
- leafCounts[level] = leafCounts[level-1]
- leafCounts[level][level] = save
- } else {
- copy(leafCounts[level][:level], leafCounts[level-1][:level])
- }
- levels[l.level-1].needed = 2
- }
-
- if l.needed--; l.needed == 0 {
- // We've done everything we need to do for this level.
- // Continue calculating one level up. Fill in nextPairFreq
- // of that level with the sum of the two nodes we've just calculated on
- // this level.
- if l.level == maxBits {
- // All done!
- break
- }
- levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
- level++
- } else {
- // If we stole from below, move down temporarily to replenish it.
- for levels[level-1].needed > 0 {
- level--
- }
- }
- }
-
- // Somethings is wrong if at the end, the top level is null or hasn't used
- // all of the leaves.
- if leafCounts[maxBits][maxBits] != n {
- panic("leafCounts[maxBits][maxBits] != n")
- }
-
- bitCount := h.bitCount[:maxBits+1]
- bits := 1
- counts := &leafCounts[maxBits]
- for level := maxBits; level > 0; level-- {
- // chain.leafCount gives the number of literals requiring at least "bits"
- // bits to encode.
- bitCount[bits] = counts[level] - counts[level-1]
- bits++
- }
- return bitCount
-}
-
-// Look at the leaves and assign them a bit count and an encoding as specified
-// in RFC 1951 3.2.2
-func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
- code := uint16(0)
- for n, bits := range bitCount {
- code <<= 1
- if n == 0 || bits == 0 {
- continue
- }
- // The literals list[len(list)-bits] .. list[len(list)-bits]
- // are encoded using "bits" bits, and get the values
- // code, code + 1, .... The code values are
- // assigned in literal order (not frequency order).
- chunk := list[len(list)-int(bits):]
-
- sortByLiteral(chunk)
- for _, node := range chunk {
- h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
- code++
- }
- list = list[0 : len(list)-int(bits)]
- }
-}
-
-// Update this Huffman Code object to be the minimum code for the specified frequency count.
-//
-// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
-// maxBits The maximum number of bits to use for any literal.
-func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
- list := h.freqcache[:len(freq)+1]
- codes := h.codes[:len(freq)]
- // Number of non-zero literals
- count := 0
- // Set list to be the set of all non-zero literals and their frequencies
- for i, f := range freq {
- if f != 0 {
- list[count] = literalNode{uint16(i), f}
- count++
- } else {
- codes[i] = 0
- }
- }
- list[count] = literalNode{}
-
- list = list[:count]
- if count <= 2 {
- // Handle the small cases here, because they are awkward for the general case code. With
- // two or fewer literals, everything has bit length 1.
- for i, node := range list {
- // "list" is in order of increasing literal value.
- h.codes[node.literal].set(uint16(i), 1)
- }
- return
- }
- sortByFreq(list)
-
- // Get the number of literals for each bit count
- bitCount := h.bitCounts(list, maxBits)
- // And do the assignment
- h.assignEncodingAndSize(bitCount, list)
-}
-
-// atLeastOne clamps the result between 1 and 15.
-func atLeastOne(v float32) float32 {
- if v < 1 {
- return 1
- }
- if v > 15 {
- return 15
- }
- return v
-}
-
-func histogram(b []byte, h []uint16) {
- if true && len(b) >= 8<<10 {
- // Split for bigger inputs
- histogramSplit(b, h)
- } else {
- h = h[:256]
- for _, t := range b {
- h[t]++
- }
- }
-}
-
-func histogramSplit(b []byte, h []uint16) {
- // Tested, and slightly faster than 2-way.
- // Writing to separate arrays and combining is also slightly slower.
- h = h[:256]
- for len(b)&3 != 0 {
- h[b[0]]++
- b = b[1:]
- }
- n := len(b) / 4
- x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
- y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
- for i, t := range x {
- v0 := &h[t]
- v1 := &h[y[i]]
- v3 := &h[w[i]]
- v2 := &h[z[i]]
- *v0++
- *v1++
- *v2++
- *v3++
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
deleted file mode 100644
index 6c05ba8..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// Sort sorts data.
-// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
-// data.Less and data.Swap. The sort is not guaranteed to be stable.
-func sortByFreq(data []literalNode) {
- n := len(data)
- quickSortByFreq(data, 0, n, maxDepth(n))
-}
-
-func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
- for b-a > 12 { // Use ShellSort for slices <= 12 elements
- if maxDepth == 0 {
- heapSort(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivotByFreq(data, a, b)
- // Avoiding recursion on the larger subproblem guarantees
- // a stack depth of at most lg(b-a).
- if mlo-a < b-mhi {
- quickSortByFreq(data, a, mlo, maxDepth)
- a = mhi // i.e., quickSortByFreq(data, mhi, b)
- } else {
- quickSortByFreq(data, mhi, b, maxDepth)
- b = mlo // i.e., quickSortByFreq(data, a, mlo)
- }
- }
- if b-a > 1 {
- // Do ShellSort pass with gap 6
- // It could be written in this simplified form cause b-a <= 12
- for i := a + 6; i < b; i++ {
- if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
- data[i], data[i-6] = data[i-6], data[i]
- }
- }
- insertionSortByFreq(data, a, b)
- }
-}
-
-func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
- if hi-lo > 40 {
- // Tukey's ``Ninther,'' median of three medians of three.
- s := (hi - lo) / 8
- medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
- medianOfThreeSortByFreq(data, m, m-s, m+s)
- medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThreeSortByFreq(data, lo, m, hi-1)
-
- // Invariants are:
- // data[lo] = pivot (set up by ChoosePivot)
- // data[lo < i < a] < pivot
- // data[a <= i < b] <= pivot
- // data[b <= i < c] unexamined
- // data[c <= i < hi-1] > pivot
- // data[hi-1] >= pivot
- pivot := lo
- a, c := lo+1, hi-1
-
- for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
- }
- b := a
- for {
- for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
- }
- for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
- }
- if b >= c {
- break
- }
- // data[b] > pivot; data[c-1] <= pivot
- data[b], data[c-1] = data[c-1], data[b]
- b++
- c--
- }
- // If hi-c<3 then there are duplicates (by property of median of nine).
- // Let's be a bit more conservative, and set border to 5.
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- // Lets test some points for equality to pivot
- dups := 0
- if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
- data[c], data[hi-1] = data[hi-1], data[c]
- c++
- dups++
- }
- if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
- b--
- dups++
- }
- // m-lo = (hi-lo)/2 > 6
- // b-lo > (hi-lo)*3/4-1 > 8
- // ==> m < b ==> data[m] <= pivot
- if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
- data[m], data[b-1] = data[b-1], data[m]
- b--
- dups++
- }
- // if at least 2 points are equal to pivot, assume skewed distribution
- protect = dups > 1
- }
- if protect {
- // Protect against a lot of duplicates
- // Add invariant:
- // data[a <= i < b] unexamined
- // data[b <= i < c] = pivot
- for {
- for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
- }
- for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
- }
- if a >= b {
- break
- }
- // data[a] == pivot; data[b-1] < pivot
- data[a], data[b-1] = data[b-1], data[a]
- a++
- b--
- }
- }
- // Swap pivot into middle
- data[pivot], data[b-1] = data[b-1], data[pivot]
- return b - 1, c
-}
-
-// Insertion sort
-func insertionSortByFreq(data []literalNode, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// quickSortByFreq, loosely following Bentley and McIlroy,
-// ``Engineering a Sort Function,'' SP&E November 1993.
-
-// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
-func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
- // sort 3 elements
- if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
- data[m1], data[m0] = data[m0], data[m1]
- }
- // data[m0] <= data[m1]
- if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
- data[m2], data[m1] = data[m1], data[m2]
- // data[m0] <= data[m2] && data[m1] < data[m2]
- if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
- data[m1], data[m0] = data[m0], data[m1]
- }
- }
- // now data[m0] <= data[m1] <= data[m2]
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
deleted file mode 100644
index 93f1aea..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-// Sort sorts data.
-// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
-// data.Less and data.Swap. The sort is not guaranteed to be stable.
-func sortByLiteral(data []literalNode) {
- n := len(data)
- quickSort(data, 0, n, maxDepth(n))
-}
-
-func quickSort(data []literalNode, a, b, maxDepth int) {
- for b-a > 12 { // Use ShellSort for slices <= 12 elements
- if maxDepth == 0 {
- heapSort(data, a, b)
- return
- }
- maxDepth--
- mlo, mhi := doPivot(data, a, b)
- // Avoiding recursion on the larger subproblem guarantees
- // a stack depth of at most lg(b-a).
- if mlo-a < b-mhi {
- quickSort(data, a, mlo, maxDepth)
- a = mhi // i.e., quickSort(data, mhi, b)
- } else {
- quickSort(data, mhi, b, maxDepth)
- b = mlo // i.e., quickSort(data, a, mlo)
- }
- }
- if b-a > 1 {
- // Do ShellSort pass with gap 6
- // It could be written in this simplified form cause b-a <= 12
- for i := a + 6; i < b; i++ {
- if data[i].literal < data[i-6].literal {
- data[i], data[i-6] = data[i-6], data[i]
- }
- }
- insertionSort(data, a, b)
- }
-}
-func heapSort(data []literalNode, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDown(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDown(data, lo, i, first)
- }
-}
-
-// siftDown implements the heap property on data[lo, hi).
-// first is an offset into the array where the root of the heap lies.
-func siftDown(data []literalNode, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
- child++
- }
- if data[first+root].literal > data[first+child].literal {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
- m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
- if hi-lo > 40 {
- // Tukey's ``Ninther,'' median of three medians of three.
- s := (hi - lo) / 8
- medianOfThree(data, lo, lo+s, lo+2*s)
- medianOfThree(data, m, m-s, m+s)
- medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
- }
- medianOfThree(data, lo, m, hi-1)
-
- // Invariants are:
- // data[lo] = pivot (set up by ChoosePivot)
- // data[lo < i < a] < pivot
- // data[a <= i < b] <= pivot
- // data[b <= i < c] unexamined
- // data[c <= i < hi-1] > pivot
- // data[hi-1] >= pivot
- pivot := lo
- a, c := lo+1, hi-1
-
- for ; a < c && data[a].literal < data[pivot].literal; a++ {
- }
- b := a
- for {
- for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
- }
- for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
- }
- if b >= c {
- break
- }
- // data[b] > pivot; data[c-1] <= pivot
- data[b], data[c-1] = data[c-1], data[b]
- b++
- c--
- }
- // If hi-c<3 then there are duplicates (by property of median of nine).
- // Let's be a bit more conservative, and set border to 5.
- protect := hi-c < 5
- if !protect && hi-c < (hi-lo)/4 {
- // Lets test some points for equality to pivot
- dups := 0
- if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
- data[c], data[hi-1] = data[hi-1], data[c]
- c++
- dups++
- }
- if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
- b--
- dups++
- }
- // m-lo = (hi-lo)/2 > 6
- // b-lo > (hi-lo)*3/4-1 > 8
- // ==> m < b ==> data[m] <= pivot
- if data[m].literal > data[pivot].literal { // data[m] = pivot
- data[m], data[b-1] = data[b-1], data[m]
- b--
- dups++
- }
- // if at least 2 points are equal to pivot, assume skewed distribution
- protect = dups > 1
- }
- if protect {
- // Protect against a lot of duplicates
- // Add invariant:
- // data[a <= i < b] unexamined
- // data[b <= i < c] = pivot
- for {
- for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
- }
- for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
- }
- if a >= b {
- break
- }
- // data[a] == pivot; data[b-1] < pivot
- data[a], data[b-1] = data[b-1], data[a]
- a++
- b--
- }
- }
- // Swap pivot into middle
- data[pivot], data[b-1] = data[b-1], data[pivot]
- return b - 1, c
-}
-
-// Insertion sort
-func insertionSort(data []literalNode, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// maxDepth returns a threshold at which quicksort should switch
-// to heapsort. It returns 2*ceil(lg(n+1)).
-func maxDepth(n int) int {
- var depth int
- for i := n; i > 0; i >>= 1 {
- depth++
- }
- return depth * 2
-}
-
-// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
-func medianOfThree(data []literalNode, m1, m0, m2 int) {
- // sort 3 elements
- if data[m1].literal < data[m0].literal {
- data[m1], data[m0] = data[m0], data[m1]
- }
- // data[m0] <= data[m1]
- if data[m2].literal < data[m1].literal {
- data[m2], data[m1] = data[m1], data[m2]
- // data[m0] <= data[m2] && data[m1] < data[m2]
- if data[m1].literal < data[m0].literal {
- data[m1], data[m0] = data[m0], data[m1]
- }
- }
- // now data[m0] <= data[m1] <= data[m2]
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/inflate.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/inflate.go
deleted file mode 100644
index 2f410d6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/inflate.go
+++ /dev/null
@@ -1,829 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package flate implements the DEFLATE compressed data format, described in
-// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
-// formats.
-package flate
-
-import (
- "bufio"
- "compress/flate"
- "fmt"
- "io"
- "math/bits"
- "sync"
-)
-
-const (
- maxCodeLen = 16 // max length of Huffman code
- maxCodeLenMask = 15 // mask for max length of Huffman code
- // The next three numbers come from the RFC section 3.2.7, with the
- // additional proviso in section 3.2.5 which implies that distance codes
- // 30 and 31 should never occur in compressed data.
- maxNumLit = 286
- maxNumDist = 30
- numCodes = 19 // number of codes in Huffman meta-code
-
- debugDecode = false
-)
-
-// Value of length - 3 and extra bits.
-type lengthExtra struct {
- length, extra uint8
-}
-
-var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
-
-var bitMask32 = [32]uint32{
- 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
- 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
- 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
- 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
-} // up to 32 bits
-
-// Initialize the fixedHuffmanDecoder only once upon first use.
-var fixedOnce sync.Once
-var fixedHuffmanDecoder huffmanDecoder
-
-// A CorruptInputError reports the presence of corrupt input at a given offset.
-type CorruptInputError = flate.CorruptInputError
-
-// An InternalError reports an error in the flate code itself.
-type InternalError string
-
-func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
-
-// A ReadError reports an error encountered while reading input.
-//
-// Deprecated: No longer returned.
-type ReadError = flate.ReadError
-
-// A WriteError reports an error encountered while writing output.
-//
-// Deprecated: No longer returned.
-type WriteError = flate.WriteError
-
-// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to
-// to switch to a new underlying Reader. This permits reusing a ReadCloser
-// instead of allocating a new one.
-type Resetter interface {
- // Reset discards any buffered data and resets the Resetter as if it was
- // newly initialized with the given reader.
- Reset(r io.Reader, dict []byte) error
-}
-
-// The data structure for decoding Huffman tables is based on that of
-// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
-// For codes smaller than the table width, there are multiple entries
-// (each combination of trailing bits has the same value). For codes
-// larger than the table width, the table contains a link to an overflow
-// table. The width of each entry in the link table is the maximum code
-// size minus the chunk width.
-//
-// Note that you can do a lookup in the table even without all bits
-// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
-// have the property that shorter codes come before longer ones, the
-// bit length estimate in the result is a lower bound on the actual
-// number of bits.
-//
-// See the following:
-// http://www.gzip.org/algorithm.txt
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
- huffmanChunkBits = 9
- huffmanNumChunks = 1 << huffmanChunkBits
- huffmanCountMask = 15
- huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
- maxRead int // the maximum number of bits we can read and not overread
- chunks *[huffmanNumChunks]uint16 // chunks as described above
- links [][]uint16 // overflow links
- linkMask uint32 // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(lengths []int) bool {
- // Sanity enables additional runtime tests during Huffman
- // table construction. It's intended to be used during
- // development to supplement the currently ad-hoc unit tests.
- const sanity = false
-
- if h.chunks == nil {
- h.chunks = new([huffmanNumChunks]uint16)
- }
-
- if h.maxRead != 0 {
- *h = huffmanDecoder{chunks: h.chunks, links: h.links}
- }
-
- // Count number of codes of each length,
- // compute maxRead and max length.
- var count [maxCodeLen]int
- var min, max int
- for _, n := range lengths {
- if n == 0 {
- continue
- }
- if min == 0 || n < min {
- min = n
- }
- if n > max {
- max = n
- }
- count[n&maxCodeLenMask]++
- }
-
- // Empty tree. The decompressor.huffSym function will fail later if the tree
- // is used. Technically, an empty tree is only valid for the HDIST tree and
- // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
- // is guaranteed to fail since it will attempt to use the tree to decode the
- // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
- // guaranteed to fail later since the compressed data section must be
- // composed of at least one symbol (the end-of-block marker).
- if max == 0 {
- return true
- }
-
- code := 0
- var nextcode [maxCodeLen]int
- for i := min; i <= max; i++ {
- code <<= 1
- nextcode[i&maxCodeLenMask] = code
- code += count[i&maxCodeLenMask]
- }
-
- // Check that the coding is complete (i.e., that we've
- // assigned all 2-to-the-max possible bit sequences).
- // Exception: To be compatible with zlib, we also need to
- // accept degenerate single-code codings. See also
- // TestDegenerateHuffmanCoding.
-	if code != 1<<uint(max) && !(code == 1 && max == 1) {
-		if debugDecode {
-			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), max == 1, code == 1)
-		}
-		return false
-	}
-
-	h.maxRead = min
-
-	chunks := h.chunks[:]
-	for i := range chunks {
-		chunks[i] = 0
-	}
-
-	if max > huffmanChunkBits {
- numLinks := 1 << (uint(max) - huffmanChunkBits)
- h.linkMask = uint32(numLinks - 1)
-
- // create link tables
- link := nextcode[huffmanChunkBits+1] >> 1
- if cap(h.links) < huffmanNumChunks-link {
- h.links = make([][]uint16, huffmanNumChunks-link)
- } else {
- h.links = h.links[:huffmanNumChunks-link]
- }
- for j := uint(link); j < huffmanNumChunks; j++ {
- reverse := int(bits.Reverse16(uint16(j)))
- reverse >>= uint(16 - huffmanChunkBits)
- off := j - uint(link)
- if sanity && h.chunks[reverse] != 0 {
- panic("impossible: overwriting existing chunk")
- }
-			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
-			h.links[off] = make([]uint16, numLinks)
-		}
-	} else {
-		h.links = h.links[:0]
-	}
-
-	for i, n := range lengths {
-		if n == 0 {
-			continue
-		}
-		code := nextcode[n]
-		nextcode[n]++
-		chunk := uint16(i<<huffmanValueShift | n)
-		reverse := int(bits.Reverse16(uint16(code)))
-		reverse >>= uint(16 - n)
- if n <= huffmanChunkBits {
- for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
- // We should never need to overwrite
- // an existing chunk. Also, 0 is
- // never a valid chunk, because the
- // lower 4 "count" bits should be
- // between 1 and 15.
- if sanity && h.chunks[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- h.chunks[off] = chunk
- }
- } else {
- j := reverse & (huffmanNumChunks - 1)
- if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
- // Longer codes should have been
- // associated with a link table above.
- panic("impossible: not an indirect chunk")
- }
- value := h.chunks[j] >> huffmanValueShift
- linktab := h.links[value]
- reverse >>= huffmanChunkBits
- for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
- if sanity && linktab[off] != 0 {
- panic("impossible: overwriting existing chunk")
- }
- linktab[off] = chunk
- }
- }
- }
-
- if sanity {
- // Above we've sanity checked that we never overwrote
- // an existing entry. Here we additionally check that
- // we filled the tables completely.
- for i, chunk := range h.chunks {
- if chunk == 0 {
- // As an exception, in the degenerate
- // single-code case, we allow odd
- // chunks to be missing.
- if code == 1 && i%2 == 1 {
- continue
- }
- panic("impossible: missing chunk")
- }
- }
- for _, linktab := range h.links {
- for _, chunk := range linktab {
- if chunk == 0 {
- panic("impossible: missing chunk")
- }
- }
- }
- }
-
- return true
-}
-
-// Reader is the actual read interface needed by NewReader.
-// If the passed in io.Reader does not also have ReadByte,
-// the NewReader will introduce its own buffering.
-type Reader interface {
- io.Reader
- io.ByteReader
-}
-
-type step uint8
-
-const (
- copyData step = iota + 1
- nextBlock
- huffmanBytesBuffer
- huffmanBytesReader
- huffmanBufioReader
- huffmanStringsReader
- huffmanGenericReader
-)
-
-// Decompress state.
-type decompressor struct {
- // Input source.
- r Reader
- roffset int64
-
- // Huffman decoders for literal/length, distance.
- h1, h2 huffmanDecoder
-
- // Length arrays used to define Huffman codes.
- bits *[maxNumLit + maxNumDist]int
- codebits *[numCodes]int
-
- // Output history, buffer.
- dict dictDecoder
-
- // Next step in the decompression,
- // and decompression state.
- step step
- stepState int
- err error
- toRead []byte
- hl, hd *huffmanDecoder
- copyLen int
- copyDist int
-
- // Temporary buffer (avoids repeated allocation).
- buf [4]byte
-
- // Input bits, in top of b.
- b uint32
-
- nb uint
- final bool
-}
-
-func (f *decompressor) nextBlock() {
- for f.nb < 1+2 {
- if f.err = f.moreBits(); f.err != nil {
- return
- }
- }
- f.final = f.b&1 == 1
- f.b >>= 1
- typ := f.b & 3
- f.b >>= 2
- f.nb -= 1 + 2
- switch typ {
- case 0:
- f.dataBlock()
- if debugDecode {
- fmt.Println("stored block")
- }
- case 1:
- // compressed, fixed Huffman tables
- f.hl = &fixedHuffmanDecoder
- f.hd = nil
- f.huffmanBlockDecoder()
- if debugDecode {
- fmt.Println("predefinied huffman block")
- }
- case 2:
- // compressed, dynamic Huffman tables
- if f.err = f.readHuffman(); f.err != nil {
- break
- }
- f.hl = &f.h1
- f.hd = &f.h2
- f.huffmanBlockDecoder()
- if debugDecode {
- fmt.Println("dynamic huffman block")
- }
- default:
- // 3 is reserved.
- if debugDecode {
- fmt.Println("reserved data block encountered")
- }
- f.err = CorruptInputError(f.roffset)
- }
-}
-
-func (f *decompressor) Read(b []byte) (int, error) {
- for {
- if len(f.toRead) > 0 {
- n := copy(b, f.toRead)
- f.toRead = f.toRead[n:]
- if len(f.toRead) == 0 {
- return n, f.err
- }
- return n, nil
- }
- if f.err != nil {
- return 0, f.err
- }
-
- f.doStep()
-
- if f.err != nil && len(f.toRead) == 0 {
- f.toRead = f.dict.readFlush() // Flush what's left in case of error
- }
- }
-}
-
-// WriteTo implements the io.WriteTo interface for io.Copy and friends.
-func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
- total := int64(0)
- flushed := false
- for {
- if len(f.toRead) > 0 {
- n, err := w.Write(f.toRead)
- total += int64(n)
- if err != nil {
- f.err = err
- return total, err
- }
- if n != len(f.toRead) {
- return total, io.ErrShortWrite
- }
- f.toRead = f.toRead[:0]
- }
- if f.err != nil && flushed {
- if f.err == io.EOF {
- return total, nil
- }
- return total, f.err
- }
- if f.err == nil {
- f.doStep()
- }
- if len(f.toRead) == 0 && f.err != nil && !flushed {
- f.toRead = f.dict.readFlush() // Flush what's left in case of error
- flushed = true
- }
- }
-}
-
-func (f *decompressor) Close() error {
- if f.err == io.EOF {
- return nil
- }
- return f.err
-}
-
-// RFC 1951 section 3.2.7.
-// Compression with dynamic Huffman codes
-
-var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-func (f *decompressor) readHuffman() error {
- // HLIT[5], HDIST[5], HCLEN[4].
- for f.nb < 5+5+4 {
- if err := f.moreBits(); err != nil {
- return err
- }
- }
- nlit := int(f.b&0x1F) + 257
- if nlit > maxNumLit {
- if debugDecode {
- fmt.Println("nlit > maxNumLit", nlit)
- }
- return CorruptInputError(f.roffset)
- }
- f.b >>= 5
- ndist := int(f.b&0x1F) + 1
- if ndist > maxNumDist {
- if debugDecode {
- fmt.Println("ndist > maxNumDist", ndist)
- }
- return CorruptInputError(f.roffset)
- }
- f.b >>= 5
- nclen := int(f.b&0xF) + 4
- // numCodes is 19, so nclen is always valid.
- f.b >>= 4
- f.nb -= 5 + 5 + 4
-
- // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
- for i := 0; i < nclen; i++ {
- for f.nb < 3 {
- if err := f.moreBits(); err != nil {
- return err
- }
- }
- f.codebits[codeOrder[i]] = int(f.b & 0x7)
- f.b >>= 3
- f.nb -= 3
- }
- for i := nclen; i < len(codeOrder); i++ {
- f.codebits[codeOrder[i]] = 0
- }
- if !f.h1.init(f.codebits[0:]) {
- if debugDecode {
- fmt.Println("init codebits failed")
- }
- return CorruptInputError(f.roffset)
- }
-
- // HLIT + 257 code lengths, HDIST + 1 code lengths,
- // using the code length Huffman code.
- for i, n := 0, nlit+ndist; i < n; {
- x, err := f.huffSym(&f.h1)
- if err != nil {
- return err
- }
- if x < 16 {
- // Actual length.
- f.bits[i] = x
- i++
- continue
- }
- // Repeat previous length or zero.
- var rep int
- var nb uint
- var b int
- switch x {
- default:
- return InternalError("unexpected length code")
- case 16:
- rep = 3
- nb = 2
- if i == 0 {
- if debugDecode {
- fmt.Println("i==0")
- }
- return CorruptInputError(f.roffset)
- }
- b = f.bits[i-1]
- case 17:
- rep = 3
- nb = 3
- b = 0
- case 18:
- rep = 11
- nb = 7
- b = 0
- }
- for f.nb < nb {
- if err := f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits:", err)
- }
- return err
- }
- }
-		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
- if i+rep > n {
- if debugDecode {
- fmt.Println("i+rep > n", i, rep, n)
- }
- return CorruptInputError(f.roffset)
- }
- for j := 0; j < rep; j++ {
- f.bits[i] = b
- i++
- }
- }
-
- if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
- if debugDecode {
- fmt.Println("init2 failed")
- }
- return CorruptInputError(f.roffset)
- }
-
- // As an optimization, we can initialize the maxRead bits to read at a time
- // for the HLIT tree to the length of the EOB marker since we know that
- // every block must terminate with one. This preserves the property that
- // we never read any extra bytes after the end of the DEFLATE stream.
- if f.h1.maxRead < f.bits[endBlockMarker] {
- f.h1.maxRead = f.bits[endBlockMarker]
- }
- if !f.final {
- // If not the final block, the smallest block possible is
- // a predefined table, BTYPE=01, with a single EOB marker.
- // This will take up 3 + 7 bits.
- f.h1.maxRead += 10
- }
-
- return nil
-}
-
-// Copy a single uncompressed data block from input to output.
-func (f *decompressor) dataBlock() {
- // Uncompressed.
- // Discard current half-byte.
- left := (f.nb) & 7
- f.nb -= left
- f.b >>= left
-
- offBytes := f.nb >> 3
- // Unfilled values will be overwritten.
- f.buf[0] = uint8(f.b)
- f.buf[1] = uint8(f.b >> 8)
- f.buf[2] = uint8(f.b >> 16)
- f.buf[3] = uint8(f.b >> 24)
-
- f.roffset += int64(offBytes)
- f.nb, f.b = 0, 0
-
- // Length then ones-complement of length.
- nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
- f.roffset += int64(nr)
- if err != nil {
- f.err = noEOF(err)
- return
- }
- n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
- nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
- if nn != ^n {
- if debugDecode {
- ncomp := ^n
- fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- if n == 0 {
- f.toRead = f.dict.readFlush()
- f.finishBlock()
- return
- }
-
- f.copyLen = int(n)
- f.copyData()
-}
-
-// copyData copies f.copyLen bytes from the underlying reader into f.hist.
-// It pauses for reads when f.hist is full.
-func (f *decompressor) copyData() {
- buf := f.dict.writeSlice()
- if len(buf) > f.copyLen {
- buf = buf[:f.copyLen]
- }
-
- cnt, err := io.ReadFull(f.r, buf)
- f.roffset += int64(cnt)
- f.copyLen -= cnt
- f.dict.writeMark(cnt)
- if err != nil {
- f.err = noEOF(err)
- return
- }
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = copyData
- return
- }
- f.finishBlock()
-}
-
-func (f *decompressor) finishBlock() {
- if f.final {
- if f.dict.availRead() > 0 {
- f.toRead = f.dict.readFlush()
- }
- f.err = io.EOF
- }
- f.step = nextBlock
-}
-
-func (f *decompressor) doStep() {
- switch f.step {
- case copyData:
- f.copyData()
- case nextBlock:
- f.nextBlock()
- case huffmanBytesBuffer:
- f.huffmanBytesBuffer()
- case huffmanBytesReader:
- f.huffmanBytesReader()
- case huffmanBufioReader:
- f.huffmanBufioReader()
- case huffmanStringsReader:
- f.huffmanStringsReader()
- case huffmanGenericReader:
- f.huffmanGenericReader()
- default:
- panic("BUG: unexpected step state")
- }
-}
-
-// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
-func noEOF(e error) error {
- if e == io.EOF {
- return io.ErrUnexpectedEOF
- }
- return e
-}
-
-func (f *decompressor) moreBits() error {
- c, err := f.r.ReadByte()
- if err != nil {
- return noEOF(err)
- }
- f.roffset++
- f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
- f.nb += 8
- return nil
-}
-
-// Read the next Huffman-encoded symbol from f according to h.
-func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(h.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := f.r.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- return 0, noEOF(err)
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := h.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return 0, f.err
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- return int(chunk >> huffmanValueShift), nil
- }
- }
-}
-
-func makeReader(r io.Reader) Reader {
- if rr, ok := r.(Reader); ok {
- return rr
- }
- return bufio.NewReader(r)
-}
-
-func fixedHuffmanDecoderInit() {
- fixedOnce.Do(func() {
- // These come from the RFC section 3.2.6.
- var bits [288]int
- for i := 0; i < 144; i++ {
- bits[i] = 8
- }
- for i := 144; i < 256; i++ {
- bits[i] = 9
- }
- for i := 256; i < 280; i++ {
- bits[i] = 7
- }
- for i := 280; i < 288; i++ {
- bits[i] = 8
- }
- fixedHuffmanDecoder.init(bits[:])
- })
-}
-
-func (f *decompressor) Reset(r io.Reader, dict []byte) error {
- *f = decompressor{
- r: makeReader(r),
- bits: f.bits,
- codebits: f.codebits,
- h1: f.h1,
- h2: f.h2,
- dict: f.dict,
- step: nextBlock,
- }
- f.dict.init(maxMatchOffset, dict)
- return nil
-}
-
-// NewReader returns a new ReadCloser that can be used
-// to read the uncompressed version of r.
-// If r does not also implement io.ByteReader,
-// the decompressor may read more data than necessary from r.
-// It is the caller's responsibility to call Close on the ReadCloser
-// when finished reading.
-//
-// The ReadCloser returned by NewReader also implements Resetter.
-func NewReader(r io.Reader) io.ReadCloser {
- fixedHuffmanDecoderInit()
-
- var f decompressor
- f.r = makeReader(r)
- f.bits = new([maxNumLit + maxNumDist]int)
- f.codebits = new([numCodes]int)
- f.step = nextBlock
- f.dict.init(maxMatchOffset, nil)
- return &f
-}
-
-// NewReaderDict is like NewReader but initializes the reader
-// with a preset dictionary. The returned Reader behaves as if
-// the uncompressed data stream started with the given dictionary,
-// which has already been read. NewReaderDict is typically used
-// to read data compressed by NewWriterDict.
-//
-// The ReadCloser returned by NewReader also implements Resetter.
-func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
- fixedHuffmanDecoderInit()
-
- var f decompressor
- f.r = makeReader(r)
- f.bits = new([maxNumLit + maxNumDist]int)
- f.codebits = new([numCodes]int)
- f.step = nextBlock
- f.dict.init(maxMatchOffset, dict)
- return &f
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/inflate_gen.go
deleted file mode 100644
index 2b2f993..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ /dev/null
@@ -1,1283 +0,0 @@
-// Code generated by go generate gen_inflate.go. DO NOT EDIT.
-
-package flate
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "math/bits"
- "strings"
-)
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBytesBuffer() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*bytes.Buffer)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- dict.writeByte(byte(v))
- if dict.availWrite() == 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBytesBuffer
- f.stepState = stateInit
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- case v == 256:
- f.b, f.nb = fb, fnb
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- length += int(fb & bitMask32[n])
- fb >>= n & regSizeMaskUint32
- fnb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- f.b, f.nb = fb, fnb
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for fnb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
- fb >>= 5
- fnb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
- fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBytesBuffer // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBytesReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*bytes.Reader)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- dict.writeByte(byte(v))
- if dict.availWrite() == 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBytesReader
- f.stepState = stateInit
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- case v == 256:
- f.b, f.nb = fb, fnb
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- length += int(fb & bitMask32[n])
- fb >>= n & regSizeMaskUint32
- fnb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- f.b, f.nb = fb, fnb
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for fnb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
- fb >>= 5
- fnb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBytesReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBufioReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*bufio.Reader)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- dict.writeByte(byte(v))
- if dict.availWrite() == 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBufioReader
- f.stepState = stateInit
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- case v == 256:
- f.b, f.nb = fb, fnb
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- length += int(fb & bitMask32[n])
- fb >>= n & regSizeMaskUint32
- fnb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- f.b, f.nb = fb, fnb
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for fnb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
- fb >>= 5
- fnb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBufioReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanStringsReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*strings.Reader)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- dict.writeByte(byte(v))
- if dict.availWrite() == 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanStringsReader
- f.stepState = stateInit
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- case v == 256:
- f.b, f.nb = fb, fnb
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- length += int(fb & bitMask32[n])
- fb >>= n & regSizeMaskUint32
- fnb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- f.b, f.nb = fb, fnb
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for fnb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
- fb >>= 5
- fnb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanStringsReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanGenericReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(Reader)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var length int
- switch {
- case v < 256:
- dict.writeByte(byte(v))
- if dict.availWrite() == 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanGenericReader
- f.stepState = stateInit
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- case v == 256:
- f.b, f.nb = fb, fnb
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- case v < maxNumLit:
- val := decCodeToLen[(v - 257)]
- length = int(val.length) + 3
- n := uint(val.extra)
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- length += int(fb & bitMask32[n])
- fb >>= n & regSizeMaskUint32
- fnb -= n
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- f.b, f.nb = fb, fnb
- return
- }
-
- var dist uint32
- if f.hd == nil {
- for fnb < 5 {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
- fb >>= 5
- fnb -= 5
- } else {
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hd.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- for {
- for fnb < n {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= fnb {
- if n == 0 {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- fb = fb >> (n & regSizeMaskUint32)
- fnb = fnb - n
- dist = uint32(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanGenericReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-func (f *decompressor) huffmanBlockDecoder() {
- switch f.r.(type) {
- case *bytes.Buffer:
- f.huffmanBytesBuffer()
- case *bytes.Reader:
- f.huffmanBytesReader()
- case *bufio.Reader:
- f.huffmanBufioReader()
- case *strings.Reader:
- f.huffmanStringsReader()
- case Reader:
- f.huffmanGenericReader()
- default:
- f.huffmanGenericReader()
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level1.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level1.go
deleted file mode 100644
index 703b9a8..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level1.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package flate
-
-import (
- "encoding/binary"
- "fmt"
- "math/bits"
-)
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL1 struct {
- fastGen
- table [tableSize]tableEntry
-}
-
-// EncodeL1 uses a similar algorithm to level 1
-func (e *fastEncL1) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashBytes = 5
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
-
- for {
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashLen(cv, tableBits, hashBytes)
- candidate = e.table[nextHash]
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
-
- now := load6432(src, nextS)
- e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hashLen(now, tableBits, hashBytes)
-
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
-
- // Do one right away...
- cv = now
- s = nextS
- nextS++
- candidate = e.table[nextHash]
- now >>= 8
- e.table[nextHash] = tableEntry{offset: s + e.cur}
-
- offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
- cv = now
- s = nextS
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset - e.cur
- var l = int32(4)
- if false {
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else {
- // inlined:
- a := src[s+4:]
- b := src[t+4:]
- for len(a) >= 8 {
- if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
- l += int32(bits.TrailingZeros64(diff) >> 3)
- break
- }
- l += 8
- a = a[8:]
- b = b[8:]
- }
- if len(a) < 8 {
- b = b[:len(a)]
- for i := range a {
- if a[i] != b[i] {
- break
- }
- l++
- }
- }
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- // Save the match found
- if false {
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- } else {
- // Inlined...
- xoffset := uint32(s - t - baseMatchOffset)
- xlength := l
- oc := offsetCode(xoffset)
- xoffset |= oc << 16
- for xlength > 0 {
- xl := xlength
- if xl > 258 {
- if xl > 258+baseMatchLength {
- xl = 258
- } else {
- xl = 258 - baseMatchLength
- }
- }
- xlength -= xl
- xl -= baseMatchLength
- dst.extraHist[lengthCodes1[uint8(xl)]]++
- dst.offHist[oc]++
-					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
-					dst.n++
-				}
-			}
-			s += l
-			nextEmit = s
-			if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+l+8) < len(src) {
- cv := load6432(src, s)
- e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-2)
- o := e.cur + s - 2
- prevHash := hashLen(x, tableBits, hashBytes)
- e.table[prevHash] = tableEntry{offset: o}
- x >>= 16
- currHash := hashLen(x, tableBits, hashBytes)
- candidate = e.table[currHash]
- e.table[currHash] = tableEntry{offset: o + 2}
-
- offset := s - (candidate.offset - e.cur)
- if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
- cv = x >> 8
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level2.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level2.go
deleted file mode 100644
index 876dfbe..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level2.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package flate
-
-import "fmt"
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL2 struct {
- fastGen
- table [bTableSize]tableEntry
-}
-
-// EncodeL2 uses a similar algorithm to level 1, but is capable
-// of matching across blocks giving better compression at a small slowdown.
-func (e *fastEncL2) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashBytes = 5
- )
-
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- // When should we start skipping if we haven't found matches in a long while.
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashLen(cv, bTableBits, hashBytes)
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- candidate = e.table[nextHash]
- now := load6432(src, nextS)
- e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hashLen(now, bTableBits, hashBytes)
-
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
-
- // Do one right away...
- cv = now
- s = nextS
- nextS++
- candidate = e.table[nextHash]
- now >>= 8
- e.table[nextHash] = tableEntry{offset: s + e.cur}
-
- offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- break
- }
- cv = now
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+l+8) < len(src) {
- cv := load6432(src, s)
- e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // Store every second hash in-between, but offset by 1.
- for i := s - l + 2; i < s-5; i += 7 {
- x := load6432(src, i)
- nextHash := hashLen(x, bTableBits, hashBytes)
- e.table[nextHash] = tableEntry{offset: e.cur + i}
- // Skip one
- x >>= 16
- nextHash = hashLen(x, bTableBits, hashBytes)
- e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
- // Skip one
- x >>= 16
- nextHash = hashLen(x, bTableBits, hashBytes)
- e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 to s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-2)
- o := e.cur + s - 2
- prevHash := hashLen(x, bTableBits, hashBytes)
- prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
- e.table[prevHash] = tableEntry{offset: o}
- e.table[prevHash2] = tableEntry{offset: o + 1}
- currHash := hashLen(x>>16, bTableBits, hashBytes)
- candidate = e.table[currHash]
- e.table[currHash] = tableEntry{offset: o + 2}
-
- offset := s - (candidate.offset - e.cur)
- if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
- cv = x >> 24
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level3.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level3.go
deleted file mode 100644
index 7aa2b72..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level3.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package flate
-
-import "fmt"
-
-// fastEncL3
-type fastEncL3 struct {
- fastGen
- table [1 << 16]tableEntryPrev
-}
-
-// Encode uses a similar algorithm to level 2, will check up to two candidates.
-func (e *fastEncL3) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- tableBits = 16
- tableSize = 1 << tableBits
- hashBytes = 5
- )
-
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- }
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- e.table[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // Skip if too small.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- const skipLog = 7
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashLen(cv, tableBits, hashBytes)
- s = nextS
- nextS = s + 1 + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- candidates := e.table[nextHash]
- now := load6432(src, nextS)
-
- // Safe offset distance until s + 4...
- minOffset := e.cur + s - (maxMatchOffset - 4)
- e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
-
- // Check both candidates
- candidate = candidates.Cur
- if candidate.offset < minOffset {
- cv = now
- // Previous will also be invalid, we have nothing.
- continue
- }
-
- if uint32(cv) == load3232(src, candidate.offset-e.cur) {
- if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
- break
- }
- // Both match and are valid, pick longest.
- offset := s - (candidate.offset - e.cur)
- o2 := s - (candidates.Prev.offset - e.cur)
- l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
- if l2 > l1 {
- candidate = candidates.Prev
- }
- break
- } else {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
- candidate = candidates.Prev
- if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- break
- }
- }
- cv = now
- }
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- //
- t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- t += l
- // Index first pair after match end.
- if int(t+8) < len(src) && t > 0 {
- cv = load6432(src, t)
- nextHash := hashLen(cv, tableBits, hashBytes)
- e.table[nextHash] = tableEntryPrev{
- Prev: e.table[nextHash].Cur,
- Cur: tableEntry{offset: e.cur + t},
- }
- }
- goto emitRemainder
- }
-
- // Store every 5th hash in-between.
- for i := s - l + 2; i < s-5; i += 6 {
- nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
- e.table[nextHash] = tableEntryPrev{
- Prev: e.table[nextHash].Cur,
- Cur: tableEntry{offset: e.cur + i}}
- }
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 to s.
- x := load6432(src, s-2)
- prevHash := hashLen(x, tableBits, hashBytes)
-
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 2},
- }
- x >>= 8
- prevHash = hashLen(x, tableBits, hashBytes)
-
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 1},
- }
- x >>= 8
- currHash := hashLen(x, tableBits, hashBytes)
- candidates := e.table[currHash]
- cv = x
- e.table[currHash] = tableEntryPrev{
- Prev: candidates.Cur,
- Cur: tableEntry{offset: s + e.cur},
- }
-
- // Check both candidates
- candidate = candidates.Cur
- minOffset := e.cur + s - (maxMatchOffset - 4)
-
- if candidate.offset > minOffset {
- if uint32(cv) == load3232(src, candidate.offset-e.cur) {
- // Found a match...
- continue
- }
- candidate = candidates.Prev
- if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- // Match at prev...
- continue
- }
- }
- cv = x >> 8
- s++
- break
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level4.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level4.go
deleted file mode 100644
index 23c08b3..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level4.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package flate
-
-import "fmt"
-
-type fastEncL4 struct {
- fastGen
- table [tableSize]tableEntry
- bTable [tableSize]tableEntry
-}
-
-func (e *fastEncL4) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashShortBytes = 4
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.bTable[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- const skipLog = 6
- const doEvery = 1
-
- nextS := s
- var t int32
- for {
- nextHashS := hashLen(cv, tableBits, hashShortBytes)
- nextHashL := hash7(cv, tableBits)
-
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- e.bTable[nextHashL] = entry
-
- t = lCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
- // We got a long match. Use that.
- break
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- lCandidate = e.bTable[hash7(next, tableBits)]
-
- // If the next long is a candidate, check if we should use that instead...
- lOff := nextS - (lCandidate.offset - e.cur)
- if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
- l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
- if l2 > l1 {
- s = nextS
- t = lCandidate.offset - e.cur
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Extend the 4-byte match as long as possible.
- l := e.matchlenLong(s+4, t+4, src) + 4
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
- if debugDeflate {
- if t >= s {
- panic("s-t")
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+8) < len(src) {
- cv := load6432(src, s)
- e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
- e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // Store every 3rd hash in-between
- if true {
- i := nextS
- if i < s-1 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- e.bTable[hash7(cv, tableBits)] = t
- e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
-
- i += 3
- for ; i < s-1; i += 3 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- e.bTable[hash7(cv, tableBits)] = t
- e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
- }
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- x := load6432(src, s-1)
- o := e.cur + s - 1
- prevHashS := hashLen(x, tableBits, hashShortBytes)
- prevHashL := hash7(x, tableBits)
- e.table[prevHashS] = tableEntry{offset: o}
- e.bTable[prevHashL] = tableEntry{offset: o}
- cv = x >> 8
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level5.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level5.go
deleted file mode 100644
index 1f61ec1..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level5.go
+++ /dev/null
@@ -1,708 +0,0 @@
-package flate
-
-import "fmt"
-
-type fastEncL5 struct {
- fastGen
- table [tableSize]tableEntry
- bTable [tableSize]tableEntryPrev
-}
-
-func (e *fastEncL5) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashShortBytes = 4
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- v.Prev.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- }
- e.bTable[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- const skipLog = 6
- const doEvery = 1
-
- nextS := s
- var l int32
- var t int32
- for {
- nextHashS := hashLen(cv, tableBits, hashShortBytes)
- nextHashL := hash7(cv, tableBits)
-
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = entry, eLong.Cur
-
- nextHashS = hashLen(next, tableBits, hashShortBytes)
- nextHashL = hash7(next, tableBits)
-
- t = lCandidate.Cur.offset - e.cur
- if s-t < maxMatchOffset {
- if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- t2 := lCandidate.Prev.offset - e.cur
- if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- l = e.matchlen(s+4, t+4, src) + 4
- ml1 := e.matchlen(s+4, t2+4, src) + 4
- if ml1 > l {
- t = t2
- l = ml1
- break
- }
- }
- break
- }
- t = lCandidate.Prev.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
- break
- }
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- l = e.matchlen(s+4, t+4, src) + 4
- lCandidate = e.bTable[nextHashL]
- // Store the next match
-
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // If the next long is a candidate, use that...
- t2 := lCandidate.Cur.offset - e.cur
- if nextS-t2 < maxMatchOffset {
- if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- // If the previous long is a candidate, use that...
- t2 = lCandidate.Prev.offset - e.cur
- if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- if l == 0 {
- // Extend the 4-byte match as long as possible.
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else if l == maxMatchLength {
- l += e.matchlenLong(s+l, t+l, src)
- }
-
- // Try to locate a better match by checking the end of best match...
- if sAt := s + l; l < 30 && sAt < sLimit {
- // Allow some bytes at the beginning to mismatch.
- // Sweet spot is 2/3 bytes depending on input.
- // 3 is only a little better when it is but sometimes a lot worse.
- // The skipped bytes are tested in Extend backwards,
- // and still picked up as part of the match if they do.
- const skipBeginning = 2
- eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
- t2 := eLong - e.cur - l + skipBeginning
- s2 := s + skipBeginning
- off := s2 - t2
- if t2 >= 0 && off < maxMatchOffset && off > 0 {
- if l2 := e.matchlenLong(s2, t2, src); l2 > l {
- t = t2
- l = l2
- s = s2
- }
- }
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
- if debugDeflate {
- if t >= s {
- panic(fmt.Sprintln("s-t", s, t))
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", s-t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- goto emitRemainder
- }
-
- // Store every 3rd hash in-between.
- if true {
- const hashEvery = 3
- i := s - l + 1
- if i < s-1 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- e.table[hashLen(cv, tableBits, hashShortBytes)] = t
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
-
- // Do an long at i+1
- cv >>= 8
- t = tableEntry{offset: t.offset + 1}
- eLong = &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
-
- // We only have enough bits for a short entry at i+2
- cv >>= 8
- t = tableEntry{offset: t.offset + 1}
- e.table[hashLen(cv, tableBits, hashShortBytes)] = t
-
- // Skip one - otherwise we risk hitting 's'
- i += 4
- for ; i < s-1; i += hashEvery {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
- e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
- }
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- x := load6432(src, s-1)
- o := e.cur + s - 1
- prevHashS := hashLen(x, tableBits, hashShortBytes)
- prevHashL := hash7(x, tableBits)
- e.table[prevHashS] = tableEntry{offset: o}
- eLong := &e.bTable[prevHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
- cv = x >> 8
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
-
-// fastEncL5Window is a level 5 encoder,
-// but with a custom window size.
-type fastEncL5Window struct {
- hist []byte
- cur int32
- maxOffset int32
- table [tableSize]tableEntry
- bTable [tableSize]tableEntryPrev
-}
-
-func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashShortBytes = 4
- )
- maxMatchOffset := e.maxOffset
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- v.Prev.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- }
- e.bTable[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- for {
- const skipLog = 6
- const doEvery = 1
-
- nextS := s
- var l int32
- var t int32
- for {
- nextHashS := hashLen(cv, tableBits, hashShortBytes)
- nextHashL := hash7(cv, tableBits)
-
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = entry, eLong.Cur
-
- nextHashS = hashLen(next, tableBits, hashShortBytes)
- nextHashL = hash7(next, tableBits)
-
- t = lCandidate.Cur.offset - e.cur
- if s-t < maxMatchOffset {
- if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- t2 := lCandidate.Prev.offset - e.cur
- if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- l = e.matchlen(s+4, t+4, src) + 4
- ml1 := e.matchlen(s+4, t2+4, src) + 4
- if ml1 > l {
- t = t2
- l = ml1
- break
- }
- }
- break
- }
- t = lCandidate.Prev.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
- break
- }
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- l = e.matchlen(s+4, t+4, src) + 4
- lCandidate = e.bTable[nextHashL]
- // Store the next match
-
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // If the next long is a candidate, use that...
- t2 := lCandidate.Cur.offset - e.cur
- if nextS-t2 < maxMatchOffset {
- if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- // If the previous long is a candidate, use that...
- t2 = lCandidate.Prev.offset - e.cur
- if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- if l == 0 {
- // Extend the 4-byte match as long as possible.
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else if l == maxMatchLength {
- l += e.matchlenLong(s+l, t+l, src)
- }
-
- // Try to locate a better match by checking the end of best match...
- if sAt := s + l; l < 30 && sAt < sLimit {
- // Allow some bytes at the beginning to mismatch.
- // Sweet spot is 2/3 bytes depending on input.
- // 3 is only a little better when it is but sometimes a lot worse.
- // The skipped bytes are tested in Extend backwards,
- // and still picked up as part of the match if they do.
- const skipBeginning = 2
- eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
- t2 := eLong - e.cur - l + skipBeginning
- s2 := s + skipBeginning
- off := s2 - t2
- if t2 >= 0 && off < maxMatchOffset && off > 0 {
- if l2 := e.matchlenLong(s2, t2, src); l2 > l {
- t = t2
- l = l2
- s = s2
- }
- }
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
- if debugDeflate {
- if t >= s {
- panic(fmt.Sprintln("s-t", s, t))
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", s-t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- goto emitRemainder
- }
-
- // Store every 3rd hash in-between.
- if true {
- const hashEvery = 3
- i := s - l + 1
- if i < s-1 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- e.table[hashLen(cv, tableBits, hashShortBytes)] = t
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
-
- // Do an long at i+1
- cv >>= 8
- t = tableEntry{offset: t.offset + 1}
- eLong = &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
-
- // We only have enough bits for a short entry at i+2
- cv >>= 8
- t = tableEntry{offset: t.offset + 1}
- e.table[hashLen(cv, tableBits, hashShortBytes)] = t
-
- // Skip one - otherwise we risk hitting 's'
- i += 4
- for ; i < s-1; i += hashEvery {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = t, eLong.Cur
- e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
- }
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- x := load6432(src, s-1)
- o := e.cur + s - 1
- prevHashS := hashLen(x, tableBits, hashShortBytes)
- prevHashL := hash7(x, tableBits)
- e.table[prevHashS] = tableEntry{offset: o}
- eLong := &e.bTable[prevHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
- cv = x >> 8
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
-
-// Reset the encoding table.
-func (e *fastEncL5Window) Reset() {
- // We keep the same allocs, since we are compressing the same block sizes.
- if cap(e.hist) < allocHistory {
- e.hist = make([]byte, 0, allocHistory)
- }
-
- // We offset current position so everything will be out of reach.
- // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
- if e.cur <= int32(bufferReset) {
- e.cur += e.maxOffset + int32(len(e.hist))
- }
- e.hist = e.hist[:0]
-}
-
-func (e *fastEncL5Window) addBlock(src []byte) int32 {
- // check if we have space already
- maxMatchOffset := e.maxOffset
-
- if len(e.hist)+len(src) > cap(e.hist) {
- if cap(e.hist) == 0 {
- e.hist = make([]byte, 0, allocHistory)
- } else {
- if cap(e.hist) < int(maxMatchOffset*2) {
- panic("unexpected buffer size")
- }
- // Move down
- offset := int32(len(e.hist)) - maxMatchOffset
- copy(e.hist[0:maxMatchOffset], e.hist[offset:])
- e.cur += offset
- e.hist = e.hist[:maxMatchOffset]
- }
- }
- s := int32(len(e.hist))
- e.hist = append(e.hist, src...)
- return s
-}
-
-// matchlen will return the match length between offsets and t in src.
-// The maximum length returned is maxMatchLength - 4.
-// It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
- if debugDecode {
- if t >= s {
- panic(fmt.Sprint("t >=s:", t, s))
- }
- if int(s) >= len(src) {
- panic(fmt.Sprint("s >= len(src):", s, len(src)))
- }
- if t < 0 {
- panic(fmt.Sprint("t < 0:", t))
- }
- if s-t > e.maxOffset {
- panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
- }
- }
- s1 := int(s) + maxMatchLength - 4
- if s1 > len(src) {
- s1 = len(src)
- }
-
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:s1], src[t:]))
-}
-
-// matchlenLong will return the match length between offsets and t in src.
-// It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
- if debugDeflate {
- if t >= s {
- panic(fmt.Sprint("t >=s:", t, s))
- }
- if int(s) >= len(src) {
- panic(fmt.Sprint("s >= len(src):", s, len(src)))
- }
- if t < 0 {
- panic(fmt.Sprint("t < 0:", t))
- }
- if s-t > e.maxOffset {
- panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
- }
- }
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level6.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level6.go
deleted file mode 100644
index f1e9d98..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/level6.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package flate
-
-import "fmt"
-
-type fastEncL6 struct {
- fastGen
- table [tableSize]tableEntry
- bTable [tableSize]tableEntryPrev
-}
-
-func (e *fastEncL6) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashShortBytes = 4
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.bTable[:] {
- e.bTable[i] = tableEntryPrev{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- for i := range e.bTable[:] {
- v := e.bTable[i]
- if v.Cur.offset <= minOff {
- v.Cur.offset = 0
- v.Prev.offset = 0
- } else {
- v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
- if v.Prev.offset <= minOff {
- v.Prev.offset = 0
- } else {
- v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
- }
- }
- e.bTable[i] = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
- // Repeat MUST be > 1 and within range
- repeat := int32(1)
- for {
- const skipLog = 7
- const doEvery = 1
-
- nextS := s
- var l int32
- var t int32
- for {
- nextHashS := hashLen(cv, tableBits, hashShortBytes)
- nextHashL := hash7(cv, tableBits)
- s = nextS
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
- // Fetch a short+long candidate
- sCandidate := e.table[nextHashS]
- lCandidate := e.bTable[nextHashL]
- next := load6432(src, nextS)
- entry := tableEntry{offset: s + e.cur}
- e.table[nextHashS] = entry
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = entry, eLong.Cur
-
- // Calculate hashes of 'next'
- nextHashS = hashLen(next, tableBits, hashShortBytes)
- nextHashL = hash7(next, tableBits)
-
- t = lCandidate.Cur.offset - e.cur
- if s-t < maxMatchOffset {
- if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
- // Long candidate matches at least 4 bytes.
-
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // Check the previous long candidate as well.
- t2 := lCandidate.Prev.offset - e.cur
- if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- l = e.matchlen(s+4, t+4, src) + 4
- ml1 := e.matchlen(s+4, t2+4, src) + 4
- if ml1 > l {
- t = t2
- l = ml1
- break
- }
- }
- break
- }
- // Current value did not match, but check if previous long value does.
- t = lCandidate.Prev.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
- break
- }
- }
-
- t = sCandidate.offset - e.cur
- if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
- // Found a 4 match...
- l = e.matchlen(s+4, t+4, src) + 4
-
- // Look up next long candidate (at nextS)
- lCandidate = e.bTable[nextHashL]
-
- // Store the next match
- e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
- eLong := &e.bTable[nextHashL]
- eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
-
- // Check repeat at s + repOff
- const repOff = 1
- t2 := s - repeat + repOff
- if load3232(src, t2) == uint32(cv>>(8*repOff)) {
- ml := e.matchlen(s+4+repOff, t2+4, src) + 4
- if ml > l {
- t = t2
- l = ml
- s += repOff
- // Not worth checking more.
- break
- }
- }
-
- // If the next long is a candidate, use that...
- t2 = lCandidate.Cur.offset - e.cur
- if nextS-t2 < maxMatchOffset {
- if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- // This is ok, but check previous as well.
- }
- }
- // If the previous long is a candidate, use that...
- t2 = lCandidate.Prev.offset - e.cur
- if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
- ml := e.matchlen(nextS+4, t2+4, src) + 4
- if ml > l {
- t = t2
- s = nextS
- l = ml
- break
- }
- }
- }
- break
- }
- cv = next
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
-
- // Extend the 4-byte match as long as possible.
- if l == 0 {
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else if l == maxMatchLength {
- l += e.matchlenLong(s+l, t+l, src)
- }
-
- // Try to locate a better match by checking the end-of-match...
- if sAt := s + l; sAt < sLimit {
- // Allow some bytes at the beginning to mismatch.
- // Sweet spot is 2/3 bytes depending on input.
- // 3 is only a little better when it is but sometimes a lot worse.
- // The skipped bytes are tested in Extend backwards,
- // and still picked up as part of the match if they do.
- const skipBeginning = 2
- eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
- // Test current
- t2 := eLong.Cur.offset - e.cur - l + skipBeginning
- s2 := s + skipBeginning
- off := s2 - t2
- if off < maxMatchOffset {
- if off > 0 && t2 >= 0 {
- if l2 := e.matchlenLong(s2, t2, src); l2 > l {
- t = t2
- l = l2
- s = s2
- }
- }
- // Test next:
- t2 = eLong.Prev.offset - e.cur - l + skipBeginning
- off := s2 - t2
- if off > 0 && off < maxMatchOffset && t2 >= 0 {
- if l2 := e.matchlenLong(s2, t2, src); l2 > l {
- t = t2
- l = l2
- s = s2
- }
- }
- }
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
- if false {
- if t >= s {
- panic(fmt.Sprintln("s-t", s, t))
- }
- if (s - t) > maxMatchOffset {
- panic(fmt.Sprintln("mmo", s-t))
- }
- if l < baseMatchLength {
- panic("bml")
- }
- }
-
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- repeat = s - t
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
-
- if s >= sLimit {
- // Index after match end.
- for i := nextS + 1; i < int32(len(src))-8; i += 2 {
- cv := load6432(src, i)
- e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
- }
- goto emitRemainder
- }
-
- // Store every long hash in-between and every second short.
- if true {
- for i := nextS + 1; i < s-1; i += 2 {
- cv := load6432(src, i)
- t := tableEntry{offset: i + e.cur}
- t2 := tableEntry{offset: t.offset + 1}
- eLong := &e.bTable[hash7(cv, tableBits)]
- eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
- e.table[hashLen(cv, tableBits, hashShortBytes)] = t
- eLong.Cur, eLong.Prev = t, eLong.Cur
- eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
- }
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s.
- cv = load6432(src, s)
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
-
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
deleted file mode 100644
index 4bd3885..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-
-package flate
-
-// matchLen returns how many bytes match in a and b
-//
-// It assumes that:
-//
-// len(a) <= len(b) and len(a) > 0
-//
-//go:noescape
-func matchLen(a []byte, b []byte) int
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
deleted file mode 100644
index 9a7655c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copied from S2 implementation.
-
-//go:build !appengine && !noasm && gc && !noasm
-
-#include "textflag.h"
-
-// func matchLen(a []byte, b []byte) int
-// Requires: BMI
-TEXT ·matchLen(SB), NOSPLIT, $0-56
- MOVQ a_base+0(FP), AX
- MOVQ b_base+24(FP), CX
- MOVQ a_len+8(FP), DX
-
- // matchLen
- XORL SI, SI
- CMPL DX, $0x08
- JB matchlen_match4_standalone
-
-matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
-
-#ifdef GOAMD64_v3
- TZCNTQ BX, BX
-#else
- BSFQ BX, BX
-#endif
- SARQ $0x03, BX
- LEAL (SI)(BX*1), SI
- JMP gen_match_len_end
-
-matchlen_loop_standalone:
- LEAL -8(DX), DX
- LEAL 8(SI), SI
- CMPL DX, $0x08
- JAE matchlen_loopback_standalone
-
-matchlen_match4_standalone:
- CMPL DX, $0x04
- JB matchlen_match2_standalone
- MOVL (AX)(SI*1), BX
- CMPL (CX)(SI*1), BX
- JNE matchlen_match2_standalone
- LEAL -4(DX), DX
- LEAL 4(SI), SI
-
-matchlen_match2_standalone:
- CMPL DX, $0x02
- JB matchlen_match1_standalone
- MOVW (AX)(SI*1), BX
- CMPW (CX)(SI*1), BX
- JNE matchlen_match1_standalone
- LEAL -2(DX), DX
- LEAL 2(SI), SI
-
-matchlen_match1_standalone:
- CMPL DX, $0x01
- JB gen_match_len_end
- MOVB (AX)(SI*1), BL
- CMPB (CX)(SI*1), BL
- JNE gen_match_len_end
- INCL SI
-
-gen_match_len_end:
- MOVQ SI, ret+48(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
deleted file mode 100644
index ad5cd81..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
+++ /dev/null
@@ -1,33 +0,0 @@
-//go:build !amd64 || appengine || !gc || noasm
-// +build !amd64 appengine !gc noasm
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-
-package flate
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// matchLen returns the maximum common prefix length of a and b.
-// a must be the shortest of the two.
-func matchLen(a, b []byte) (n int) {
- for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
- diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
- if diff != 0 {
- return n + bits.TrailingZeros64(diff)>>3
- }
- n += 8
- }
-
- for i := range a {
- if a[i] != b[i] {
- break
- }
- n++
- }
- return n
-
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
deleted file mode 100644
index 6ed2806..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package flate
-
-const (
- // Masks for shifts with register sizes of the shift value.
- // This can be used to work around the x86 design of shifting by mod register size.
- // It can be used when a variable shift is always smaller than the register size.
-
- // reg8SizeMaskX - shift value is 8 bits, shifted is X
- reg8SizeMask8 = 7
- reg8SizeMask16 = 15
- reg8SizeMask32 = 31
- reg8SizeMask64 = 63
-
- // reg16SizeMaskX - shift value is 16 bits, shifted is X
- reg16SizeMask8 = reg8SizeMask8
- reg16SizeMask16 = reg8SizeMask16
- reg16SizeMask32 = reg8SizeMask32
- reg16SizeMask64 = reg8SizeMask64
-
- // reg32SizeMaskX - shift value is 32 bits, shifted is X
- reg32SizeMask8 = reg8SizeMask8
- reg32SizeMask16 = reg8SizeMask16
- reg32SizeMask32 = reg8SizeMask32
- reg32SizeMask64 = reg8SizeMask64
-
- // reg64SizeMaskX - shift value is 64 bits, shifted is X
- reg64SizeMask8 = reg8SizeMask8
- reg64SizeMask16 = reg8SizeMask16
- reg64SizeMask32 = reg8SizeMask32
- reg64SizeMask64 = reg8SizeMask64
-
- // regSizeMaskUintX - shift value is uint, shifted is X
- regSizeMaskUint8 = reg8SizeMask8
- regSizeMaskUint16 = reg8SizeMask16
- regSizeMaskUint32 = reg8SizeMask32
- regSizeMaskUint64 = reg8SizeMask64
-)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/regmask_other.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/regmask_other.go
deleted file mode 100644
index 1b7a2cb..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/regmask_other.go
+++ /dev/null
@@ -1,40 +0,0 @@
-//go:build !amd64
-// +build !amd64
-
-package flate
-
-const (
- // Masks for shifts with register sizes of the shift value.
- // This can be used to work around the x86 design of shifting by mod register size.
- // It can be used when a variable shift is always smaller than the register size.
-
- // reg8SizeMaskX - shift value is 8 bits, shifted is X
- reg8SizeMask8 = 0xff
- reg8SizeMask16 = 0xff
- reg8SizeMask32 = 0xff
- reg8SizeMask64 = 0xff
-
- // reg16SizeMaskX - shift value is 16 bits, shifted is X
- reg16SizeMask8 = 0xffff
- reg16SizeMask16 = 0xffff
- reg16SizeMask32 = 0xffff
- reg16SizeMask64 = 0xffff
-
- // reg32SizeMaskX - shift value is 32 bits, shifted is X
- reg32SizeMask8 = 0xffffffff
- reg32SizeMask16 = 0xffffffff
- reg32SizeMask32 = 0xffffffff
- reg32SizeMask64 = 0xffffffff
-
- // reg64SizeMaskX - shift value is 64 bits, shifted is X
- reg64SizeMask8 = 0xffffffffffffffff
- reg64SizeMask16 = 0xffffffffffffffff
- reg64SizeMask32 = 0xffffffffffffffff
- reg64SizeMask64 = 0xffffffffffffffff
-
- // regSizeMaskUintX - shift value is uint, shifted is X
- regSizeMaskUint8 = ^uint(0)
- regSizeMaskUint16 = ^uint(0)
- regSizeMaskUint32 = ^uint(0)
- regSizeMaskUint64 = ^uint(0)
-)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/stateless.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/stateless.go
deleted file mode 100644
index f3d4139..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/stateless.go
+++ /dev/null
@@ -1,318 +0,0 @@
-package flate
-
-import (
- "io"
- "math"
- "sync"
-)
-
-const (
- maxStatelessBlock = math.MaxInt16
- // dictionary will be taken from maxStatelessBlock, so limit it.
- maxStatelessDict = 8 << 10
-
- slTableBits = 13
- slTableSize = 1 << slTableBits
- slTableShift = 32 - slTableBits
-)
-
-type statelessWriter struct {
- dst io.Writer
- closed bool
-}
-
-func (s *statelessWriter) Close() error {
- if s.closed {
- return nil
- }
- s.closed = true
- // Emit EOF block
- return StatelessDeflate(s.dst, nil, true, nil)
-}
-
-func (s *statelessWriter) Write(p []byte) (n int, err error) {
- err = StatelessDeflate(s.dst, p, false, nil)
- if err != nil {
- return 0, err
- }
- return len(p), nil
-}
-
-func (s *statelessWriter) Reset(w io.Writer) {
- s.dst = w
- s.closed = false
-}
-
-// NewStatelessWriter will do compression but without maintaining any state
-// between Write calls.
-// There will be no memory kept between Write calls,
-// but compression and speed will be suboptimal.
-// Because of this, the size of actual Write calls will affect output size.
-func NewStatelessWriter(dst io.Writer) io.WriteCloser {
- return &statelessWriter{dst: dst}
-}
-
-// bitWriterPool contains bit writers that can be reused.
-var bitWriterPool = sync.Pool{
- New: func() interface{} {
- return newHuffmanBitWriter(nil)
- },
-}
-
-// StatelessDeflate allows compressing directly to a Writer without retaining state.
-// When returning everything will be flushed.
-// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
-// Longer dictionaries will be truncated and will still produce valid output.
-// Sending nil dictionary is perfectly fine.
-func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
- var dst tokens
- bw := bitWriterPool.Get().(*huffmanBitWriter)
- bw.reset(out)
- defer func() {
- // don't keep a reference to our output
- bw.reset(nil)
- bitWriterPool.Put(bw)
- }()
- if eof && len(in) == 0 {
- // Just write an EOF block.
- // Could be faster...
- bw.writeStoredHeader(0, true)
- bw.flush()
- return bw.err
- }
-
- // Truncate dict
- if len(dict) > maxStatelessDict {
- dict = dict[len(dict)-maxStatelessDict:]
- }
-
- // For subsequent loops, keep shallow dict reference to avoid alloc+copy.
- var inDict []byte
-
- for len(in) > 0 {
- todo := in
- if len(inDict) > 0 {
- if len(todo) > maxStatelessBlock-maxStatelessDict {
- todo = todo[:maxStatelessBlock-maxStatelessDict]
- }
- } else if len(todo) > maxStatelessBlock-len(dict) {
- todo = todo[:maxStatelessBlock-len(dict)]
- }
- inOrg := in
- in = in[len(todo):]
- uncompressed := todo
- if len(dict) > 0 {
- // combine dict and source
- bufLen := len(todo) + len(dict)
- combined := make([]byte, bufLen)
- copy(combined, dict)
- copy(combined[len(dict):], todo)
- todo = combined
- }
- // Compress
- if len(inDict) == 0 {
- statelessEnc(&dst, todo, int16(len(dict)))
- } else {
- statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
- }
- isEof := eof && len(in) == 0
-
- if dst.n == 0 {
- bw.writeStoredHeader(len(uncompressed), isEof)
- if bw.err != nil {
- return bw.err
- }
- bw.writeBytes(uncompressed)
- } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
- // If we removed less than 1/16th, huffman compress the block.
- bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
- } else {
- bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
- }
- if len(in) > 0 {
- // Retain a dict if we have more
- inDict = inOrg[len(uncompressed)-maxStatelessDict:]
- dict = nil
- dst.Reset()
- }
- if bw.err != nil {
- return bw.err
- }
- }
- if !eof {
- // Align, only a stored block can do that.
- bw.writeStoredHeader(0, false)
- }
- bw.flush()
- return bw.err
-}
-
-func hashSL(u uint32) uint32 {
- return (u * 0x1e35a7bd) >> slTableShift
-}
-
-func load3216(b []byte, i int16) uint32 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:4]
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load6416(b []byte, i int16) uint64 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:8]
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func statelessEnc(dst *tokens, src []byte, startAt int16) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- )
-
- type tableEntry struct {
- offset int16
- }
-
- var table [slTableSize]tableEntry
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src)-int(startAt) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = 0
- return
- }
- // Index until startAt
- if startAt > 0 {
- cv := load3232(src, 0)
- for i := int16(0); i < startAt; i++ {
- table[hashSL(cv)] = tableEntry{offset: i}
- cv = (cv >> 8) | (uint32(src[i+4]) << 24)
- }
- }
-
- s := startAt + 1
- nextEmit := startAt
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int16(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load3216(src, s)
-
- for {
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashSL(cv)
- candidate = table[nextHash]
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit || nextS <= 0 {
- goto emitRemainder
- }
-
- now := load6416(src, nextS)
- table[nextHash] = tableEntry{offset: s}
- nextHash = hashSL(uint32(now))
-
- if cv == load3216(src, candidate.offset) {
- table[nextHash] = tableEntry{offset: nextS}
- break
- }
-
- // Do one right away...
- cv = uint32(now)
- s = nextS
- nextS++
- candidate = table[nextHash]
- now >>= 8
- table[nextHash] = tableEntry{offset: s}
-
- if cv == load3216(src, candidate.offset) {
- table[nextHash] = tableEntry{offset: nextS}
- break
- }
- cv = uint32(now)
- s = nextS
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset
- l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- // Save the match found
- dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6416(src, s-2)
- o := s - 2
- prevHash := hashSL(uint32(x))
- table[prevHash] = tableEntry{offset: o}
- x >>= 16
- currHash := hashSL(uint32(x))
- candidate = table[currHash]
- table[currHash] = tableEntry{offset: o + 2}
-
- if uint32(x) != load3216(src, candidate.offset) {
- cv = uint32(x >> 8)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/token.go b/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/token.go
deleted file mode 100644
index d818790..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/klauspost/compress/flate/token.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "math"
-)
-
-const (
- // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
- // bits 16-22 offsetcode - 5 bits
- // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
- // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
- lengthShift = 22
- offsetMask = 1<maxnumlit
- offHist [32]uint16 // offset codes
- litHist [256]uint16 // codes 0->255
- nFilled int
- n uint16 // Must be able to contain maxStoreBlockSize
- tokens [maxStoreBlockSize + 1]token
-}
-
-func (t *tokens) Reset() {
- if t.n == 0 {
- return
- }
- t.n = 0
- t.nFilled = 0
- for i := range t.litHist[:] {
- t.litHist[i] = 0
- }
- for i := range t.extraHist[:] {
- t.extraHist[i] = 0
- }
- for i := range t.offHist[:] {
- t.offHist[i] = 0
- }
-}
-
-func (t *tokens) Fill() {
- if t.n == 0 {
- return
- }
- for i, v := range t.litHist[:] {
- if v == 0 {
- t.litHist[i] = 1
- t.nFilled++
- }
- }
- for i, v := range t.extraHist[:literalCount-256] {
- if v == 0 {
- t.nFilled++
- t.extraHist[i] = 1
- }
- }
- for i, v := range t.offHist[:offsetCodeCount] {
- if v == 0 {
- t.offHist[i] = 1
- }
- }
-}
-
-func indexTokens(in []token) tokens {
- var t tokens
- t.indexTokens(in)
- return t
-}
-
-func (t *tokens) indexTokens(in []token) {
- t.Reset()
- for _, tok := range in {
- if tok < matchType {
- t.AddLiteral(tok.literal())
- continue
- }
- t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
- }
-}
-
-// emitLiteral writes a literal chunk and returns the number of bytes written.
-func emitLiteral(dst *tokens, lit []byte) {
- for _, v := range lit {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
-}
-
-func (t *tokens) AddLiteral(lit byte) {
- t.tokens[t.n] = token(lit)
- t.litHist[lit]++
- t.n++
-}
-
-// from https://stackoverflow.com/a/28730362
-func mFastLog2(val float32) float32 {
- ux := int32(math.Float32bits(val))
- log2 := (float32)(((ux >> 23) & 255) - 128)
- ux &= -0x7f800001
- ux += 127 << 23
- uval := math.Float32frombits(uint32(ux))
- log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
- return log2
-}
-
-// EstimatedBits will return an minimum size estimated by an *optimal*
-// compression of the block.
-// The size of the block
-func (t *tokens) EstimatedBits() int {
- shannon := float32(0)
- bits := int(0)
- nMatches := 0
- total := int(t.n) + t.nFilled
- if total > 0 {
- invTotal := 1.0 / float32(total)
- for _, v := range t.litHist[:] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- }
- }
- // Just add 15 for EOB
- shannon += 15
- for i, v := range t.extraHist[1 : literalCount-256] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- bits += int(lengthExtraBits[i&31]) * int(v)
- nMatches += int(v)
- }
- }
- }
- if nMatches > 0 {
- invTotal := 1.0 / float32(nMatches)
- for i, v := range t.offHist[:offsetCodeCount] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- bits += int(offsetExtraBits[i&31]) * int(v)
- }
- }
- }
- return int(shannon) + bits
-}
-
-// AddMatch adds a match to the tokens.
-// This function is very sensitive to inlining and right on the border.
-func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
- if debugDeflate {
- if xlength >= maxMatchLength+baseMatchLength {
- panic(fmt.Errorf("invalid length: %v", xlength))
- }
- if xoffset >= maxMatchOffset+baseMatchOffset {
- panic(fmt.Errorf("invalid offset: %v", xoffset))
- }
- }
- oCode := offsetCode(xoffset)
- xoffset |= oCode << 16
-
- t.extraHist[lengthCodes1[uint8(xlength)]]++
- t.offHist[oCode&31]++
- t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset {
- panic(fmt.Errorf("invalid offset: %v", xoffset))
- }
- }
- oc := offsetCode(xoffset)
- xoffset |= oc << 16
- for xlength > 0 {
- xl := xlength
- if xl > 258 {
- // We need to have at least baseMatchLength left over for next loop.
- if xl > 258+baseMatchLength {
- xl = 258
- } else {
- xl = 258 - baseMatchLength
- }
- }
- xlength -= xl
- xl -= baseMatchLength
- t.extraHist[lengthCodes1[uint8(xl)]]++
- t.offHist[oc&31]++
- t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) }
-
-// Convert length to code.
-func lengthCode(len uint8) uint8 { return lengthCodes[len] }
-
-// Returns the offset code corresponding to a specific offset
-func offsetCode(off uint32) uint32 {
- if false {
- if off < uint32(len(offsetCodes)) {
- return offsetCodes[off&255]
- } else if off>>7 < uint32(len(offsetCodes)) {
- return offsetCodes[(off>>7)&255] + 14
- } else {
- return offsetCodes[(off>>14)&255] + 28
- }
- }
- if off < uint32(len(offsetCodes)) {
- return offsetCodes[uint8(off)]
- }
- return offsetCodes14[uint8(off>>7)]
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.gitignore b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.gitignore
deleted file mode 100644
index ae4871f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.gitignore
+++ /dev/null
@@ -1,45 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-# Emacs
-*~
-\#*\#
-.\#*
-
-# vi/vim
-.??*.swp
-
-# Mac
-.DS_Store
-
-# Eclipse
-.project
-.settings/
-
-# bin
-
-# Goland
-.idea
-
-# VS Code
-.vscode
\ No newline at end of file
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.golangci.yaml b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.golangci.yaml
deleted file mode 100644
index fb548e5..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.golangci.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-issues:
- max-issues-per-linter: 0
- max-same-issues: 0
- exclude-rules:
- - linters:
- - errcheck
- text: "Unsubscribe"
- - linters:
- - errcheck
- text: "Drain"
- - linters:
- - errcheck
- text: "msg.Ack"
- - linters:
- - errcheck
- text: "watcher.Stop"
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.travis.yml b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.travis.yml
deleted file mode 100644
index 9a6b4a8..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.travis.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-language: go
-go:
-- "1.22.x"
-- "1.21.x"
-go_import_path: github.com/nats-io/nats.go
-install:
-- go get -t ./...
-- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then
- go install github.com/mattn/goveralls@latest;
- go install github.com/wadey/gocovmerge@latest;
- go install honnef.co/go/tools/cmd/staticcheck@latest;
- go install github.com/client9/misspell/cmd/misspell@latest;
- fi
-before_script:
-- $(exit $(go fmt ./... | wc -l))
-- go vet -modfile=go_test.mod ./...
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then
- find . -type f -name "*.go" | xargs misspell -error -locale US;
- GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...;
- fi
-- golangci-lint run ./jetstream/...
-script:
-- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... --failfast -vet=off -tags=internal_testing; fi
-after_success:
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi
-
-jobs:
- include:
- - name: "Go: 1.22.x (nats-server@main)"
- go: "1.22.x"
- before_script:
- - go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main
- allow_failures:
- - name: "Go: 1.22.x (nats-server@main)"
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.words b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.words
deleted file mode 100644
index 24be7f6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.words
+++ /dev/null
@@ -1,106 +0,0 @@
-1
-
-derek
-dlc
-ivan
-
-acknowledgement/SM
-arity
-deduplication/S
-demarshal/SDG
-durables
-iff
-observable/S
-redelivery/S
-retransmitting
-retry/SB
-
-SlowConsumer
-
-AppendInt
-ReadMIMEHeader
-
-clientProtoZero
-jetstream
-v1
-v2
-
-ack/SGD
-auth
-authToken
-chans
-creds
-config/S
-cseq
-impl
-msgh
-msgId
-mux/S
-nack
-ptr
-puback
-scanf
-stderr
-stdout
-structs
-tm
-todo
-unsub/S
-
-permessage
-permessage-deflate
-urlA
-urlB
-websocket
-ws
-wss
-
-NKey
-pList
-
-backend/S
-backoff/S
-decompressor/CGS
-inflight
-inlined
-lookups
-reconnection/MS
-redeliver/ADGS
-responder/S
-rewrap/S
-rollup/S
-unreceive/DRSZGB
-variadic
-wakeup/S
-whitespace
-wrap/AS
-
-omitempty
-
-apache
-html
-ietf
-www
-
-sum256
-32bit/S
-64bit/S
-64k
-128k
-512k
-
-hacky
-handroll/D
-
-rfc6455
-rfc7692
-0x00
-0xff
-20x
-40x
-50x
-
-ErrXXX
-
-atlanta
-eu
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.words.readme b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.words.readme
deleted file mode 100644
index 9d9f5cb..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/.words.readme
+++ /dev/null
@@ -1,25 +0,0 @@
-The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries
-but populates the dictionary with identifiers from the Go source.
-
-
-
-Alas, no comments are allowed in the .words file and newer versions of gospel
-error out on seeing them. This is really a hunspell restriction.
-
-We assume en_US hunspell dictionaries are installed and used.
-The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff)
-Invoke `hunspell -D` to see the actual locations.
-
-Words which are in the base dictionary can't have extra affix rules added to
-them, so we have to start with the affixed variant we want to add.
-Thus `creds` rather than `cred/S` and so on.
-
-So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants,
-we have to use unreceive as the stem.
-
-We can't define our own affix or compound rules,
-to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2}
-
-The spelling tokenizer doesn't take "permessage-deflate" as allowing for ...
-"permessage-deflate", which is an RFC7692 registered extension for websockets.
-We have to explicitly list "permessage".
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md
deleted file mode 100644
index b850d49..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Community Code of Conduct
-
-NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md
deleted file mode 100644
index cfb7e4e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/CONTRIBUTING.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Contributing
-
-Thanks for your interest in contributing! This document contains `nats-io/nats.go` specific contributing details. If you
-are a first-time contributor, please refer to the general [NATS Contributor Guide](https://nats.io/contributing/) to get
-a comprehensive overview of contributing to the NATS project.
-
-## Getting started
-
-There are three general ways you can contribute to this repo:
-
-- Proposing an enhancement or new feature
-- Reporting a bug or regression
-- Contributing changes to the source code
-
-For the first two, refer to the [GitHub Issues](https://github.com/nats-io/nats.go/issues/new/choose) which guides you
-through the available options along with the needed information to collect.
-
-## Contributing changes
-
-_Prior to opening a pull request, it is recommended to open an issue first to ensure the maintainers can review intended
-changes. Exceptions to this rule include fixing non-functional source such as code comments, documentation or other
-supporting files._
-
-Proposing source code changes is done through GitHub's standard pull request workflow.
-
-If your branch is a work-in-progress then please start by creating your pull requests as draft, by clicking the
-down-arrow next to the `Create pull request` button and instead selecting `Create draft pull request`.
-
-This will defer the automatic process of requesting a review from the NATS team and significantly reduces noise until
-you are ready. Once you are happy, you can click the `Ready for review` button.
-
-### Guidelines
-
-A good pull request includes:
-
-- A high-level description of the changes, including links to any issues that are related by adding comments
- like `Resolves #NNN` to your description.
- See [Linking a Pull Request to an Issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue)
- for more information.
-- An up-to-date parent commit. Please make sure you are pulling in the latest `main` branch and rebasing your work on
- top of it, i.e. `git rebase main`.
-- Unit tests where appropriate. Bug fixes will benefit from the addition of regression tests. New features will not be
- accepted without suitable test coverage!
-- No more commits than necessary. Sometimes having multiple commits is useful for telling a story or isolating changes
- from one another, but please squash down any unnecessary commits that may just be for clean-up, comments or small
- changes.
-- No additional external dependencies that aren't absolutely essential. Please do everything you can to avoid pulling in
- additional libraries/dependencies into `go.mod` as we will be very critical of these.
-
-### Sign-off
-
-In order to accept a contribution, you will first need to certify that the contribution is your original work and that
-you license the work to the project under
-the [Apache-2.0 license](https://github.com/nats-io/nats.go/blob/main/LICENSE).
-
-This is done by using `Signed-off-by` statements, which should appear in **both** your commit messages and your PR
-description. Please note that we can only accept sign-offs under a legal name. Nicknames and aliases are not permitted.
-
-To perform a sign-off with `git`, use `git commit -s` (or `--signoff`).
-
-## Get help
-
-If you have questions about the contribution process, please start
-a [GitHub discussion](https://github.com/nats-io/nats.go/discussions), join the [NATS Slack](https://slack.nats.io/), or
-send your question to the [NATS Google Group](https://groups.google.com/forum/#!forum/natsio).
-
-## Testing
-
-You should use `go_test.mod` to manage your testing dependencies. Please use the following command to update your
-dependencies and avoid changing the main `go.mod` in a PR:
-
-```shell
-go mod tidy -modfile=go_test.mod
-```
-
-To the tests you can pass `-modfile=go_test.mod` flag to `go test` or instead you can also set `GOFLAGS="-modfile=go_test.mod"` as an environment variable:
-
-```shell
-go test ./... -modfile=go_test.mod
-```
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/GOVERNANCE.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/GOVERNANCE.md
deleted file mode 100644
index 1d5a7be..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/GOVERNANCE.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# NATS Go Client Governance
-
-NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md).
\ No newline at end of file
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/LICENSE b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/MAINTAINERS.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/MAINTAINERS.md
deleted file mode 100644
index 2321465..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/MAINTAINERS.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Maintainers
-
-Maintainership is on a per project basis.
-
-### Maintainers
- - Derek Collison [@derekcollison](https://github.com/derekcollison)
- - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic)
- - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/README.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/README.md
deleted file mode 100644
index 976ed70..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/README.md
+++ /dev/null
@@ -1,495 +0,0 @@
-# NATS - Go Client
-A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io).
-
-[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url]
-
-[License-Url]: https://www.apache.org/licenses/LICENSE-2.0
-[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg
-[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go
-[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go
-[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go
-[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main
-[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go
-[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c
-[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main
-[Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main
-
-**Check out [NATS by example](https://natsbyexample.com) - An evolving collection of runnable, cross-client reference examples for NATS.**
-
-## Installation
-
-```bash
-# Go client
-go get github.com/nats-io/nats.go/
-
-# Server
-go get github.com/nats-io/nats-server
-```
-
-When using or transitioning to Go modules support:
-
-```bash
-# Go client latest or explicit version
-go get github.com/nats-io/nats.go/@latest
-go get github.com/nats-io/nats.go/@v1.35.0
-
-# For latest NATS Server, add /v2 at the end
-go get github.com/nats-io/nats-server/v2
-
-# NATS Server v1 is installed otherwise
-# go get github.com/nats-io/nats-server
-```
-
-## Basic Usage
-
-```go
-import "github.com/nats-io/nats.go"
-
-// Connect to a server
-nc, _ := nats.Connect(nats.DefaultURL)
-
-// Simple Publisher
-nc.Publish("foo", []byte("Hello World"))
-
-// Simple Async Subscriber
-nc.Subscribe("foo", func(m *nats.Msg) {
- fmt.Printf("Received a message: %s\n", string(m.Data))
-})
-
-// Responding to a request message
-nc.Subscribe("request", func(m *nats.Msg) {
- m.Respond([]byte("answer is 42"))
-})
-
-// Simple Sync Subscriber
-sub, err := nc.SubscribeSync("foo")
-m, err := sub.NextMsg(timeout)
-
-// Channel Subscriber
-ch := make(chan *nats.Msg, 64)
-sub, err := nc.ChanSubscribe("foo", ch)
-msg := <- ch
-
-// Unsubscribe
-sub.Unsubscribe()
-
-// Drain
-sub.Drain()
-
-// Requests
-msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond)
-
-// Replies
-nc.Subscribe("help", func(m *nats.Msg) {
- nc.Publish(m.Reply, []byte("I can help!"))
-})
-
-// Drain connection (Preferred for responders)
-// Close() not needed if this is called.
-nc.Drain()
-
-// Close connection
-nc.Close()
-```
-
-## JetStream
-
-JetStream is the built-in NATS persistence system. `nats.go` provides a built-in
-API enabling both managing JetStream assets as well as publishing/consuming
-persistent messages.
-
-### Basic usage
-
-```go
-// connect to nats server
-nc, _ := nats.Connect(nats.DefaultURL)
-
-// create jetstream context from nats connection
-js, _ := jetstream.New(nc)
-
-ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-defer cancel()
-
-// get existing stream handle
-stream, _ := js.Stream(ctx, "foo")
-
-// retrieve consumer handle from a stream
-cons, _ := stream.Consumer(ctx, "cons")
-
-// consume messages from the consumer in callback
-cc, _ := cons.Consume(func(msg jetstream.Msg) {
- fmt.Println("Received jetstream message: ", string(msg.Data()))
- msg.Ack()
-})
-defer cc.Stop()
-```
-
-To find more information on `nats.go` JetStream API, visit
-[`jetstream/README.md`](jetstream/README.md)
-
-> The current JetStream API replaces the [legacy JetStream API](legacy_jetstream.md)
-
-## Service API
-
-The service API (`micro`) allows you to [easily build NATS services](micro/README.md) The
-services API is currently in beta release.
-
-## Encoded Connections
-
-```go
-
-nc, _ := nats.Connect(nats.DefaultURL)
-c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-defer c.Close()
-
-// Simple Publisher
-c.Publish("foo", "Hello World")
-
-// Simple Async Subscriber
-c.Subscribe("foo", func(s string) {
- fmt.Printf("Received a message: %s\n", s)
-})
-
-// EncodedConn can Publish any raw Go type using the registered Encoder
-type person struct {
- Name string
- Address string
- Age int
-}
-
-// Go type Subscriber
-c.Subscribe("hello", func(p *person) {
- fmt.Printf("Received a person: %+v\n", p)
-})
-
-me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"}
-
-// Go type Publisher
-c.Publish("hello", me)
-
-// Unsubscribe
-sub, err := c.Subscribe("foo", nil)
-// ...
-sub.Unsubscribe()
-
-// Requests
-var response string
-err = c.Request("help", "help me", &response, 10*time.Millisecond)
-if err != nil {
- fmt.Printf("Request failed: %v\n", err)
-}
-
-// Replying
-c.Subscribe("help", func(subj, reply string, msg string) {
- c.Publish(reply, "I can help!")
-})
-
-// Close connection
-c.Close();
-```
-
-## New Authentication (Nkeys and User Credentials)
-This requires server with version >= 2.0.0
-
-NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys.
-The simplest form is to use the helper method UserCredentials(credsFilepath).
-```go
-nc, err := nats.Connect(url, nats.UserCredentials("user.creds"))
-```
-
-The helper methods creates two callback handlers to present the user JWT and sign the nonce challenge from the server.
-The core client library never has direct access to your private key and simply performs the callback for signing the server challenge.
-The helper will load and wipe and erase memory it uses for each connect or reconnect.
-
-The helper also can take two entries, one for the JWT and one for the NKey seed file.
-```go
-nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk"))
-```
-
-You can also set the callback handlers directly and manage challenge signing directly.
-```go
-nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB))
-```
-
-Bare Nkeys are also supported. The nkey seed should be in a read only file, e.g. seed.txt
-```bash
-> cat seed.txt
-# This is my seed nkey!
-SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM
-```
-
-This is a helper function which will load and decode and do the proper signing for the server nonce.
-It will clear memory in between invocations.
-You can choose to use the low level option and provide the public key and a signature callback on your own.
-
-```go
-opt, err := nats.NkeyOptionFromSeed("seed.txt")
-nc, err := nats.Connect(serverUrl, opt)
-
-// Direct
-nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB))
-```
-
-## TLS
-
-```go
-// tls as a scheme will enable secure connections by default. This will also verify the server name.
-nc, err := nats.Connect("tls://nats.demo.io:4443")
-
-// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup.
-// We provide a helper method to make this case easier.
-nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem"))
-
-// If the server requires client certificate, there is an helper function for that too:
-cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")
-nc, err = nats.Connect("tls://localhost:4443", cert)
-
-// You can also supply a complete tls.Config
-
-certFile := "./configs/certs/client-cert.pem"
-keyFile := "./configs/certs/client-key.pem"
-cert, err := tls.LoadX509KeyPair(certFile, keyFile)
-if err != nil {
- t.Fatalf("error parsing X509 certificate/key pair: %v", err)
-}
-
-config := &tls.Config{
- ServerName: opts.Host,
- Certificates: []tls.Certificate{cert},
- RootCAs: pool,
- MinVersion: tls.VersionTLS12,
-}
-
-nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config))
-if err != nil {
- t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err)
-}
-
-```
-
-## Using Go Channels (netchan)
-
-```go
-nc, _ := nats.Connect(nats.DefaultURL)
-ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-defer ec.Close()
-
-type person struct {
- Name string
- Address string
- Age int
-}
-
-recvCh := make(chan *person)
-ec.BindRecvChan("hello", recvCh)
-
-sendCh := make(chan *person)
-ec.BindSendChan("hello", sendCh)
-
-me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"}
-
-// Send via Go channels
-sendCh <- me
-
-// Receive via Go channels
-who := <- recvCh
-```
-
-## Wildcard Subscriptions
-
-```go
-
-// "*" matches any token, at any level of the subject.
-nc.Subscribe("foo.*.baz", func(m *Msg) {
- fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data));
-})
-
-nc.Subscribe("foo.bar.*", func(m *Msg) {
- fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data));
-})
-
-// ">" matches any length of the tail of a subject, and can only be the last token
-// E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'
-nc.Subscribe("foo.>", func(m *Msg) {
- fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data));
-})
-
-// Matches all of the above
-nc.Publish("foo.bar.baz", []byte("Hello World"))
-
-```
-
-## Queue Groups
-
-```go
-// All subscriptions with the same queue name will form a queue group.
-// Each message will be delivered to only one subscriber per queue group,
-// using queuing semantics. You can have as many queue groups as you wish.
-// Normal subscribers will continue to work as expected.
-
-nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) {
- received += 1;
-})
-```
-
-## Advanced Usage
-
-```go
-
-// Normally, the library will return an error when trying to connect and
-// there is no server running. The RetryOnFailedConnect option will set
-// the connection in reconnecting state if it failed to connect right away.
-nc, err := nats.Connect(nats.DefaultURL,
- nats.RetryOnFailedConnect(true),
- nats.MaxReconnects(10),
- nats.ReconnectWait(time.Second),
- nats.ReconnectHandler(func(_ *nats.Conn) {
- // Note that this will be invoked for the first asynchronous connect.
- }))
-if err != nil {
- // Should not return an error even if it can't connect, but you still
- // need to check in case there are some configuration errors.
-}
-
-// Flush connection to server, returns when all messages have been processed.
-nc.Flush()
-fmt.Println("All clear!")
-
-// FlushTimeout specifies a timeout value as well.
-err := nc.FlushTimeout(1*time.Second)
-if err != nil {
- fmt.Println("All clear!")
-} else {
- fmt.Println("Flushed timed out!")
-}
-
-// Auto-unsubscribe after MAX_WANTED messages received
-const MAX_WANTED = 10
-sub, err := nc.Subscribe("foo")
-sub.AutoUnsubscribe(MAX_WANTED)
-
-// Multiple connections
-nc1 := nats.Connect("nats://host1:4222")
-nc2 := nats.Connect("nats://host2:4222")
-
-nc1.Subscribe("foo", func(m *Msg) {
- fmt.Printf("Received a message: %s\n", string(m.Data))
-})
-
-nc2.Publish("foo", []byte("Hello World!"));
-
-```
-
-## Clustered Usage
-
-```go
-
-var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224"
-
-nc, err := nats.Connect(servers)
-
-// Optionally set ReconnectWait and MaxReconnect attempts.
-// This example means 10 seconds total per backend.
-nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second))
-
-// You can also add some jitter for the reconnection.
-// This call will add up to 500 milliseconds for non TLS connections and 2 seconds for TLS connections.
-// If not specified, the library defaults to 100 milliseconds and 1 second, respectively.
-nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second))
-
-// You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried
-// all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter.
-// The library will pass the number of times it went through the whole list.
-nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration {
- return someBackoffFunction(attempts)
-}))
-
-// Optionally disable randomization of the server pool
-nc, err = nats.Connect(servers, nats.DontRandomize())
-
-// Setup callbacks to be notified on disconnects, reconnects and connection closed.
-nc, err = nats.Connect(servers,
- nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
- fmt.Printf("Got disconnected! Reason: %q\n", err)
- }),
- nats.ReconnectHandler(func(nc *nats.Conn) {
- fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
- }),
- nats.ClosedHandler(func(nc *nats.Conn) {
- fmt.Printf("Connection closed. Reason: %q\n", nc.LastError())
- })
-)
-
-// When connecting to a mesh of servers with auto-discovery capabilities,
-// you may need to provide a username/password or token in order to connect
-// to any server in that mesh when authentication is required.
-// Instead of providing the credentials in the initial URL, you will use
-// new option setters:
-nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar"))
-
-// For token based authentication:
-nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken"))
-
-// You can even pass the two at the same time in case one of the server
-// in the mesh requires token instead of user name and password.
-nc, err = nats.Connect("nats://localhost:4222",
- nats.UserInfo("foo", "bar"),
- nats.Token("S3cretT0ken"))
-
-// Note that if credentials are specified in the initial URLs, they take
-// precedence on the credentials specified through the options.
-// For instance, in the connect call below, the client library will use
-// the user "my" and password "pwd" to connect to localhost:4222, however,
-// it will use username "foo" and password "bar" when (re)connecting to
-// a different server URL that it got as part of the auto-discovery.
-nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar"))
-
-```
-
-## Context support (+Go 1.7)
-
-```go
-ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-defer cancel()
-
-nc, err := nats.Connect(nats.DefaultURL)
-
-// Request with context
-msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar"))
-
-// Synchronous subscriber with context
-sub, err := nc.SubscribeSync("foo")
-msg, err := sub.NextMsgWithContext(ctx)
-
-// Encoded Request with context
-c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-type request struct {
- Message string `json:"message"`
-}
-type response struct {
- Code int `json:"code"`
-}
-req := &request{Message: "Hello"}
-resp := &response{}
-err := c.RequestWithContext(ctx, "foo", req, resp)
-```
-
-## Backwards compatibility
-
-In the development of nats.go, we are committed to maintaining backward compatibility and ensuring a stable and reliable experience for all users. In general, we follow the standard go compatibility guidelines.
-However, it's important to clarify our stance on certain types of changes:
-
-- **Expanding structures:**
-Adding new fields to structs is not considered a breaking change.
-
-- **Adding methods to exported interfaces:**
-Extending public interfaces with new methods is also not viewed as a breaking change within the context of this project. It is important to note that no unexported methods will be added to interfaces allowing users to implement them.
-
-Additionally, this library always supports at least 2 latest minor Go versions. For example, if the latest Go version is 1.22, the library will support Go 1.21 and 1.22.
-
-## License
-
-Unless otherwise noted, the NATS source files are distributed
-under the Apache Version 2.0 license found in the LICENSE file.
-
-[](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/context.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/context.go
deleted file mode 100644
index 20f1782..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/context.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2016-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "context"
- "reflect"
-)
-
-// RequestMsgWithContext takes a context, a subject and payload
-// in bytes and request expecting a single response.
-func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) {
- if msg == nil {
- return nil, ErrInvalidMsg
- }
- hdr, err := msg.headerBytes()
- if err != nil {
- return nil, err
- }
- return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data)
-}
-
-// RequestWithContext takes a context, a subject and payload
-// in bytes and request expecting a single response.
-func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
- return nc.requestWithContext(ctx, subj, nil, data)
-}
-
-func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
- if ctx == nil {
- return nil, ErrInvalidContext
- }
- if nc == nil {
- return nil, ErrInvalidConnection
- }
- // Check whether the context is done already before making
- // the request.
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
-
- var m *Msg
- var err error
-
- // If user wants the old style.
- if nc.useOldRequestStyle() {
- m, err = nc.oldRequestWithContext(ctx, subj, hdr, data)
- } else {
- mch, token, err := nc.createNewRequestAndSend(subj, hdr, data)
- if err != nil {
- return nil, err
- }
-
- var ok bool
-
- select {
- case m, ok = <-mch:
- if !ok {
- return nil, ErrConnectionClosed
- }
- case <-ctx.Done():
- nc.mu.Lock()
- delete(nc.respMap, token)
- nc.mu.Unlock()
- return nil, ctx.Err()
- }
- }
- // Check for no responder status.
- if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
- m, err = nil, ErrNoResponders
- }
- return m, err
-}
-
-// oldRequestWithContext utilizes inbox and subscription per request.
-func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
- inbox := nc.NewInbox()
- ch := make(chan *Msg, RequestChanLen)
-
- s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil)
- if err != nil {
- return nil, err
- }
- s.AutoUnsubscribe(1)
- defer s.Unsubscribe()
-
- err = nc.publish(subj, inbox, hdr, data)
- if err != nil {
- return nil, err
- }
-
- return s.NextMsgWithContext(ctx)
-}
-
-func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) {
- if ctx == nil {
- return nil, ErrInvalidContext
- }
- if s == nil {
- return nil, ErrBadSubscription
- }
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
-
- s.mu.Lock()
- err := s.validateNextMsgState(pullSubInternal)
- if err != nil {
- s.mu.Unlock()
- return nil, err
- }
-
- // snapshot
- mch := s.mch
- s.mu.Unlock()
-
- var ok bool
- var msg *Msg
-
- // If something is available right away, let's optimize that case.
- select {
- case msg, ok = <-mch:
- if !ok {
- return nil, s.getNextMsgErr()
- }
- if err := s.processNextMsgDelivered(msg); err != nil {
- return nil, err
- }
- return msg, nil
- default:
- // If internal and we don't want to wait, signal that there is no
- // message in the internal queue.
- if pullSubInternal && !waitIfNoMsg {
- return nil, errNoMessages
- }
- }
-
- select {
- case msg, ok = <-mch:
- if !ok {
- return nil, s.getNextMsgErr()
- }
- if err := s.processNextMsgDelivered(msg); err != nil {
- return nil, err
- }
- case <-ctx.Done():
- return nil, ctx.Err()
- }
-
- return msg, nil
-}
-
-// NextMsgWithContext takes a context and returns the next message
-// available to a synchronous subscriber, blocking until it is delivered
-// or context gets canceled.
-func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) {
- return s.nextMsgWithContext(ctx, false, true)
-}
-
-// FlushWithContext will allow a context to control the duration
-// of a Flush() call. This context should be non-nil and should
-// have a deadline set. We will return an error if none is present.
-func (nc *Conn) FlushWithContext(ctx context.Context) error {
- if nc == nil {
- return ErrInvalidConnection
- }
- if ctx == nil {
- return ErrInvalidContext
- }
- _, ok := ctx.Deadline()
- if !ok {
- return ErrNoDeadlineContext
- }
-
- nc.mu.Lock()
- if nc.isClosed() {
- nc.mu.Unlock()
- return ErrConnectionClosed
- }
- // Create a buffered channel to prevent chan send to block
- // in processPong()
- ch := make(chan struct{}, 1)
- nc.sendPing(ch)
- nc.mu.Unlock()
-
- var err error
-
- select {
- case _, ok := <-ch:
- if !ok {
- err = ErrConnectionClosed
- } else {
- close(ch)
- }
- case <-ctx.Done():
- err = ctx.Err()
- }
-
- if err != nil {
- nc.removeFlushEntry(ch)
- }
-
- return err
-}
-
-// RequestWithContext will create an Inbox and perform a Request
-// using the provided cancellation context with the Inbox reply
-// for the data v. A response will be decoded into the vPtr last parameter.
-func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error {
- if ctx == nil {
- return ErrInvalidContext
- }
-
- b, err := c.Enc.Encode(subject, v)
- if err != nil {
- return err
- }
- m, err := c.Conn.RequestWithContext(ctx, subject, b)
- if err != nil {
- return err
- }
- if reflect.TypeOf(vPtr) == emptyMsgType {
- mPtr := vPtr.(*Msg)
- *mPtr = *m
- } else {
- err := c.Enc.Decode(m.Subject, m.Data, vPtr)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/dependencies.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/dependencies.md
deleted file mode 100644
index ec9ab3c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/dependencies.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# External Dependencies
-
-This file lists the dependencies used in this repository.
-
-| Dependency | License |
-|-----------------------------------|--------------|
-| Go | BSD 3-Clause |
-| github.com/golang/protobuf/proto | BSD-3-Clause |
-| github.com/klauspost/compress | BSD-3-Clause |
-| github.com/nats-io/nats-server/v2 | Apache-2.0 |
-| github.com/nats-io/nkeys | Apache-2.0 |
-| github.com/nats-io/nuid | Apache-2.0 |
-| go.uber.org/goleak | MIT |
-| golang.org/x/text | BSD-3-Clause |
-| google.golang.org/protobuf | BSD-3-Clause |
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/enc.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/enc.go
deleted file mode 100644
index 4550f61..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/enc.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2012-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "errors"
- "fmt"
- "reflect"
- "sync"
- "time"
-
- // Default Encoders
- "github.com/nats-io/nats.go/encoders/builtin"
-)
-
-// Encoder interface is for all register encoders
-type Encoder interface {
- Encode(subject string, v any) ([]byte, error)
- Decode(subject string, data []byte, vPtr any) error
-}
-
-var encMap map[string]Encoder
-var encLock sync.Mutex
-
-// Indexed names into the Registered Encoders.
-const (
- JSON_ENCODER = "json"
- GOB_ENCODER = "gob"
- DEFAULT_ENCODER = "default"
-)
-
-func init() {
- encMap = make(map[string]Encoder)
- // Register json, gob and default encoder
- RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{})
- RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{})
- RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{})
-}
-
-// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to
-// a nats server and have an extendable encoder system that will encode and decode messages
-// from raw Go types.
-type EncodedConn struct {
- Conn *Conn
- Enc Encoder
-}
-
-// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered
-// encoder.
-func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) {
- if c == nil {
- return nil, errors.New("nats: Nil Connection")
- }
- if c.IsClosed() {
- return nil, ErrConnectionClosed
- }
- ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)}
- if ec.Enc == nil {
- return nil, fmt.Errorf("no encoder registered for '%s'", encType)
- }
- return ec, nil
-}
-
-// RegisterEncoder will register the encType with the given Encoder. Useful for customization.
-func RegisterEncoder(encType string, enc Encoder) {
- encLock.Lock()
- defer encLock.Unlock()
- encMap[encType] = enc
-}
-
-// EncoderForType will return the registered Encoder for the encType.
-func EncoderForType(encType string) Encoder {
- encLock.Lock()
- defer encLock.Unlock()
- return encMap[encType]
-}
-
-// Publish publishes the data argument to the given subject. The data argument
-// will be encoded using the associated encoder.
-func (c *EncodedConn) Publish(subject string, v any) error {
- b, err := c.Enc.Encode(subject, v)
- if err != nil {
- return err
- }
- return c.Conn.publish(subject, _EMPTY_, nil, b)
-}
-
-// PublishRequest will perform a Publish() expecting a response on the
-// reply subject. Use Request() for automatically waiting for a response
-// inline.
-func (c *EncodedConn) PublishRequest(subject, reply string, v any) error {
- b, err := c.Enc.Encode(subject, v)
- if err != nil {
- return err
- }
- return c.Conn.publish(subject, reply, nil, b)
-}
-
-// Request will create an Inbox and perform a Request() call
-// with the Inbox reply for the data v. A response will be
-// decoded into the vPtr Response.
-func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error {
- b, err := c.Enc.Encode(subject, v)
- if err != nil {
- return err
- }
- m, err := c.Conn.Request(subject, b, timeout)
- if err != nil {
- return err
- }
- if reflect.TypeOf(vPtr) == emptyMsgType {
- mPtr := vPtr.(*Msg)
- *mPtr = *m
- } else {
- err = c.Enc.Decode(m.Subject, m.Data, vPtr)
- }
- return err
-}
-
-// Handler is a specific callback used for Subscribe. It is generalized to
-// an any, but we will discover its format and arguments at runtime
-// and perform the correct callback, including demarshaling encoded data
-// back into the appropriate struct based on the signature of the Handler.
-//
-// Handlers are expected to have one of four signatures.
-//
-// type person struct {
-// Name string `json:"name,omitempty"`
-// Age uint `json:"age,omitempty"`
-// }
-//
-// handler := func(m *Msg)
-// handler := func(p *person)
-// handler := func(subject string, o *obj)
-// handler := func(subject, reply string, o *obj)
-//
-// These forms allow a callback to request a raw Msg ptr, where the processing
-// of the message from the wire is untouched. Process a JSON representation
-// and demarshal it into the given struct, e.g. person.
-// There are also variants where the callback wants either the subject, or the
-// subject and the reply subject.
-type Handler any
-
-// Dissect the cb Handler's signature
-func argInfo(cb Handler) (reflect.Type, int) {
- cbType := reflect.TypeOf(cb)
- if cbType.Kind() != reflect.Func {
- panic("nats: Handler needs to be a func")
- }
- numArgs := cbType.NumIn()
- if numArgs == 0 {
- return nil, numArgs
- }
- return cbType.In(numArgs - 1), numArgs
-}
-
-var emptyMsgType = reflect.TypeOf(&Msg{})
-
-// Subscribe will create a subscription on the given subject and process incoming
-// messages using the specified Handler. The Handler should be a func that matches
-// a signature from the description of Handler from above.
-func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) {
- return c.subscribe(subject, _EMPTY_, cb)
-}
-
-// QueueSubscribe will create a queue subscription on the given subject and process
-// incoming messages using the specified Handler. The Handler should be a func that
-// matches a signature from the description of Handler from above.
-func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) {
- return c.subscribe(subject, queue, cb)
-}
-
-// Internal implementation that all public functions will use.
-func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) {
- if cb == nil {
- return nil, errors.New("nats: Handler required for EncodedConn Subscription")
- }
- argType, numArgs := argInfo(cb)
- if argType == nil {
- return nil, errors.New("nats: Handler requires at least one argument")
- }
-
- cbValue := reflect.ValueOf(cb)
- wantsRaw := (argType == emptyMsgType)
-
- natsCB := func(m *Msg) {
- var oV []reflect.Value
- if wantsRaw {
- oV = []reflect.Value{reflect.ValueOf(m)}
- } else {
- var oPtr reflect.Value
- if argType.Kind() != reflect.Ptr {
- oPtr = reflect.New(argType)
- } else {
- oPtr = reflect.New(argType.Elem())
- }
- if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
- if c.Conn.Opts.AsyncErrorCB != nil {
- c.Conn.ach.push(func() {
- c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error()))
- })
- }
- return
- }
- if argType.Kind() != reflect.Ptr {
- oPtr = reflect.Indirect(oPtr)
- }
-
- // Callback Arity
- switch numArgs {
- case 1:
- oV = []reflect.Value{oPtr}
- case 2:
- subV := reflect.ValueOf(m.Subject)
- oV = []reflect.Value{subV, oPtr}
- case 3:
- subV := reflect.ValueOf(m.Subject)
- replyV := reflect.ValueOf(m.Reply)
- oV = []reflect.Value{subV, replyV, oPtr}
- }
-
- }
- cbValue.Call(oV)
- }
-
- return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil)
-}
-
-// FlushTimeout allows a Flush operation to have an associated timeout.
-func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) {
- return c.Conn.FlushTimeout(timeout)
-}
-
-// Flush will perform a round trip to the server and return when it
-// receives the internal reply.
-func (c *EncodedConn) Flush() error {
- return c.Conn.Flush()
-}
-
-// Close will close the connection to the server. This call will release
-// all blocking calls, such as Flush(), etc.
-func (c *EncodedConn) Close() {
- c.Conn.Close()
-}
-
-// Drain will put a connection into a drain state. All subscriptions will
-// immediately be put into a drain state. Upon completion, the publishers
-// will be drained and can not publish any additional messages. Upon draining
-// of the publishers, the connection will be closed. Use the ClosedCB()
-// option to know when the connection has moved from draining to closed.
-func (c *EncodedConn) Drain() error {
- return c.Conn.Drain()
-}
-
-// LastError reports the last error encountered via the Connection.
-func (c *EncodedConn) LastError() error {
- return c.Conn.LastError()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
deleted file mode 100644
index c1d0f6f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2012-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package builtin
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strconv"
- "unsafe"
-)
-
-// DefaultEncoder implementation for EncodedConn.
-// This encoder will leave []byte and string untouched, but will attempt to
-// turn numbers into appropriate strings that can be decoded. It will also
-// properly encoded and decode bools. If will encode a struct, but if you want
-// to properly handle structures you should use JsonEncoder.
-type DefaultEncoder struct {
- // Empty
-}
-
-var trueB = []byte("true")
-var falseB = []byte("false")
-var nilB = []byte("")
-
-// Encode
-func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) {
- switch arg := v.(type) {
- case string:
- bytes := *(*[]byte)(unsafe.Pointer(&arg))
- return bytes, nil
- case []byte:
- return arg, nil
- case bool:
- if arg {
- return trueB, nil
- } else {
- return falseB, nil
- }
- case nil:
- return nilB, nil
- default:
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "%+v", arg)
- return buf.Bytes(), nil
- }
-}
-
-// Decode
-func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error {
- // Figure out what it's pointing to...
- sData := *(*string)(unsafe.Pointer(&data))
- switch arg := vPtr.(type) {
- case *string:
- *arg = sData
- return nil
- case *[]byte:
- *arg = data
- return nil
- case *int:
- n, err := strconv.ParseInt(sData, 10, 64)
- if err != nil {
- return err
- }
- *arg = int(n)
- return nil
- case *int32:
- n, err := strconv.ParseInt(sData, 10, 64)
- if err != nil {
- return err
- }
- *arg = int32(n)
- return nil
- case *int64:
- n, err := strconv.ParseInt(sData, 10, 64)
- if err != nil {
- return err
- }
- *arg = int64(n)
- return nil
- case *float32:
- n, err := strconv.ParseFloat(sData, 32)
- if err != nil {
- return err
- }
- *arg = float32(n)
- return nil
- case *float64:
- n, err := strconv.ParseFloat(sData, 64)
- if err != nil {
- return err
- }
- *arg = float64(n)
- return nil
- case *bool:
- b, err := strconv.ParseBool(sData)
- if err != nil {
- return err
- }
- *arg = b
- return nil
- default:
- vt := reflect.TypeOf(arg).Elem()
- return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt)
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
deleted file mode 100644
index 7ecf85e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2013-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package builtin
-
-import (
- "bytes"
- "encoding/gob"
-)
-
-// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn.
-// This encoder will use the builtin encoding/gob to Marshal
-// and Unmarshal most types, including structs.
-type GobEncoder struct {
- // Empty
-}
-
-// FIXME(dlc) - This could probably be more efficient.
-
-// Encode
-func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) {
- b := new(bytes.Buffer)
- enc := gob.NewEncoder(b)
- if err := enc.Encode(v); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-// Decode
-func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) {
- dec := gob.NewDecoder(bytes.NewBuffer(data))
- err = dec.Decode(vPtr)
- return
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go
deleted file mode 100644
index 0540d98..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package builtin
-
-import (
- "encoding/json"
- "strings"
-)
-
-// JsonEncoder is a JSON Encoder implementation for EncodedConn.
-// This encoder will use the builtin encoding/json to Marshal
-// and Unmarshal most types, including structs.
-type JsonEncoder struct {
- // Empty
-}
-
-// Encode
-func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) {
- b, err := json.Marshal(v)
- if err != nil {
- return nil, err
- }
- return b, nil
-}
-
-// Decode
-func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) {
- switch arg := vPtr.(type) {
- case *string:
- // If they want a string and it is a JSON string, strip quotes
- // This allows someone to send a struct but receive as a plain string
- // This cast should be efficient for Go 1.3 and beyond.
- str := string(data)
- if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) {
- *arg = str[1 : len(str)-1]
- } else {
- *arg = str
- }
- case *[]byte:
- *arg = data
- default:
- err = json.Unmarshal(data, arg)
- }
- return
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/go_test.mod b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/go_test.mod
deleted file mode 100644
index 5dfd112..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/go_test.mod
+++ /dev/null
@@ -1,23 +0,0 @@
-module github.com/nats-io/nats.go
-
-go 1.19
-
-require (
- github.com/golang/protobuf v1.4.2
- github.com/klauspost/compress v1.17.6
- github.com/nats-io/jwt v1.2.2
- github.com/nats-io/nats-server/v2 v2.10.11
- github.com/nats-io/nkeys v0.4.7
- github.com/nats-io/nuid v1.0.1
- go.uber.org/goleak v1.3.0
- golang.org/x/text v0.14.0
- google.golang.org/protobuf v1.23.0
-)
-
-require (
- github.com/minio/highwayhash v1.0.2 // indirect
- github.com/nats-io/jwt/v2 v2.5.3 // indirect
- golang.org/x/crypto v0.19.0 // indirect
- golang.org/x/sys v0.17.0 // indirect
- golang.org/x/time v0.5.0 // indirect
-)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/go_test.sum b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/go_test.sum
deleted file mode 100644
index d28f0f6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/go_test.sum
+++ /dev/null
@@ -1,56 +0,0 @@
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
-github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
-github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
-github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=
-github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
-github.com/nats-io/jwt/v2 v2.5.3 h1:/9SWvzc6hTfamcgXJ3uYRpgj+QuY2aLNqRiqrKcrpEo=
-github.com/nats-io/jwt/v2 v2.5.3/go.mod h1:iysuPemFcc7p4IoYots3IuELSI4EDe9Y0bQMe+I3Bf4=
-github.com/nats-io/nats-server/v2 v2.10.11 h1:yKUiLVincZISpo3A4YljJQ+HfLltGAgoNNJl99KL8I0=
-github.com/nats-io/nats-server/v2 v2.10.11/go.mod h1:dXtOqVWzbMTEj+tUyC/itXjJhW37xh0tUBrTAlqAfx8=
-github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
-github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
-github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
-github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
-go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
-go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/internal/parser/parse.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/internal/parser/parse.go
deleted file mode 100644
index 7eab8ad..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/internal/parser/parse.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2020-2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
- "errors"
- "fmt"
-)
-
-const (
- AckDomainTokenPos = iota + 2
- AckAccHashTokenPos
- AckStreamTokenPos
- AckConsumerTokenPos
- AckNumDeliveredTokenPos
- AckStreamSeqTokenPos
- AckConsumerSeqTokenPos
- AckTimestampSeqTokenPos
- AckNumPendingTokenPos
-)
-
-var ErrInvalidSubjectFormat = errors.New("invalid format of ACK subject")
-
-// Quick parser for positive numbers in ack reply encoding.
-// NOTE: This parser does not detect uint64 overflow
-func ParseNum(d string) (n uint64) {
- if len(d) == 0 {
- return 0
- }
-
- // ASCII numbers 0-9
- const (
- asciiZero = 48
- asciiNine = 57
- )
-
- for _, dec := range d {
- if dec < asciiZero || dec > asciiNine {
- return 0
- }
- n = n*10 + uint64(dec) - asciiZero
- }
- return
-}
-
-func GetMetadataFields(subject string) ([]string, error) {
- v1TokenCounts, v2TokenCounts := 9, 12
-
- var start int
- tokens := make([]string, 0, v2TokenCounts)
- for i := 0; i < len(subject); i++ {
- if subject[i] == '.' {
- tokens = append(tokens, subject[start:i])
- start = i + 1
- }
- }
- tokens = append(tokens, subject[start:])
- //
- // Newer server will include the domain name and account hash in the subject,
- // and a token at the end.
- //
- // Old subject was:
- // $JS.ACK.......
- //
- // New subject would be:
- // $JS.ACK..........
- //
- // v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since
- // it may be removed in the future. Also, the library has no use for it.
- // The point is that a v2 ACK subject is valid if it has at least 11 tokens.
- //
- tokensLen := len(tokens)
- // If lower than 9 or more than 9 but less than 11, report an error
- if tokensLen < v1TokenCounts || (tokensLen > v1TokenCounts && tokensLen < v2TokenCounts-1) {
- return nil, ErrInvalidSubjectFormat
- }
- if tokens[0] != "$JS" || tokens[1] != "ACK" {
- return nil, fmt.Errorf("%w: subject should start with $JS.ACK", ErrInvalidSubjectFormat)
- }
- // For v1 style, we insert 2 empty tokens (domain and hash) so that the
- // rest of the library references known fields at a constant location.
- if tokensLen == v1TokenCounts {
- // Extend the array (we know the backend is big enough)
- tokens = append(tokens[:AckDomainTokenPos+2], tokens[AckDomainTokenPos:]...)
- // Clear the domain and hash tokens
- tokens[AckDomainTokenPos], tokens[AckAccHashTokenPos] = "", ""
-
- } else if tokens[AckDomainTokenPos] == "_" {
- // If domain is "_", replace with empty value.
- tokens[AckDomainTokenPos] = ""
- }
- return tokens, nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/README.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/README.md
deleted file mode 100644
index 75b46c7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/README.md
+++ /dev/null
@@ -1,1014 +0,0 @@
-# JetStream Simplified Client
-
-This doc covers the basic usage of the `jetstream` package in `nats.go` client.
-
-- [JetStream Simplified Client](#jetstream-simplified-client)
- - [Overview](#overview)
- - [Basic usage](#basic-usage)
- - [Streams](#streams)
- - [Stream management (CRUD)](#stream-management-crud)
- - [Listing streams and stream names](#listing-streams-and-stream-names)
- - [Stream-specific operations](#stream-specific-operations)
- - [Consumers](#consumers)
- - [Consumers management](#consumers-management)
- - [Listing consumers and consumer
- names](#listing-consumers-and-consumer-names)
- - [Ordered consumers](#ordered-consumers)
- - [Receiving messages from the
- consumer](#receiving-messages-from-the-consumer)
- - [Single fetch](#single-fetch)
- - [Continuous polling](#continuous-polling)
- - [Using `Consume()` receive messages in a
- callback](#using-consume-receive-messages-in-a-callback)
- - [Using `Messages()` to iterate over incoming
- messages](#using-messages-to-iterate-over-incoming-messages)
- - [Publishing on stream](#publishing-on-stream)
- - [Synchronous publish](#synchronous-publish)
- - [Async publish](#async-publish)
- - [KeyValue Store](#keyvalue-store)
- - [Basic usage of KV bucket](#basic-usage-of-kv-bucket)
- - [Watching for changes on a bucket](#watching-for-changes-on-a-bucket)
- - [Additional operations on a bucket](#additional-operations-on-a-bucket)
- - [Object Store](#object-store)
- - [Basic usage of Object Store](#basic-usage-of-object-store)
- - [Watching for changes on a store](#watching-for-changes-on-a-store)
- - [Additional operations on a store](#additional-operations-on-a-store)
- - [Examples](#examples)
-
-## Overview
-
-`jetstream` package is a new client API to interact with NATS JetStream, aiming
-to replace the JetStream client implementation from `nats` package. The main
-goal of this package is to provide a simple and clear way to interact with
-JetStream API. Key differences between `jetstream` and `nats` packages include:
-
-- Using smaller, simpler interfaces to manage streams and consumers
-- Using more granular and predictable approach to consuming messages from a
- stream, instead of relying on often complicated and unpredictable
- `Subscribe()` method (and all of its flavors)
-- Allowing the usage of pull consumers to continuously receive incoming messages
- (including ordered consumer functionality)
-- Separating JetStream context from core NATS
-
-`jetstream` package provides several ways of interacting with the API:
-
-- `JetStream` - top-level interface, used to create and manage streams,
- consumers and publishing messages
-- `Stream` - used to manage consumers for a specific stream, as well as
- performing stream-specific operations (purging, fetching and deleting messages
- by sequence number, fetching stream info)
-- `Consumer` - used to get information about a consumer as well as consuming
- messages
-- `Msg` - used for message-specific operations - reading data, headers and
- metadata, as well as performing various types of acknowledgements
-
-Additionally, `jetstream` exposes [KeyValue Store](#keyvalue-store) and
-[ObjectStore](#object-store) capabilities. KV and Object stores are abstraction
-layers on top of JetStream Streams, simplifying key value and large data
-storage on Streams.
-
-> __NOTE__: `jetstream` requires nats-server >= 2.9.0 to work correctly.
-
-## Basic usage
-
-```go
-package main
-
-import (
- "context"
- "fmt"
- "strconv"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nats.go/jetstream"
-)
-
-func main() {
- // In the `jetstream` package, almost all API calls rely on `context.Context` for timeout/cancellation handling
- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- defer cancel()
- nc, _ := nats.Connect(nats.DefaultURL)
-
- // Create a JetStream management interface
- js, _ := jetstream.New(nc)
-
- // Create a stream
- s, _ := js.CreateStream(ctx, jetstream.StreamConfig{
- Name: "ORDERS",
- Subjects: []string{"ORDERS.*"},
- })
-
- // Publish some messages
- for i := 0; i < 100; i++ {
- js.Publish(ctx, "ORDERS.new", []byte("hello message "+strconv.Itoa(i)))
- fmt.Printf("Published hello message %d\n", i)
- }
-
- // Create durable consumer
- c, _ := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{
- Durable: "CONS",
- AckPolicy: jetstream.AckExplicitPolicy,
- })
-
- // Get 10 messages from the consumer
- messageCounter := 0
- msgs, err := c.Fetch(10)
- if err != nil {
- // handle error
- }
-
- for msg := range msgs.Messages() {
- msg.Ack()
- fmt.Printf("Received a JetStream message via fetch: %s\n", string(msg.Data()))
- messageCounter++
- }
-
- fmt.Printf("received %d messages\n", messageCounter)
-
- if msgs.Error() != nil {
- fmt.Println("Error during Fetch(): ", msgs.Error())
- }
-
- // Receive messages continuously in a callback
- cons, _ := c.Consume(func(msg jetstream.Msg) {
- msg.Ack()
- fmt.Printf("Received a JetStream message via callback: %s\n", string(msg.Data()))
- messageCounter++
- })
- defer cons.Stop()
-
- // Iterate over messages continuously
- it, _ := c.Messages()
- for i := 0; i < 10; i++ {
- msg, _ := it.Next()
- msg.Ack()
- fmt.Printf("Received a JetStream message via iterator: %s\n", string(msg.Data()))
- messageCounter++
- }
- it.Stop()
-
- // block until all 100 published messages have been processed
- for messageCounter < 100 {
- time.Sleep(10 * time.Millisecond)
- }
-}
-```
-
-## Streams
-
-`jetstream` provides methods to manage and list streams, as well as perform
-stream-specific operations (purging, fetching/deleting messages by sequence id)
-
-### Stream management (CRUD)
-
-```go
-js, _ := jetstream.New(nc)
-
-// create a stream (this is an idempotent operation)
-s, _ := js.CreateStream(ctx, jetstream.StreamConfig{
- Name: "ORDERS",
- Subjects: []string{"ORDERS.*"},
-})
-
-// update a stream
-s, _ = js.UpdateStream(ctx, jetstream.StreamConfig{
- Name: "ORDERS",
- Subjects: []string{"ORDERS.*"},
- Description: "updated stream",
-})
-
-// get stream handle
-s, _ = js.Stream(ctx, "ORDERS")
-
-// delete a stream
-js.DeleteStream(ctx, "ORDERS")
-```
-
-### Listing streams and stream names
-
-```go
-// list streams
-streams := js.ListStreams(ctx)
-for s := range streams.Info() {
- fmt.Println(s.Config.Name)
-}
-if streams.Err() != nil {
- fmt.Println("Unexpected error occurred")
-}
-
-// list stream names
-names := js.StreamNames(ctx)
-for name := range names.Name() {
- fmt.Println(name)
-}
-if names.Err() != nil {
- fmt.Println("Unexpected error occurred")
-}
-```
-
-### Stream-specific operations
-
-Using `Stream` interface, it is also possible to:
-
-- Purge a stream
-
-```go
-// remove all messages from a stream
-_ = s.Purge(ctx)
-
-// remove all messages from a stream that are stored on a specific subject
-_ = s.Purge(ctx, jetstream.WithPurgeSubject("ORDERS.new"))
-
-// remove all messages up to specified sequence number
-_ = s.Purge(ctx, jetstream.WithPurgeSequence(100))
-
-// remove messages, but keep 10 newest
-_ = s.Purge(ctx, jetstream.WithPurgeKeep(10))
-```
-
-- Get and delete messages from stream
-
-```go
-// get message from stream with sequence number == 100
-msg, _ := s.GetMsg(ctx, 100)
-
-// get last message from "ORDERS.new" subject
-msg, _ = s.GetLastMsgForSubject(ctx, "ORDERS.new")
-
-// delete a message with sequence number == 100
-_ = s.DeleteMsg(ctx, 100)
-```
-
-- Get information about a stream
-
-```go
-// Fetches latest stream info from server
-info, _ := s.Info(ctx)
-fmt.Println(info.Config.Name)
-
-// Returns the most recently fetched StreamInfo, without making an API call to the server
-cachedInfo := s.CachedInfo()
-fmt.Println(cachedInfo.Config.Name)
-```
-
-## Consumers
-
-Only pull consumers are supported in `jetstream` package. However, unlike the
-JetStream API in `nats` package, pull consumers allow for continuous message
-retrieval (similarly to how `nats.Subscribe()` works). Because of that, push
-consumers can be easily replaced by pull consumers for most of the use cases.
-
-### Consumers management
-
-CRUD operations on consumers can be achieved on 2 levels:
-
-- on `JetStream` interface
-
-```go
-js, _ := jetstream.New(nc)
-
-// create a consumer (this is an idempotent operation)
-// an error will be returned if consumer already exists and has different configuration.
-cons, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
- Durable: "foo",
- AckPolicy: jetstream.AckExplicitPolicy,
-})
-
-// create an ephemeral pull consumer by not providing `Durable`
-ephemeral, _ := js.CreateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
- AckPolicy: jetstream.AckExplicitPolicy,
-})
-
-
-// consumer can also be created using CreateOrUpdateConsumer
-// this method will either create a consumer if it does not exist
-// or update existing consumer (if possible)
-cons2, _ := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
- Name: "bar",
-})
-
-// consumers can be updated
-// an error will be returned if consumer with given name does not exist
-// or an illegal property is to be updated (e.g. AckPolicy)
-updated, _ := js.UpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
- AckPolicy: jetstream.AckExplicitPolicy,
-    Description: "updated consumer",
-})
-
-// get consumer handle
-cons, _ = js.Consumer(ctx, "ORDERS", "foo")
-
-// delete a consumer
-js.DeleteConsumer(ctx, "ORDERS", "foo")
-```
-
-- on `Stream` interface
-
-```go
-// Create a JetStream management interface
-js, _ := jetstream.New(nc)
-
-// get stream handle
-stream, _ := js.Stream(ctx, "ORDERS")
-
-// create consumer
-cons, _ := stream.CreateConsumer(ctx, jetstream.ConsumerConfig{
- Durable: "foo",
- AckPolicy: jetstream.AckExplicitPolicy,
-})
-
-// get consumer handle
-cons, _ = stream.Consumer(ctx, "foo")
-
-// delete a consumer
-stream.DeleteConsumer(ctx, "foo")
-```
-
-`Consumer` interface, returned when creating/fetching consumers, allows fetching
-`ConsumerInfo`:
-
-```go
-// Fetches latest consumer info from server
-info, _ := cons.Info(ctx)
-fmt.Println(info.Config.Durable)
-
-// Returns the most recently fetched ConsumerInfo, without making an API call to the server
-cachedInfo := cons.CachedInfo()
-fmt.Println(cachedInfo.Config.Durable)
-```
-
-### Listing consumers and consumer names
-
-```go
-// list consumers
-consumers := s.ListConsumers(ctx)
-for cons := range consumers.Info() {
- fmt.Println(cons.Name)
-}
-if consumers.Err() != nil {
- fmt.Println("Unexpected error occurred")
-}
-
-// list consumer names
-names := s.ConsumerNames(ctx)
-for name := range names.Name() {
- fmt.Println(name)
-}
-if names.Err() != nil {
- fmt.Println("Unexpected error occurred")
-}
-```
-
-### Ordered consumers
-
-`jetstream`, in addition to basic named/ephemeral consumers, supports ordered
-consumer functionality. An ordered consumer processes messages strictly in the
-order that they were stored on the stream, providing consistent and
-deterministic message ordering. It is also resilient to consumer deletion.
-
-Ordered consumers present the same set of message consumption methods as
-standard pull consumers.
-
-```go
-js, _ := jetstream.New(nc)
-
-// create a consumer (this is an idempotent operation)
-cons, _ := js.OrderedConsumer(ctx, "ORDERS", jetstream.OrderedConsumerConfig{
- // Filter results from "ORDERS" stream by specific subject
-    FilterSubjects: []string{"ORDERS.A"},
-})
-```
-
-### Receiving messages from the consumer
-
-The `Consumer` interface allows fetching messages on demand, with a
-pre-defined batch size or bytes limit, or continuous push-like receiving of
-messages.
-
-#### __Single fetch__
-
-This pattern allows fetching a defined number of messages in a single
-RPC.
-
-- Using `Fetch` or `FetchBytes`, consumer will return up to the provided number
-of messages/bytes. By default, `Fetch()` will wait 30 seconds before timing out
-(this behavior can be configured using `FetchMaxWait()` option):
-
-```go
-// receive up to 10 messages from the stream
-msgs, err := c.Fetch(10)
-if err != nil {
- // handle error
-}
-
-for msg := range msgs.Messages() {
- fmt.Printf("Received a JetStream message: %s\n", string(msg.Data()))
-}
-
-if msgs.Error() != nil {
- // handle error
-}
-
-// receive up to 1024 B of data
-msgs, err := c.FetchBytes(1024)
-if err != nil {
-// handle error
-}
-
-for msg := range msgs.Messages() {
- fmt.Printf("Received a JetStream message: %s\n", string(msg.Data()))
-}
-
-if msgs.Error() != nil {
- // handle error
-}
-```
-
-Similarly, `FetchNoWait()` can be used in order to only return messages from the
-stream available at the time of sending request:
-
-```go
-// FetchNoWait will not wait for new messages if the whole batch is not available at the time of sending request.
-msgs, err := c.FetchNoWait(10)
-if err != nil {
-// handle error
-}
-
-for msg := range msgs.Messages() {
- fmt.Printf("Received a JetStream message: %s\n", string(msg.Data()))
-}
-
-if msgs.Error() != nil {
- // handle error
-}
-```
-
-> __Warning__: Both `Fetch()` and `FetchNoWait()` have worse performance when
-> used to continuously retrieve messages in comparison to `Messages()` or
-> `Consume()` methods, as they do not perform any optimizations (pre-buffering)
-> and a new subscription is created for each execution.
-
-#### Continuous polling
-
-There are 2 ways to achieve push-like behavior using pull consumers in
-`jetstream` package. Both `Messages()` and `Consume()` methods perform similar optimizations
-and for most cases can be used interchangeably.
-
-There is an advantage of using `Messages()` instead of `Consume()` for work-queue scenarios,
-where messages should be fetched one by one, as it allows for finer control over fetching
-single messages on demand.
-
-Subject filtering is achieved by configuring a consumer with a `FilterSubject`
-value.
-
-##### Using `Consume()` receive messages in a callback
-
-```go
-cons, _ := js.CreateOrUpdateConsumer(ctx, "ORDERS", jetstream.ConsumerConfig{
-    AckPolicy: jetstream.AckExplicitPolicy,
-    // receive messages from ORDERS.A subject only
-    FilterSubject: "ORDERS.A",
-})
-
-consContext, _ := c.Consume(func(msg jetstream.Msg) {
- fmt.Printf("Received a JetStream message: %s\n", string(msg.Data()))
-})
-defer consContext.Stop()
-```
-
-Similarly to `Messages()`, `Consume()` can be supplied with options to modify
-the behavior of a single pull request:
-
-- `PullMaxMessages(int)` - up to provided number of messages will be buffered
-- `PullMaxBytes(int)` - up to provided number of bytes will be buffered. This
-setting and `PullMaxMessages` are mutually exclusive
-- `PullExpiry(time.Duration)` - timeout on a single pull request to the server
-
-- `PullThresholdMessages(int)` - amount of messages which triggers refilling the
- buffer
-- `PullThresholdBytes(int)` - amount of bytes which triggers refilling the
- buffer
-- `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull
-request. An error will be triggered if at least 2 heartbeats are missed
-- `WithConsumeErrHandler(func (ConsumeContext, error))` - when used, sets a
- custom error handler on `Consume()`, allowing e.g. tracking missing
- heartbeats.
-
-> __NOTE__: `Stop()` should always be called on `ConsumeContext` to avoid
-> leaking goroutines.
-
-##### Using `Messages()` to iterate over incoming messages
-
-```go
-iter, _ := cons.Messages()
-for {
- msg, err := iter.Next()
- // Next can return error, e.g. when iterator is closed or no heartbeats were received
- if err != nil {
- //handle error
- }
- fmt.Printf("Received a JetStream message: %s\n", string(msg.Data()))
- msg.Ack()
-}
-iter.Stop()
-```
-
-It can also be configured to only store up to defined number of messages/bytes
-in the buffer.
-
-```go
-// a maximum of 10 messages or 1024 bytes will be stored in memory (whichever is encountered first)
-iter, _ := cons.Messages(jetstream.PullMaxMessages(10), jetstream.PullMaxBytes(1024))
-```
-
-`Messages()` exposes the following options:
-
-- `PullMaxMessages(int)` - up to provided number of messages will be buffered
-- `PullMaxBytes(int)` - up to provided number of bytes will be buffered. This
-setting and `PullMaxMessages` are mutually exclusive
-- `PullExpiry(time.Duration)` - timeout on a single pull request to the server
-
-- `PullThresholdMessages(int)` - amount of messages which triggers refilling the
- buffer
-- `PullThresholdBytes(int)` - amount of bytes which triggers refilling the
- buffer
-- `PullHeartbeat(time.Duration)` - idle heartbeat duration for a single pull
-request. An error will be triggered if at least 2 heartbeats are missed (unless
-`WithMessagesErrOnMissingHeartbeat(false)` is used)
-
-##### Using `Messages()` to fetch single messages one by one
-
-When implementing work queue, it is possible to use `Messages()` in order to
-fetch messages from the server one-by-one, without optimizations and
-pre-buffering (to avoid redeliveries when processing messages at slow rate).
-
-```go
-// PullMaxMessages determines how many messages will be sent to the client in a single pull request
-iter, _ := cons.Messages(jetstream.PullMaxMessages(1))
-numWorkers := 5
-sem := make(chan struct{}, numWorkers)
-for {
- sem <- struct{}{}
- go func() {
- defer func() {
- <-sem
- }()
- msg, err := iter.Next()
- if err != nil {
- // handle err
- }
- fmt.Printf("Processing msg: %s\n", string(msg.Data()))
- doWork()
- msg.Ack()
- }()
-}
-```
-
-## Publishing on stream
-
-`JetStream` interface allows publishing messages on stream in 2 ways:
-
-### __Synchronous publish__
-
-```go
-js, _ := jetstream.New(nc)
-
-// Publish message on subject ORDERS.new
-// Given subject has to belong to a stream
-ack, err := js.PublishMsg(ctx, &nats.Msg{
- Data: []byte("hello"),
- Subject: "ORDERS.new",
-})
-fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream)
-
-// A helper method accepting subject and data as parameters
-ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello"))
-```
-
-Both `Publish()` and `PublishMsg()` can be supplied with options allowing
-setting various headers. Additionally, for `PublishMsg()` headers can be set
-directly on `nats.Msg`.
-
-```go
-// All 3 implementations work identically
-ack, err := js.PublishMsg(ctx, &nats.Msg{
- Data: []byte("hello"),
- Subject: "ORDERS.new",
- Header: nats.Header{
- "Nats-Msg-Id": []string{"id"},
- },
-})
-
-ack, err = js.PublishMsg(ctx, &nats.Msg{
- Data: []byte("hello"),
- Subject: "ORDERS.new",
-}, jetstream.WithMsgID("id"))
-
-ack, err = js.Publish(ctx, "ORDERS.new", []byte("hello"), jetstream.WithMsgID("id"))
-```
-
-### __Async publish__
-
-```go
-js, _ := jetstream.New(nc)
-
-// publish message and do not wait for ack
-ackF, err := js.PublishMsgAsync(ctx, &nats.Msg{
- Data: []byte("hello"),
- Subject: "ORDERS.new",
-})
-
-// block and wait for ack
-select {
-case ack := <-ackF.Ok():
- fmt.Printf("Published msg with sequence number %d on stream %q", ack.Sequence, ack.Stream)
-case err := <-ackF.Err():
- fmt.Println(err)
-}
-
-// similarly to synchronous publish, there is a helper method accepting subject and data
-ackF, err = js.PublishAsync("ORDERS.new", []byte("hello"))
-```
-
-Just as for synchronous publish, `PublishAsync()` and `PublishMsgAsync()` accept
-options for setting headers.
-
-## KeyValue Store
-
-JetStream KeyValue Stores offer a straightforward method for storing key-value
-pairs within JetStream. These stores are supported by a specially configured
-stream, designed to efficiently and compactly store these pairs. This structure
-ensures rapid and convenient access to the data.
-
-The KV Store, also known as a bucket, enables the execution of various operations:
-
-- create/update a value for a given key
-- get a value for a given key
-- delete a value for a given key
-- purge all values from a bucket
-- list all keys in a bucket
-- watch for changes on given key set or the whole bucket
-- retrieve history of changes for a given key
-
-### Basic usage of KV bucket
-
-The most basic usage of KV bucket is to create or retrieve a bucket and perform
-basic CRUD operations on keys.
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-
-// Create a new bucket. Bucket name is required and has to be unique within a JetStream account.
-kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"})
-
-// Set a value for a given key
-// Put will either create or update a value for a given key
-kv.Put(ctx, "sue.color", []byte("blue"))
-
-// Get an entry for a given key
-// Entry contains key/value, but also metadata (revision, timestamp, etc.))
-entry, _ := kv.Get(ctx, "sue.color")
-
-// Prints `sue.color @ 1 -> "blue"`
-fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value()))
-
-// Update a value for a given key
-// Update will fail if the key does not exist or the revision has changed
-kv.Update(ctx, "sue.color", []byte("red"), 1)
-
-// Create will fail if the key already exists
-_, err := kv.Create(ctx, "sue.color", []byte("purple"))
-fmt.Println(err) // prints `nats: key exists`
-
-// Delete a value for a given key.
-// Delete is not destructive, it will add a delete marker for a given key
-// and all previous revisions will still be available
-kv.Delete(ctx, "sue.color")
-
-// getting a deleted key will return an error
-_, err = kv.Get(ctx, "sue.color")
-fmt.Println(err) // prints `nats: key not found`
-
-// A bucket can be deleted once it is no longer needed
-js.DeleteKeyValue(ctx, "profiles")
-```
-
-### Watching for changes on a bucket
-
-KV buckets support Watchers, which can be used to watch for changes on a given
-key or the whole bucket. Watcher will receive a notification on a channel when a
-change occurs. By default, watcher will return initial values for all matching
-keys. After sending all initial values, watcher will send nil on the channel to
-signal that all initial values have been sent and it will start sending updates when
-changes occur.
-
-Watcher supports several configuration options:
-
-- `IncludeHistory` will have the key watcher send all historical values
-for each key (up to KeyValueMaxHistory).
-- `IgnoreDeletes` will have the key watcher not pass any keys with
-delete markers.
-- `UpdatesOnly` will have the key watcher only pass updates on values
-(without values already present when starting).
-- `MetaOnly` will have the key watcher retrieve only the entry metadata, not the entry value.
-- `ResumeFromRevision` instructs the key watcher to resume from a
-specific revision number.
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"})
-
-kv.Put(ctx, "sue.color", []byte("blue"))
-
-// A watcher can be created to watch for changes on a given key or the whole bucket
-// By default, watcher will return most recent values for all matching keys.
-// Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option.
-watcher, _ := kv.Watch(ctx, "sue.*")
-defer watcher.Stop()
-
-kv.Put(ctx, "sue.age", []byte("43"))
-kv.Put(ctx, "sue.color", []byte("red"))
-
-// First, the watcher sends most recent values for all matching keys.
-// In this case, it will send a single entry for `sue.color`.
-entry := <-watcher.Updates()
-// Prints `sue.color @ 1 -> "blue"`
-fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value()))
-
-// After all current values have been sent, watcher will send nil on the channel.
-entry = <-watcher.Updates()
-if entry != nil {
- fmt.Println("Unexpected entry received")
-}
-
-// After that, watcher will send updates when changes occur
-// In this case, it will send an entry for `sue.color` and `sue.age`.
-
-entry = <-watcher.Updates()
-// Prints `sue.age @ 2 -> "43"`
-fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value()))
-
-entry = <-watcher.Updates()
-// Prints `sue.color @ 3 -> "red"`
-fmt.Printf("%s @ %d -> %q\n", entry.Key(), entry.Revision(), string(entry.Value()))
-```
-
-### Additional operations on a bucket
-
-In addition to basic CRUD operations and watching for changes, KV buckets
-support several additional operations:
-
-- `ListKeys` will return all keys in a bucket
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"})
-
-kv.Put(ctx, "sue.color", []byte("blue"))
-kv.Put(ctx, "sue.age", []byte("43"))
-kv.Put(ctx, "bucket", []byte("profiles"))
-
-keys, _ := kv.ListKeys(ctx)
-
-// Prints all 3 keys
-for key := range keys.Keys() {
- fmt.Println(key)
-}
-```
-
-- `Purge` and `PurgeDeletes` for removing all keys from a bucket
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"})
-
-kv.Put(ctx, "sue.color", []byte("blue"))
-kv.Put(ctx, "sue.age", []byte("43"))
-kv.Put(ctx, "bucket", []byte("profiles"))
-
-// Purge will remove all keys from a bucket.
-// The latest revision of each key will be kept
-// with a delete marker, all previous revisions will be removed
-// permanently.
-kv.Purge(ctx)
-
-// PurgeDeletes will remove all keys from a bucket
-// with a delete marker.
-kv.PurgeDeletes(ctx)
-```
-
-- `Status` will return the current status of a bucket
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-kv, _ := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{Bucket: "profiles"})
-
-kv.Put(ctx, "sue.color", []byte("blue"))
-kv.Put(ctx, "sue.age", []byte("43"))
-kv.Put(ctx, "bucket", []byte("profiles"))
-
-status, _ := kv.Status(ctx)
-
-fmt.Println(status.Bucket()) // prints `profiles`
-fmt.Println(status.Values()) // prints `3`
-fmt.Println(status.Bytes()) // prints the size of all values in bytes
-```
-
-## Object Store
-
-JetStream Object Stores offer a straightforward method for storing large objects
-within JetStream. These stores are backed by specially configured streams,
-designed to efficiently and compactly store these objects.
-
-The Object Store, also known as a bucket, enables the execution of various
-operations:
-
-- create/update an object
-- get an object
-- delete an object
-- list all objects in a bucket
-- watch for changes on objects in a bucket
-- create links to other objects or other buckets
-
-### Basic usage of Object Store
-
-The most basic usage of Object bucket is to create or retrieve a bucket and
-perform basic CRUD operations on objects.
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-
-// Create a new bucket. Bucket name is required and has to be unique within a JetStream account.
-os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"})
-
-config1 := bytes.NewBufferString("first config")
-// Put an object in a bucket. Put expects an object metadata and a reader
-// to read the object data from.
-os.Put(ctx, jetstream.ObjectMeta{Name: "config-1"}, config1)
-
-// Objects can also be created using various helper methods
-
-// 1. As raw strings
-os.PutString(ctx, "config-2", "second config")
-
-// 2. As raw bytes
-os.PutBytes(ctx, "config-3", []byte("third config"))
-
-// 3. As a file
-os.PutFile(ctx, "config-4.txt")
-
-// Get an object
-// Get returns a reader and object info
-// Similar to Put, Get can also be used with helper methods
-// to retrieve object data as a string, bytes or to save it to a file
-object, _ := os.Get(ctx, "config-1")
-data, _ := io.ReadAll(object)
-info, _ := object.Info()
-
-// Prints `configs.config-1 -> "first config"`
-fmt.Printf("%s.%s -> %q\n", info.Bucket, info.Name, string(data))
-
-// Delete an object.
-// Delete will remove object data from stream, but object metadata will be kept
-// with a delete marker.
-os.Delete(ctx, "config-1")
-
-// getting a deleted object will return an error
-_, err := os.Get(ctx, "config-1")
-fmt.Println(err) // prints `nats: object not found`
-
-// A bucket can be deleted once it is no longer needed
-js.DeleteObjectStore(ctx, "configs")
-```
-
-### Watching for changes on a store
-
-Object Stores support Watchers, which can be used to watch for changes on
-objects in a given bucket. Watcher will receive a notification on a channel when
-a change occurs. By default, watcher will return latest information for all
-objects in a bucket. After sending all initial values, watcher will send nil on
-the channel to signal that all initial values have been sent and it will start
-sending updates when changes occur.
-
->__NOTE:__ Watchers do not retrieve values for objects, only metadata (containing
->information such as object name, bucket name, object size etc.). If object data
->is required, `Get` method should be used.
-
-Watcher supports several configuration options:
-
-- `IncludeHistory` will have the watcher send historical updates for each
- object.
-- `IgnoreDeletes` will have the watcher not pass any objects with delete
- markers.
-- `UpdatesOnly` will have the watcher only pass updates on objects (without
- objects already present when starting).
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"})
-
-os.PutString(ctx, "config-1", "first config")
-
-// By default, watcher will return most recent values for all objects in a bucket.
-// Watcher can be configured to only return updates by using jetstream.UpdatesOnly() option.
-watcher, _ := os.Watch(ctx)
-defer watcher.Stop()
-
-// create a second object
-os.PutString(ctx, "config-2", "second config")
-
-// update metadata of the first object
-os.UpdateMeta(ctx, "config-1", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"})
-
-// First, the watcher sends most recent values for all matching objects.
-// In this case, it will send a single entry for `config-1`.
-object := <-watcher.Updates()
-// Prints `configs.config-1 -> ""`
-fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description)
-
-// After all current values have been sent, watcher will send nil on the channel.
-object = <-watcher.Updates()
-if object != nil {
- fmt.Println("Unexpected object received")
-}
-
-// After that, watcher will send updates when changes occur
-// In this case, it will send an entry for `config-2` and `config-1`.
-object = <-watcher.Updates()
-// Prints `configs.config-2 -> ""`
-fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description)
-
-object = <-watcher.Updates()
-// Prints `configs.config-1 -> "updated config"`
-fmt.Printf("%s.%s -> %q\n", object.Bucket, object.Name, object.Description)
-```
-
-### Additional operations on a store
-
-In addition to basic CRUD operations and watching for changes, Object Stores
-support several additional operations:
-
-- `UpdateMeta` for updating object metadata, such as name, description, etc.
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"})
-
-os.PutString(ctx, "config", "data")
-
-// update metadata of the object to e.g. add a description
-os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config", Description: "this is a config"})
-
-// object can be moved under a new name (unless it already exists)
-os.UpdateMeta(ctx, "config", jetstream.ObjectMeta{Name: "config-1", Description: "updated config"})
-```
-
-- `List` for listing information about all objects in a bucket:
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"})
-
-os.PutString(ctx, "config-1", "cfg1")
-os.PutString(ctx, "config-2", "cfg1")
-os.PutString(ctx, "config-3", "cfg1")
-
-// List will return information about all objects in a bucket
-objects, _ := os.List(ctx)
-
-// Prints all 3 objects
-for _, object := range objects {
- fmt.Println(object.Name)
-}
-```
-
-- `Status` will return the current status of a bucket
-
-```go
-js, _ := jetstream.New(nc)
-ctx := context.Background()
-os, _ := js.CreateObjectStore(ctx, jetstream.ObjectStoreConfig{Bucket: "configs"})
-
-os.PutString(ctx, "config-1", "cfg1")
-os.PutString(ctx, "config-2", "cfg1")
-os.PutString(ctx, "config-3", "cfg1")
-
-status, _ := os.Status(ctx)
-
-fmt.Println(status.Bucket()) // prints `configs`
-fmt.Println(status.Size()) // prints the size of the bucket in bytes
-```
-
-## Examples
-
-You can find more examples of `jetstream` usage [here](https://github.com/nats-io/nats.go/tree/main/examples/jetstream).
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/api.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/api.go
deleted file mode 100644
index 1cea088..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/api.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2022-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "encoding/json"
- "strings"
-)
-
-type (
- apiResponse struct {
- Type string `json:"type"`
- Error *APIError `json:"error,omitempty"`
- }
-
- // apiPaged includes variables used to create paged responses from the JSON API
- apiPaged struct {
- Total int `json:"total"`
- Offset int `json:"offset"`
- Limit int `json:"limit"`
- }
-)
-
-// Request API subjects for JetStream.
-const (
- // DefaultAPIPrefix is the default prefix for the JetStream API.
- DefaultAPIPrefix = "$JS.API."
-
- // jsDomainT is used to create JetStream API prefix by specifying only Domain
- jsDomainT = "$JS.%s.API."
-
- // jsExtDomainT is used to create a StreamSource External APIPrefix
- jsExtDomainT = "$JS.%s.API"
-
- // apiAccountInfo is for obtaining general information about JetStream.
- apiAccountInfo = "INFO"
-
- // apiConsumerCreateT is used to create consumers.
- apiConsumerCreateT = "CONSUMER.CREATE.%s.%s"
-
- // apiConsumerCreateT is used to create consumers.
- // it accepts stream name, consumer name and filter subject
- apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s"
-
- // apiConsumerInfoT is used to create consumers.
- apiConsumerInfoT = "CONSUMER.INFO.%s.%s"
-
- // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode.
- apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s"
-
- // apiConsumerDeleteT is used to delete consumers.
- apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s"
-
- // apiConsumerListT is used to return all detailed consumer information
- apiConsumerListT = "CONSUMER.LIST.%s"
-
- // apiConsumerNamesT is used to return a list with all consumer names for the stream.
- apiConsumerNamesT = "CONSUMER.NAMES.%s"
-
- // apiStreams can lookup a stream by subject.
- apiStreams = "STREAM.NAMES"
-
- // apiStreamCreateT is the endpoint to create new streams.
- apiStreamCreateT = "STREAM.CREATE.%s"
-
- // apiStreamInfoT is the endpoint to get information on a stream.
- apiStreamInfoT = "STREAM.INFO.%s"
-
- // apiStreamUpdateT is the endpoint to update existing streams.
- apiStreamUpdateT = "STREAM.UPDATE.%s"
-
- // apiStreamDeleteT is the endpoint to delete streams.
- apiStreamDeleteT = "STREAM.DELETE.%s"
-
- // apiStreamPurgeT is the endpoint to purge streams.
- apiStreamPurgeT = "STREAM.PURGE.%s"
-
- // apiStreamListT is the endpoint that will return all detailed stream information
- apiStreamListT = "STREAM.LIST"
-
- // apiMsgGetT is the endpoint to get a message.
- apiMsgGetT = "STREAM.MSG.GET.%s"
-
- // apiMsgGetT is the endpoint to perform a direct get of a message.
- apiDirectMsgGetT = "DIRECT.GET.%s"
-
- // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject.
- apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s"
-
- // apiMsgDeleteT is the endpoint to remove a message.
- apiMsgDeleteT = "STREAM.MSG.DELETE.%s"
-)
-
-func (js *jetStream) apiRequestJSON(ctx context.Context, subject string, resp any, data ...[]byte) (*jetStreamMsg, error) {
- jsMsg, err := js.apiRequest(ctx, subject, data...)
- if err != nil {
- return nil, err
- }
- if err := json.Unmarshal(jsMsg.Data(), resp); err != nil {
- return nil, err
- }
- return jsMsg, nil
-}
-
-// a RequestWithContext with tracing via TraceCB
-func (js *jetStream) apiRequest(ctx context.Context, subj string, data ...[]byte) (*jetStreamMsg, error) {
- var req []byte
- if len(data) > 0 {
- req = data[0]
- }
- if js.clientTrace != nil {
- ctrace := js.clientTrace
- if ctrace.RequestSent != nil {
- ctrace.RequestSent(subj, req)
- }
- }
- resp, err := js.conn.RequestWithContext(ctx, subj, req)
- if err != nil {
- return nil, err
- }
- if js.clientTrace != nil {
- ctrace := js.clientTrace
- if ctrace.ResponseReceived != nil {
- ctrace.ResponseReceived(subj, resp.Data, resp.Header)
- }
- }
-
- return js.toJSMsg(resp), nil
-}
-
-func apiSubj(prefix, subject string) string {
- if prefix == "" {
- return subject
- }
- var b strings.Builder
- b.WriteString(prefix)
- b.WriteString(subject)
- return b.String()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/consumer.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/consumer.go
deleted file mode 100644
index aa9003f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/consumer.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "crypto/sha256"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/nats-io/nuid"
-)
-
-type (
-
- // Consumer contains methods for fetching/processing messages from a stream,
- // as well as fetching consumer info.
- //
- // This package provides two implementations of Consumer interface:
- //
- // - Standard named/ephemeral pull consumers. These consumers are created using
- // CreateConsumer method on Stream or JetStream interface. They can be
- // explicitly configured (using [ConsumerConfig]) and managed by the user,
- // either from this package or externally.
- //
- // - Ordered consumers. These consumers are created using OrderedConsumer
- // method on Stream or JetStream interface. They are managed by the library
- // and provide a simple way to consume messages from a stream. Ordered
- // consumers are ephemeral in-memory pull consumers and are resilient to
- // deletes and restarts. They provide limited configuration options
- // using [OrderedConsumerConfig].
- //
- // Consumer provides method for optimized continuous consumption of messages
- // using Consume and Messages methods, as well as simple one-off messages
- // retrieval using Fetch and Next methods.
- Consumer interface {
- // Fetch is used to retrieve up to a provided number of messages from a
- // stream. This method will send a single request and deliver either all
- // requested messages unless time out is met earlier. Fetch timeout
- // defaults to 30 seconds and can be configured using FetchMaxWait
- // option.
- //
- // By default, Fetch uses a 5s idle heartbeat for requests longer than
- // 10 seconds. For shorter requests, the idle heartbeat is disabled.
- // This can be configured using FetchHeartbeat option. If a client does
- // not receive a heartbeat message from a stream for more than 2 times
- // the idle heartbeat setting, Fetch will return [ErrNoHeartbeat].
- //
- // Fetch is non-blocking and returns MessageBatch, exposing a channel
- // for delivered messages.
- //
- // Messages channel is always closed, thus it is safe to range over it
- // without additional checks.
- Fetch(batch int, opts ...FetchOpt) (MessageBatch, error)
-
- // FetchBytes is used to retrieve up to a provided bytes from the
- // stream. This method will send a single request and deliver the
- // provided number of bytes unless time out is met earlier. FetchBytes
- // timeout defaults to 30 seconds and can be configured using
- // FetchMaxWait option.
- //
- // By default, FetchBytes uses a 5s idle heartbeat for requests longer than
- // 10 seconds. For shorter requests, the idle heartbeat is disabled.
- // This can be configured using FetchHeartbeat option. If a client does
- // not receive a heartbeat message from a stream for more than 2 times
- // the idle heartbeat setting, Fetch will return ErrNoHeartbeat.
- //
- // FetchBytes is non-blocking and returns MessageBatch, exposing a channel
- // for delivered messages.
- //
- // Messages channel is always closed, thus it is safe to range over it
- // without additional checks.
- FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error)
-
- // FetchNoWait is used to retrieve up to a provided number of messages
- // from a stream. Unlike Fetch, FetchNoWait will only deliver messages
- // that are currently available in the stream and will not wait for new
- // messages to arrive, even if batch size is not met.
- //
- // FetchNoWait is non-blocking and returns MessageBatch, exposing a
- // channel for delivered messages.
- //
- // Messages channel is always closed, thus it is safe to range over it
- // without additional checks.
- FetchNoWait(batch int) (MessageBatch, error)
-
- // Consume will continuously receive messages and handle them
- // with the provided callback function. Consume can be configured using
- // PullConsumeOpt options:
- //
- // - Error handling and monitoring can be configured using ConsumeErrHandler
- // option, which provides information about errors encountered during
- // consumption (both transient and terminal)
- // - Consume can be configured to stop after a certain number of
- // messages is received using StopAfter option.
- // - Consume can be optimized for throughput or memory usage using
- // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options.
- // Unless there is a specific use case, these options should not be used.
- //
- // Consume returns a ConsumeContext, which can be used to stop or drain
- // the consumer.
- Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error)
-
- // Messages returns MessagesContext, allowing continuously iterating
- // over messages on a stream. Messages can be configured using
- // PullMessagesOpt options:
- //
- // - Messages can be optimized for throughput or memory usage using
- // PullExpiry, PullMaxMessages, PullMaxBytes and PullHeartbeat options.
- // Unless there is a specific use case, these options should not be used.
- // - WithMessagesErrOnMissingHeartbeat can be used to enable/disable
- // erroring out on MessagesContext.Next when a heartbeat is missing.
- // This option is enabled by default.
- Messages(opts ...PullMessagesOpt) (MessagesContext, error)
-
- // Next is used to retrieve the next message from the consumer. This
- // method will block until the message is retrieved or timeout is
- // reached.
- Next(opts ...FetchOpt) (Msg, error)
-
- // Info fetches current ConsumerInfo from the server.
- Info(context.Context) (*ConsumerInfo, error)
-
- // CachedInfo returns ConsumerInfo currently cached on this consumer.
- // This method does not perform any network requests. The cached
- // ConsumerInfo is updated on every call to Info and Update.
- CachedInfo() *ConsumerInfo
- }
-
- createConsumerRequest struct {
- Stream string `json:"stream_name"`
- Config *ConsumerConfig `json:"config"`
- Action string `json:"action"`
- }
-)
-
-// Info fetches current ConsumerInfo from the server.
-func (p *pullConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- infoSubject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiConsumerInfoT, p.stream, p.name))
- var resp consumerInfoResponse
-
- if _, err := p.jetStream.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
- return nil, ErrConsumerNotFound
- }
- return nil, resp.Error
- }
- if resp.Error == nil && resp.ConsumerInfo == nil {
- return nil, ErrConsumerNotFound
- }
-
- p.info = resp.ConsumerInfo
- return resp.ConsumerInfo, nil
-}
-
-// CachedInfo returns ConsumerInfo currently cached on this consumer.
-// This method does not perform any network requests. The cached
-// ConsumerInfo is updated on every call to Info and Update.
-func (p *pullConsumer) CachedInfo() *ConsumerInfo {
- return p.info
-}
-
-func upsertConsumer(ctx context.Context, js *jetStream, stream string, cfg ConsumerConfig, action string) (Consumer, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- req := createConsumerRequest{
- Stream: stream,
- Config: &cfg,
- Action: action,
- }
- reqJSON, err := json.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- consumerName := cfg.Name
- if consumerName == "" {
- if cfg.Durable != "" {
- consumerName = cfg.Durable
- } else {
- consumerName = generateConsName()
- }
- }
- if err := validateConsumerName(consumerName); err != nil {
- return nil, err
- }
-
- var ccSubj string
- if cfg.FilterSubject != "" && len(cfg.FilterSubjects) == 0 {
- if err := validateSubject(cfg.FilterSubject); err != nil {
- return nil, err
- }
- ccSubj = apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject))
- } else {
- ccSubj = apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerCreateT, stream, consumerName))
- }
- var resp consumerInfoResponse
-
- if _, err := js.apiRequestJSON(ctx, ccSubj, &resp, reqJSON); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
- return nil, ErrStreamNotFound
- }
- return nil, resp.Error
- }
-
- // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo
- if len(cfg.FilterSubjects) != 0 && len(resp.Config.FilterSubjects) == 0 {
- return nil, ErrConsumerMultipleFilterSubjectsNotSupported
- }
-
- return &pullConsumer{
- jetStream: js,
- stream: stream,
- name: resp.Name,
- durable: cfg.Durable != "",
- info: resp.ConsumerInfo,
- subscriptions: make(map[string]*pullSubscription),
- }, nil
-}
-
-const (
- consumerActionCreate = "create"
- consumerActionUpdate = "update"
- consumerActionCreateOrUpdate = ""
-)
-
-func generateConsName() string {
- name := nuid.Next()
- sha := sha256.New()
- sha.Write([]byte(name))
- b := sha.Sum(nil)
- for i := 0; i < 8; i++ {
- b[i] = rdigits[int(b[i]%base)]
- }
- return string(b[:8])
-}
-
-func getConsumer(ctx context.Context, js *jetStream, stream, name string) (Consumer, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- if err := validateConsumerName(name); err != nil {
- return nil, err
- }
- infoSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerInfoT, stream, name))
-
- var resp consumerInfoResponse
-
- if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
- return nil, ErrConsumerNotFound
- }
- return nil, resp.Error
- }
- if resp.Error == nil && resp.ConsumerInfo == nil {
- return nil, ErrConsumerNotFound
- }
-
- cons := &pullConsumer{
- jetStream: js,
- stream: stream,
- name: name,
- durable: resp.Config.Durable != "",
- info: resp.ConsumerInfo,
- subscriptions: make(map[string]*pullSubscription, 0),
- }
-
- return cons, nil
-}
-
-func deleteConsumer(ctx context.Context, js *jetStream, stream, consumer string) error {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- if err := validateConsumerName(consumer); err != nil {
- return err
- }
- deleteSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiConsumerDeleteT, stream, consumer))
-
- var resp consumerDeleteResponse
-
- if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil {
- return err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
- return ErrConsumerNotFound
- }
- return resp.Error
- }
- return nil
-}
-
-func validateConsumerName(dur string) error {
- if dur == "" {
- return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, "name is required")
- }
- if strings.ContainsAny(dur, ">*. /\\") {
- return fmt.Errorf("%w: '%s'", ErrInvalidConsumerName, dur)
- }
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go
deleted file mode 100644
index 0ff4672..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/consumer_config.go
+++ /dev/null
@@ -1,460 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "encoding/json"
- "fmt"
- "time"
-)
-
-type (
- // ConsumerInfo is the detailed information about a JetStream consumer.
- ConsumerInfo struct {
- // Stream specifies the name of the stream that the consumer is bound
- // to.
- Stream string `json:"stream_name"`
-
- // Name represents the unique identifier for the consumer. This can be
- // either set explicitly by the client or generated automatically if not
- // set.
- Name string `json:"name"`
-
- // Created is the timestamp when the consumer was created.
- Created time.Time `json:"created"`
-
- // Config contains the configuration settings of the consumer, set when
- // creating or updating the consumer.
- Config ConsumerConfig `json:"config"`
-
- // Delivered holds information about the most recently delivered
- // message, including its sequence numbers and timestamp.
- Delivered SequenceInfo `json:"delivered"`
-
- // AckFloor indicates the message before the first unacknowledged
- // message.
- AckFloor SequenceInfo `json:"ack_floor"`
-
- // NumAckPending is the number of messages that have been delivered but
- // not yet acknowledged.
- NumAckPending int `json:"num_ack_pending"`
-
- // NumRedelivered counts the number of messages that have been
- // redelivered and not yet acknowledged. Each message is counted only
- // once, even if it has been redelivered multiple times. This count is
- // reset when the message is eventually acknowledged.
- NumRedelivered int `json:"num_redelivered"`
-
- // NumWaiting is the count of active pull requests. It is only relevant
- // for pull-based consumers.
- NumWaiting int `json:"num_waiting"`
-
- // NumPending is the number of messages that match the consumer's
- // filter, but have not been delivered yet.
- NumPending uint64 `json:"num_pending"`
-
- // Cluster contains information about the cluster to which this consumer
- // belongs (if applicable).
- Cluster *ClusterInfo `json:"cluster,omitempty"`
-
- // PushBound indicates whether at least one subscription exists for the
- // delivery subject of this consumer. This is only applicable to
- // push-based consumers.
- PushBound bool `json:"push_bound,omitempty"`
-
- // TimeStamp indicates when the info was gathered by the server.
- TimeStamp time.Time `json:"ts"`
- }
-
- // ConsumerConfig is the configuration of a JetStream consumer.
- ConsumerConfig struct {
- // Name is an optional name for the consumer. If not set, one is
- // generated automatically.
- //
- // Name cannot contain whitespace, ., *, >, path separators (forward or
- // backwards slash), and non-printable characters.
- Name string `json:"name,omitempty"`
-
- // Durable is an optional durable name for the consumer. If both Durable
- // and Name are set, they have to be equal. Unless InactiveThreshold is set, a
- // durable consumer will not be cleaned up automatically.
- //
- // Durable cannot contain whitespace, ., *, >, path separators (forward or
- // backwards slash), and non-printable characters.
- Durable string `json:"durable_name,omitempty"`
-
- // Description provides an optional description of the consumer.
- Description string `json:"description,omitempty"`
-
- // DeliverPolicy defines from which point to start delivering messages
- // from the stream. Defaults to DeliverAllPolicy.
- DeliverPolicy DeliverPolicy `json:"deliver_policy"`
-
- // OptStartSeq is an optional sequence number from which to start
- // message delivery. Only applicable when DeliverPolicy is set to
- // DeliverByStartSequencePolicy.
- OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
-
- // OptStartTime is an optional time from which to start message
- // delivery. Only applicable when DeliverPolicy is set to
- // DeliverByStartTimePolicy.
- OptStartTime *time.Time `json:"opt_start_time,omitempty"`
-
- // AckPolicy defines the acknowledgement policy for the consumer.
- // Defaults to AckExplicitPolicy.
- AckPolicy AckPolicy `json:"ack_policy"`
-
- // AckWait defines how long the server will wait for an acknowledgement
- // before resending a message. If not set, server default is 30 seconds.
- AckWait time.Duration `json:"ack_wait,omitempty"`
-
- // MaxDeliver defines the maximum number of delivery attempts for a
- // message. Applies to any message that is re-sent due to ack policy.
- // If not set, server default is -1 (unlimited).
- MaxDeliver int `json:"max_deliver,omitempty"`
-
- // BackOff specifies the optional back-off intervals for retrying
- // message delivery after a failed acknowledgement. It overrides
- // AckWait.
- //
- // BackOff only applies to messages not acknowledged in specified time,
- // not messages that were nack'ed.
- //
- // The number of intervals specified must be lower or equal to
- // MaxDeliver. If the number of intervals is lower, the last interval is
- // used for all remaining attempts.
- BackOff []time.Duration `json:"backoff,omitempty"`
-
- // FilterSubject can be used to filter messages delivered from the
- // stream. FilterSubject is exclusive with FilterSubjects.
- FilterSubject string `json:"filter_subject,omitempty"`
-
- // ReplayPolicy defines the rate at which messages are sent to the
- // consumer. If ReplayOriginalPolicy is set, messages are sent in the
- // same intervals in which they were stored on stream. This can be used
- // e.g. to simulate production traffic in development environments. If
- // ReplayInstantPolicy is set, messages are sent as fast as possible.
- // Defaults to ReplayInstantPolicy.
- ReplayPolicy ReplayPolicy `json:"replay_policy"`
-
- // RateLimit specifies an optional maximum rate of message delivery in
- // bits per second.
- RateLimit uint64 `json:"rate_limit_bps,omitempty"`
-
- // SampleFrequency is an optional frequency for sampling how often
- // acknowledgements are sampled for observability. See
- // https://docs.nats.io/running-a-nats-service/nats_admin/monitoring/monitoring_jetstream
- SampleFrequency string `json:"sample_freq,omitempty"`
-
- // MaxWaiting is a maximum number of pull requests waiting to be
- // fulfilled. If not set, this will inherit settings from stream's
- // ConsumerLimits or (if those are not set) from account settings. If
- // neither are set, server default is 512.
- MaxWaiting int `json:"max_waiting,omitempty"`
-
- // MaxAckPending is a maximum number of outstanding unacknowledged
- // messages. Once this limit is reached, the server will suspend sending
- // messages to the consumer. If not set, server default is 1000
- // seconds. Set to -1 for unlimited.
- MaxAckPending int `json:"max_ack_pending,omitempty"`
-
- // HeadersOnly indicates whether only headers of messages should be sent
- // (and no payload). Defaults to false.
- HeadersOnly bool `json:"headers_only,omitempty"`
-
- // MaxRequestBatch is the optional maximum batch size a single pull
- // request can make. When set with MaxRequestMaxBytes, the batch size
- // will be constrained by whichever limit is hit first.
- MaxRequestBatch int `json:"max_batch,omitempty"`
-
- // MaxRequestExpires is the maximum duration a single pull request will
- // wait for messages to be available to pull.
- MaxRequestExpires time.Duration `json:"max_expires,omitempty"`
-
- // MaxRequestMaxBytes is the optional maximum total bytes that can be
- // requested in a given batch. When set with MaxRequestBatch, the batch
- // size will be constrained by whichever limit is hit first.
- MaxRequestMaxBytes int `json:"max_bytes,omitempty"`
-
- // InactiveThreshold is a duration which instructs the server to clean
- // up the consumer if it has been inactive for the specified duration.
- // Durable consumers will not be cleaned up by default, but if
- // InactiveThreshold is set, they will be. If not set, this will inherit
- // settings from stream's ConsumerLimits. If neither are set, server
- // default is 5 seconds.
- //
- // A consumer is considered inactive there are not pull requests
- // received by the server (for pull consumers), or no interest detected
- // on deliver subject (for push consumers), not if there are no
- // messages to be delivered.
- InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
-
- // Replicas the number of replicas for the consumer's state. By default,
- // consumers inherit the number of replicas from the stream.
- Replicas int `json:"num_replicas"`
-
- // MemoryStorage is a flag to force the consumer to use memory storage
- // rather than inherit the storage type from the stream.
- MemoryStorage bool `json:"mem_storage,omitempty"`
-
- // FilterSubjects allows filtering messages from a stream by subject.
- // This field is exclusive with FilterSubject. Requires nats-server
- // v2.10.0 or later.
- FilterSubjects []string `json:"filter_subjects,omitempty"`
-
- // Metadata is a set of application-defined key-value pairs for
- // associating metadata on the consumer. This feature requires
- // nats-server v2.10.0 or later.
- Metadata map[string]string `json:"metadata,omitempty"`
- }
-
- // OrderedConsumerConfig is the configuration of an ordered JetStream
- // consumer. For more information, see [Ordered Consumers] in README
- //
- // [Ordered Consumers]: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md#ordered-consumers
- OrderedConsumerConfig struct {
- // FilterSubjects allows filtering messages from a stream by subject.
- // This field is exclusive with FilterSubject. Requires nats-server
- // v2.10.0 or later.
- FilterSubjects []string `json:"filter_subjects,omitempty"`
-
- // DeliverPolicy defines from which point to start delivering messages
- // from the stream. Defaults to DeliverAllPolicy.
- DeliverPolicy DeliverPolicy `json:"deliver_policy"`
-
- // OptStartSeq is an optional sequence number from which to start
- // message delivery. Only applicable when DeliverPolicy is set to
- // DeliverByStartSequencePolicy.
- OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
-
- // OptStartTime is an optional time from which to start message
- // delivery. Only applicable when DeliverPolicy is set to
- // DeliverByStartTimePolicy.
- OptStartTime *time.Time `json:"opt_start_time,omitempty"`
-
- // ReplayPolicy defines the rate at which messages are sent to the
- // consumer. If ReplayOriginalPolicy is set, messages are sent in the
- // same intervals in which they were stored on stream. This can be used
- // e.g. to simulate production traffic in development environments. If
- // ReplayInstantPolicy is set, messages are sent as fast as possible.
- // Defaults to ReplayInstantPolicy.
- ReplayPolicy ReplayPolicy `json:"replay_policy"`
-
- // InactiveThreshold is a duration which instructs the server to clean
- // up the consumer if it has been inactive for the specified duration.
- // Defaults to 5s.
- InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
-
- // HeadersOnly indicates whether only headers of messages should be sent
- // (and no payload). Defaults to false.
- HeadersOnly bool `json:"headers_only,omitempty"`
-
- // Maximum number of attempts for the consumer to be recreated in a
- // single recreation cycle. Defaults to unlimited.
- MaxResetAttempts int
- }
-
- // DeliverPolicy determines from which point to start delivering messages.
- DeliverPolicy int
-
- // AckPolicy determines how the consumer should acknowledge delivered
- // messages.
- AckPolicy int
-
- // ReplayPolicy determines how the consumer should replay messages it
- // already has queued in the stream.
- ReplayPolicy int
-
- // SequenceInfo has both the consumer and the stream sequence and last
- // activity.
- SequenceInfo struct {
- Consumer uint64 `json:"consumer_seq"`
- Stream uint64 `json:"stream_seq"`
- Last *time.Time `json:"last_active,omitempty"`
- }
-)
-
-const (
- // DeliverAllPolicy starts delivering messages from the very beginning of a
- // stream. This is the default.
- DeliverAllPolicy DeliverPolicy = iota
-
- // DeliverLastPolicy will start the consumer with the last sequence
- // received.
- DeliverLastPolicy
-
- // DeliverNewPolicy will only deliver new messages that are sent after the
- // consumer is created.
- DeliverNewPolicy
-
- // DeliverByStartSequencePolicy will deliver messages starting from a given
- // sequence configured with OptStartSeq in ConsumerConfig.
- DeliverByStartSequencePolicy
-
- // DeliverByStartTimePolicy will deliver messages starting from a given time
- // configured with OptStartTime in ConsumerConfig.
- DeliverByStartTimePolicy
-
- // DeliverLastPerSubjectPolicy will start the consumer with the last message
- // for all subjects received.
- DeliverLastPerSubjectPolicy
-)
-
-func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString("all"), jsonString("undefined"):
- *p = DeliverAllPolicy
- case jsonString("last"):
- *p = DeliverLastPolicy
- case jsonString("new"):
- *p = DeliverNewPolicy
- case jsonString("by_start_sequence"):
- *p = DeliverByStartSequencePolicy
- case jsonString("by_start_time"):
- *p = DeliverByStartTimePolicy
- case jsonString("last_per_subject"):
- *p = DeliverLastPerSubjectPolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
-
- return nil
-}
-
-func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
- switch p {
- case DeliverAllPolicy:
- return json.Marshal("all")
- case DeliverLastPolicy:
- return json.Marshal("last")
- case DeliverNewPolicy:
- return json.Marshal("new")
- case DeliverByStartSequencePolicy:
- return json.Marshal("by_start_sequence")
- case DeliverByStartTimePolicy:
- return json.Marshal("by_start_time")
- case DeliverLastPerSubjectPolicy:
- return json.Marshal("last_per_subject")
- }
- return nil, fmt.Errorf("nats: unknown deliver policy %v", p)
-}
-
-func (p DeliverPolicy) String() string {
- switch p {
- case DeliverAllPolicy:
- return "all"
- case DeliverLastPolicy:
- return "last"
- case DeliverNewPolicy:
- return "new"
- case DeliverByStartSequencePolicy:
- return "by_start_sequence"
- case DeliverByStartTimePolicy:
- return "by_start_time"
- case DeliverLastPerSubjectPolicy:
- return "last_per_subject"
- }
- return ""
-}
-
-const (
- // AckExplicitPolicy requires ack or nack for all messages.
- AckExplicitPolicy AckPolicy = iota
-
- // AckAllPolicy when acking a sequence number, this implicitly acks all
- // sequences below this one as well.
- AckAllPolicy
-
- // AckNonePolicy requires no acks for delivered messages.
- AckNonePolicy
-)
-
-func (p *AckPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString("none"):
- *p = AckNonePolicy
- case jsonString("all"):
- *p = AckAllPolicy
- case jsonString("explicit"):
- *p = AckExplicitPolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-func (p AckPolicy) MarshalJSON() ([]byte, error) {
- switch p {
- case AckNonePolicy:
- return json.Marshal("none")
- case AckAllPolicy:
- return json.Marshal("all")
- case AckExplicitPolicy:
- return json.Marshal("explicit")
- }
- return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p)
-}
-
-func (p AckPolicy) String() string {
- switch p {
- case AckNonePolicy:
- return "AckNone"
- case AckAllPolicy:
- return "AckAll"
- case AckExplicitPolicy:
- return "AckExplicit"
- }
- return "Unknown AckPolicy"
-}
-
-const (
- // ReplayInstantPolicy will replay messages as fast as possible.
- ReplayInstantPolicy ReplayPolicy = iota
-
- // ReplayOriginalPolicy will maintain the same timing as the messages were
- // received.
- ReplayOriginalPolicy
-)
-
-func (p *ReplayPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString("instant"):
- *p = ReplayInstantPolicy
- case jsonString("original"):
- *p = ReplayOriginalPolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-func (p ReplayPolicy) MarshalJSON() ([]byte, error) {
- switch p {
- case ReplayOriginalPolicy:
- return json.Marshal("original")
- case ReplayInstantPolicy:
- return json.Marshal("instant")
- }
- return nil, fmt.Errorf("nats: unknown replay policy %v", p)
-}
-
-func (p ReplayPolicy) String() string {
- switch p {
- case ReplayOriginalPolicy:
- return "original"
- case ReplayInstantPolicy:
- return "instant"
- }
- return ""
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/errors.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/errors.go
deleted file mode 100644
index fb36434..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/errors.go
+++ /dev/null
@@ -1,421 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "errors"
- "fmt"
-)
-
-type (
- // JetStreamError is an error result that happens when using JetStream.
- // In case of client-side error, [APIError] returns nil.
- JetStreamError interface {
- APIError() *APIError
- error
- }
-
- jsError struct {
- apiErr *APIError
- message string
- }
-
- // APIError is included in all API responses if there was an error.
- APIError struct {
- Code int `json:"code"`
- ErrorCode ErrorCode `json:"err_code"`
- Description string `json:"description,omitempty"`
- }
-
- // ErrorCode represents error_code returned in response from JetStream API.
- ErrorCode uint16
-)
-
-const (
- JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
- JSErrCodeJetStreamNotEnabled ErrorCode = 10076
-
- JSErrCodeStreamNotFound ErrorCode = 10059
- JSErrCodeStreamNameInUse ErrorCode = 10058
-
- JSErrCodeConsumerCreate ErrorCode = 10012
- JSErrCodeConsumerNotFound ErrorCode = 10014
- JSErrCodeConsumerNameExists ErrorCode = 10013
- JSErrCodeConsumerAlreadyExists ErrorCode = 10105
- JSErrCodeConsumerExists ErrorCode = 10148
- JSErrCodeDuplicateFilterSubjects ErrorCode = 10136
- JSErrCodeOverlappingFilterSubjects ErrorCode = 10138
- JSErrCodeConsumerEmptyFilter ErrorCode = 10139
- JSErrCodeConsumerDoesNotExist ErrorCode = 10149
-
- JSErrCodeMessageNotFound ErrorCode = 10037
-
- JSErrCodeBadRequest ErrorCode = 10003
-
- JSErrCodeStreamWrongLastSequence ErrorCode = 10071
-)
-
-var (
- // JetStream API errors
-
- // ErrJetStreamNotEnabled is an error returned when JetStream is not
- // enabled.
- //
- // Note: This error will not be returned in clustered mode, even if each
- // server in the cluster does not have JetStream enabled. In clustered mode,
- // requests will time out instead.
- ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}}
-
- // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is
- // not enabled for an account.
- ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
-
- // ErrStreamNotFound is an error returned when stream with given name does
- // not exist.
- ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
-
- // ErrStreamNameAlreadyInUse is returned when a stream with given name
- // already exists and has a different configuration.
- ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
-
- // ErrStreamSubjectTransformNotSupported is returned when the connected
- // nats-server version does not support setting the stream subject
- // transform. If this error is returned when executing CreateStream(), the
- // stream with invalid configuration was already created in the server.
- ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
-
- // ErrStreamSourceSubjectTransformNotSupported is returned when the
- // connected nats-server version does not support setting the stream source
- // subject transform. If this error is returned when executing
- // CreateStream(), the stream with invalid configuration was already created
- // in the server.
- ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
-
- // ErrStreamSourceNotSupported is returned when the connected nats-server
- // version does not support setting the stream sources. If this error is
- // returned when executing CreateStream(), the stream with invalid
- // configuration was already created in the server.
- ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
-
- // ErrStreamSourceMultipleFilterSubjectsNotSupported is returned when the
- // connected nats-server version does not support setting the stream
- // sources. If this error is returned when executing CreateStream(), the
- // stream with invalid configuration was already created in the server.
- ErrStreamSourceMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject filters not supported by nats-server"}
-
- // ErrConsumerNotFound is an error returned when consumer with given name
- // does not exist.
- ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
-
- // ErrConsumerExists is returned when attempting to create a consumer with
- // CreateConsumer but a consumer with given name already exists.
- ErrConsumerExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerExists, Description: "consumer already exists", Code: 400}}
-
- // ErrConsumerNameExists is returned when attempting to update a consumer
- // with UpdateConsumer but a consumer with given name does not exist.
- ErrConsumerDoesNotExist JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerDoesNotExist, Description: "consumer does not exist", Code: 400}}
-
- // ErrMsgNotFound is returned when message with provided sequence number
- // does not exist.
- ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
-
- // ErrBadRequest is returned when invalid request is sent to JetStream API.
- ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
-
- // ErrConsumerCreate is returned when nats-server reports error when
- // creating consumer (e.g. illegal update).
- ErrConsumerCreate JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerCreate, Description: "could not create consumer", Code: 500}}
-
- // ErrDuplicateFilterSubjects is returned when both FilterSubject and
- // FilterSubjects are specified when creating consumer.
- ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
-
- // ErrDuplicateFilterSubjects is returned when filter subjects overlap when
- // creating consumer.
- ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
-
- // ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
- ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
-
- // Client errors
-
- // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the
- // connected nats-server version does not support setting multiple filter
- // subjects with filter_subjects field. If this error is returned when
- // executing AddConsumer(), the consumer with invalid configuration was
- // already created in the server.
- ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
-
- // ErrConsumerNotFound is an error returned when consumer with given name
- // does not exist.
- ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
-
- // ErrInvalidJSAck is returned when JetStream ack from message publish is
- // invalid.
- ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
-
- // ErrStreamNameRequired is returned when the provided stream name is empty.
- ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
-
- // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more
- // than once.
- ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
-
- // ErrNoStreamResponse is returned when there is no response from stream
- // (e.g. no responders error).
- ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
-
- // ErrNotJSMessage is returned when attempting to get metadata from non
- // JetStream message.
- ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
-
- // ErrInvalidStreamName is returned when the provided stream name is invalid
- // (contains '.').
- ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
-
- // ErrInvalidSubject is returned when the provided subject name is invalid.
- ErrInvalidSubject JetStreamError = &jsError{message: "invalid subject name"}
-
- // ErrInvalidConsumerName is returned when the provided consumer name is
- // invalid (contains '.').
- ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
-
- // ErrNoMessages is returned when no messages are currently available for a
- // consumer.
- ErrNoMessages JetStreamError = &jsError{message: "no messages"}
-
- // ErrMaxBytesExceeded is returned when a message would exceed MaxBytes set
- // on a pull request.
- ErrMaxBytesExceeded JetStreamError = &jsError{message: "message size exceeds max bytes"}
-
- // ErrConsumerDeleted is returned when attempting to send pull request to a
- // consumer which does not exist.
- ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
-
- // ErrConsumerLeadershipChanged is returned when pending requests are no
- // longer valid after leadership has changed.
- ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "leadership change"}
-
- // ErrHandlerRequired is returned when no handler func is provided in
- // Stream().
- ErrHandlerRequired JetStreamError = &jsError{message: "handler cannot be empty"}
-
- // ErrEndOfData is returned when iterating over paged API from JetStream
- // reaches end of data.
- ErrEndOfData JetStreamError = &jsError{message: "end of data reached"}
-
- // ErrNoHeartbeat is received when no message is received in IdleHeartbeat
- // time (if set).
- ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"}
-
- // ErrConsumerHasActiveSubscription is returned when a consumer is already
- // subscribed to a stream.
- ErrConsumerHasActiveSubscription JetStreamError = &jsError{message: "consumer has active subscription"}
-
- // ErrMsgNotBound is returned when given message is not bound to any
- // subscription.
- ErrMsgNotBound JetStreamError = &jsError{message: "message is not bound to subscription/connection"}
-
- // ErrMsgNoReply is returned when attempting to reply to a message without a
- // reply subject.
- ErrMsgNoReply JetStreamError = &jsError{message: "message does not have a reply"}
-
- // ErrMsgDeleteUnsuccessful is returned when an attempt to delete a message
- // is unsuccessful.
- ErrMsgDeleteUnsuccessful JetStreamError = &jsError{message: "message deletion unsuccessful"}
-
- // ErrAsyncPublishReplySubjectSet is returned when reply subject is set on
- // async message publish.
- ErrAsyncPublishReplySubjectSet JetStreamError = &jsError{message: "reply subject should be empty"}
-
- // ErrTooManyStalledMsgs is returned when too many outstanding async
- // messages are waiting for ack.
- ErrTooManyStalledMsgs JetStreamError = &jsError{message: "stalled with too many outstanding async published messages"}
-
- // ErrInvalidOption is returned when there is a collision between options.
- ErrInvalidOption JetStreamError = &jsError{message: "invalid jetstream option"}
-
- // ErrMsgIteratorClosed is returned when attempting to get message from a
- // closed iterator.
- ErrMsgIteratorClosed JetStreamError = &jsError{message: "messages iterator closed"}
-
- // ErrOrderedConsumerReset is returned when resetting ordered consumer fails
- // due to too many attempts.
- ErrOrderedConsumerReset JetStreamError = &jsError{message: "recreating ordered consumer"}
-
- // ErrOrderConsumerUsedAsFetch is returned when ordered consumer was already
- // used to process messages using Fetch (or FetchBytes).
- ErrOrderConsumerUsedAsFetch JetStreamError = &jsError{message: "ordered consumer initialized as fetch"}
-
- // ErrOrderConsumerUsedAsConsume is returned when ordered consumer was
- // already used to process messages using Consume or Messages.
- ErrOrderConsumerUsedAsConsume JetStreamError = &jsError{message: "ordered consumer initialized as consume"}
-
- // ErrOrderedConsumerConcurrentRequests is returned when attempting to run
- // concurrent operations on ordered consumers.
- ErrOrderedConsumerConcurrentRequests JetStreamError = &jsError{message: "cannot run concurrent processing using ordered consumer"}
-
- // ErrOrderedConsumerNotCreated is returned when trying to get consumer info
- // of an ordered consumer which was not yet created.
- ErrOrderedConsumerNotCreated JetStreamError = &jsError{message: "consumer instance not yet created"}
-
- // KeyValue Errors
-
- // ErrKeyExists is returned when attempting to create a key that already
- // exists.
- ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"}
-
- // ErrKeyValueConfigRequired is returned when attempting to create a bucket
- // without a config.
- ErrKeyValueConfigRequired JetStreamError = &jsError{message: "config required"}
-
- // ErrInvalidBucketName is returned when attempting to create a bucket with
- // an invalid name.
- ErrInvalidBucketName JetStreamError = &jsError{message: "invalid bucket name"}
-
- // ErrInvalidKey is returned when attempting to create a key with an invalid
- // name.
- ErrInvalidKey JetStreamError = &jsError{message: "invalid key"}
-
- // ErrBucketExists is returned when attempting to create a bucket that
- // already exists and has a different configuration.
- ErrBucketExists JetStreamError = &jsError{message: "bucket name already in use"}
-
- // ErrBucketNotFound is returned when attempting to access a bucket that
- // does not exist.
- ErrBucketNotFound JetStreamError = &jsError{message: "bucket not found"}
-
- // ErrBadBucket is returned when attempting to access a bucket that is not a
- // key-value store.
- ErrBadBucket JetStreamError = &jsError{message: "bucket not valid key-value store"}
-
- // ErrKeyNotFound is returned when attempting to access a key that does not
- // exist.
- ErrKeyNotFound JetStreamError = &jsError{message: "key not found"}
-
- // ErrKeyDeleted is returned when attempting to access a key that was
- // deleted.
- ErrKeyDeleted JetStreamError = &jsError{message: "key was deleted"}
-
- // ErrHistoryToLarge is returned when provided history limit is larger than
- // 64.
- ErrHistoryTooLarge JetStreamError = &jsError{message: "history limited to a max of 64"}
-
- // ErrNoKeysFound is returned when no keys are found.
- ErrNoKeysFound JetStreamError = &jsError{message: "no keys found"}
-
- // ErrObjectConfigRequired is returned when attempting to create an object
- // without a config.
- ErrObjectConfigRequired JetStreamError = &jsError{message: "object-store config required"}
-
- // ErrBadObjectMeta is returned when the meta information of an object is
- // invalid.
- ErrBadObjectMeta JetStreamError = &jsError{message: "object-store meta information invalid"}
-
- // ErrObjectNotFound is returned when an object is not found.
- ErrObjectNotFound JetStreamError = &jsError{message: "object not found"}
-
- // ErrInvalidStoreName is returned when the name of an object-store is
- // invalid.
- ErrInvalidStoreName JetStreamError = &jsError{message: "invalid object-store name"}
-
- // ErrDigestMismatch is returned when the digests of an object do not match.
- ErrDigestMismatch JetStreamError = &jsError{message: "received a corrupt object, digests do not match"}
-
- // ErrInvalidDigestFormat is returned when the digest hash of an object has
- // an invalid format.
- ErrInvalidDigestFormat JetStreamError = &jsError{message: "object digest hash has invalid format"}
-
- // ErrNoObjectsFound is returned when no objects are found.
- ErrNoObjectsFound JetStreamError = &jsError{message: "no objects found"}
-
- // ErrObjectAlreadyExists is returned when an object with the same name
- // already exists.
- ErrObjectAlreadyExists JetStreamError = &jsError{message: "an object already exists with that name"}
-
- // ErrNameRequired is returned when a name is required.
- ErrNameRequired JetStreamError = &jsError{message: "name is required"}
-
- // ErrLinkNotAllowed is returned when a link cannot be set when putting the
- // object in a bucket.
- ErrLinkNotAllowed JetStreamError = &jsError{message: "link cannot be set when putting the object in bucket"}
-
- // ErrObjectRequired is returned when an object is required.
- ErrObjectRequired = &jsError{message: "object required"}
-
- // ErrNoLinkToDeleted is returned when it is not allowed to link to a
- // deleted object.
- ErrNoLinkToDeleted JetStreamError = &jsError{message: "not allowed to link to a deleted object"}
-
- // ErrNoLinkToLink is returned when it is not allowed to link to another
- // link.
- ErrNoLinkToLink JetStreamError = &jsError{message: "not allowed to link to another link"}
-
- // ErrCantGetBucket is returned when an invalid Get is attempted on an
- // object that is a link to a bucket.
- ErrCantGetBucket JetStreamError = &jsError{message: "invalid Get, object is a link to a bucket"}
-
- // ErrBucketRequired is returned when a bucket is required.
- ErrBucketRequired JetStreamError = &jsError{message: "bucket required"}
-
- // ErrBucketMalformed is returned when a bucket is malformed.
- ErrBucketMalformed JetStreamError = &jsError{message: "bucket malformed"}
-
- // ErrUpdateMetaDeleted is returned when the meta information of a deleted
- // object cannot be updated.
- ErrUpdateMetaDeleted JetStreamError = &jsError{message: "cannot update meta for a deleted object"}
-)
-
-// Error prints the JetStream API error code and description.
-func (e *APIError) Error() string {
- return fmt.Sprintf("nats: API error: code=%d err_code=%d description=%s", e.Code, e.ErrorCode, e.Description)
-}
-
-// APIError implements the JetStreamError interface.
-func (e *APIError) APIError() *APIError {
- return e
-}
-
-// Is matches against an APIError.
-func (e *APIError) Is(err error) bool {
- if e == nil {
- return false
- }
- // Extract internal APIError to match against.
- var aerr *APIError
- ok := errors.As(err, &aerr)
- if !ok {
- return ok
- }
- return e.ErrorCode == aerr.ErrorCode
-}
-
-func (err *jsError) APIError() *APIError {
- return err.apiErr
-}
-
-func (err *jsError) Error() string {
- if err.apiErr != nil && err.apiErr.Description != "" {
- return err.apiErr.Error()
- }
- return fmt.Sprintf("nats: %s", err.message)
-}
-
-func (err *jsError) Unwrap() error {
- // Allow matching to embedded APIError in case there is one.
- if err.apiErr == nil {
- return nil
- }
- return err.apiErr
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go
deleted file mode 100644
index 3e48788..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/jetstream.go
+++ /dev/null
@@ -1,1050 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "strings"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nuid"
-)
-
-type (
-
- // JetStream is the top-level interface for interacting with JetStream.
- // The capabilities of JetStream include:
- //
- // - Publishing messages to a stream using [Publisher].
- // - Managing streams using [StreamManager].
- // - Managing consumers using [StreamConsumerManager]. Those are the same
- // methods as on [Stream], but are available as a shortcut to a consumer
- // bypassing stream lookup.
- // - Managing KeyValue stores using [KeyValueManager].
- // - Managing Object Stores using [ObjectStoreManager].
- //
- // JetStream can be created using [New], [NewWithAPIPrefix] or
- // [NewWithDomain] methods.
- JetStream interface {
- // AccountInfo fetches account information from the server, containing details
- // about the account associated with this JetStream connection. If account is
- // not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned. If
- // the server does not have JetStream enabled, ErrJetStreamNotEnabled is
- // returned.
- AccountInfo(ctx context.Context) (*AccountInfo, error)
-
- StreamConsumerManager
- StreamManager
- Publisher
- KeyValueManager
- ObjectStoreManager
- }
-
- // Publisher provides methods for publishing messages to a stream.
- // It is available as a part of [JetStream] interface.
- // The behavior of Publisher can be customized using [PublishOpt] options.
- Publisher interface {
- // Publish performs a synchronous publish to a stream and waits for ack
- // from server. It accepts subject name (which must be bound to a stream)
- // and message payload.
- Publish(ctx context.Context, subject string, payload []byte, opts ...PublishOpt) (*PubAck, error)
-
- // PublishMsg performs a synchronous publish to a stream and waits for
- // ack from server. It accepts subject name (which must be bound to a
- // stream) and nats.Message.
- PublishMsg(ctx context.Context, msg *nats.Msg, opts ...PublishOpt) (*PubAck, error)
-
- // PublishAsync performs a publish to a stream and returns
- // [PubAckFuture] interface, not blocking while waiting for an
- // acknowledgement. It accepts subject name (which must be bound to a
- // stream) and message payload.
- //
- // PublishAsync does not guarantee that the message has been
- // received by the server. It only guarantees that the message has been
- // sent to the server and thus messages can be stored in the stream
- // out of order in case of retries.
- PublishAsync(subject string, payload []byte, opts ...PublishOpt) (PubAckFuture, error)
-
- // PublishMsgAsync performs a publish to a stream and returns
- // [PubAckFuture] interface, not blocking while waiting for an
- // acknowledgement. It accepts subject name (which must
- // be bound to a stream) and nats.Message.
- //
- // PublishMsgAsync does not guarantee that the message has been
- // sent to the server and thus messages can be stored in the stream
- // received by the server. It only guarantees that the message has been
- // out of order in case of retries.
- PublishMsgAsync(msg *nats.Msg, opts ...PublishOpt) (PubAckFuture, error)
-
- // PublishAsyncPending returns the number of async publishes outstanding
- // for this context. An outstanding publish is one that has been
- // sent by the publisher but has not yet received an ack.
- PublishAsyncPending() int
-
- // PublishAsyncComplete returns a channel that will be closed when all
- // outstanding asynchronously published messages are acknowledged by the
- // server.
- PublishAsyncComplete() <-chan struct{}
- }
-
- // StreamManager provides CRUD API for managing streams. It is available as
- // a part of [JetStream] interface. CreateStream, UpdateStream,
- // CreateOrUpdateStream and Stream methods return a [Stream] interface, allowing
- // to operate on a stream.
- StreamManager interface {
- // CreateStream creates a new stream with given config and returns an
- // interface to operate on it. If stream with given name already exists,
- // ErrStreamNameAlreadyInUse is returned.
- CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error)
-
- // UpdateStream updates an existing stream. If stream does not exist,
- // ErrStreamNotFound is returned.
- UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error)
-
- // CreateOrUpdateStream creates a stream with given config. If stream
- // already exists, it will be updated (if possible).
- CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error)
-
- // Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name.
- // If stream does not exist, ErrStreamNotFound is returned.
- Stream(ctx context.Context, stream string) (Stream, error)
-
- // StreamNameBySubject returns a stream name stream listening on given
- // subject. If no stream is bound to given subject, ErrStreamNotFound
- // is returned.
- StreamNameBySubject(ctx context.Context, subject string) (string, error)
-
- // DeleteStream removes a stream with given name. If stream does not
- // exist, ErrStreamNotFound is returned.
- DeleteStream(ctx context.Context, stream string) error
-
- // ListStreams returns StreamInfoLister, enabling iterating over a
- // channel of stream infos.
- ListStreams(context.Context, ...StreamListOpt) StreamInfoLister
-
- // StreamNames returns a StreamNameLister, enabling iterating over a
- // channel of stream names.
- StreamNames(context.Context, ...StreamListOpt) StreamNameLister
- }
-
- // StreamConsumerManager provides CRUD API for managing consumers. It is
- // available as a part of [JetStream] interface. This is an alternative to
- // [Stream] interface, allowing to bypass stream lookup. CreateConsumer,
- // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a
- // [Consumer] interface, allowing to operate on a consumer (e.g. consume
- // messages).
- StreamConsumerManager interface {
- // CreateOrUpdateConsumer creates a consumer on a given stream with
- // given config. If consumer already exists, it will be updated (if
- // possible). Consumer interface is returned, allowing to operate on a
- // consumer (e.g. fetch messages).
- CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error)
-
- // CreateConsumer creates a consumer on a given stream with given
- // config. If consumer already exists and the provided configuration
- // differs from its configuration, ErrConsumerExists is returned. If the
- // provided configuration is the same as the existing consumer, the
- // existing consumer is returned. Consumer interface is returned,
- // allowing to operate on a consumer (e.g. fetch messages).
- CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error)
-
- // UpdateConsumer updates an existing consumer. If consumer does not
- // exist, ErrConsumerDoesNotExist is returned. Consumer interface is
- // returned, allowing to operate on a consumer (e.g. fetch messages).
- UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error)
-
- // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer
- // are managed by the library and provide a simple way to consume
- // messages from a stream. Ordered consumers are ephemeral in-memory
- // pull consumers and are resilient to deletes and restarts.
- OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error)
-
- // Consumer returns an interface to an existing consumer, allowing processing
- // of messages. If consumer does not exist, ErrConsumerNotFound is
- // returned.
- Consumer(ctx context.Context, stream string, consumer string) (Consumer, error)
-
- // DeleteConsumer removes a consumer with given name from a stream.
- // If consumer does not exist, ErrConsumerNotFound is returned.
- DeleteConsumer(ctx context.Context, stream string, consumer string) error
- }
-
- // StreamListOpt is a functional option for [StreamManager.ListStreams] and
- // [StreamManager.StreamNames] methods.
- StreamListOpt func(*streamsRequest) error
-
- // AccountInfo contains information about the JetStream usage from the
- // current account.
- AccountInfo struct {
- // Tier is the current account usage tier.
- Tier
-
- // Domain is the domain name associated with this account.
- Domain string `json:"domain"`
-
- // API is the API usage statistics for this account.
- API APIStats `json:"api"`
-
- // Tiers is the list of available tiers for this account.
- Tiers map[string]Tier `json:"tiers"`
- }
-
- // Tier represents a JetStream account usage tier.
- Tier struct {
- // Memory is the memory storage being used for Stream Message storage.
- Memory uint64 `json:"memory"`
-
- // Store is the disk storage being used for Stream Message storage.
- Store uint64 `json:"storage"`
-
- // ReservedMemory is the number of bytes reserved for memory usage by
- // this account on the server
- ReservedMemory uint64 `json:"reserved_memory"`
-
- // ReservedStore is the number of bytes reserved for disk usage by this
- // account on the server
- ReservedStore uint64 `json:"reserved_storage"`
-
- // Streams is the number of streams currently defined for this account.
- Streams int `json:"streams"`
-
- // Consumers is the number of consumers currently defined for this
- // account.
- Consumers int `json:"consumers"`
-
- // Limits are the JetStream limits for this account.
- Limits AccountLimits `json:"limits"`
- }
-
- // APIStats reports on API calls to JetStream for this account.
- APIStats struct {
- // Total is the total number of API calls.
- Total uint64 `json:"total"`
-
- // Errors is the total number of API errors.
- Errors uint64 `json:"errors"`
- }
-
- // AccountLimits includes the JetStream limits of the current account.
- AccountLimits struct {
- // MaxMemory is the maximum amount of memory available for this account.
- MaxMemory int64 `json:"max_memory"`
-
- // MaxStore is the maximum amount of disk storage available for this
- // account.
- MaxStore int64 `json:"max_storage"`
-
- // MaxStreams is the maximum number of streams allowed for this account.
- MaxStreams int `json:"max_streams"`
-
- // MaxConsumers is the maximum number of consumers allowed for this
- // account.
- MaxConsumers int `json:"max_consumers"`
- }
-
- jetStream struct {
- conn *nats.Conn
- jsOpts
-
- publisher *jetStreamClient
- }
-
- // JetStreamOpt is a functional option for [New], [NewWithAPIPrefix] and
- // [NewWithDomain] methods.
- JetStreamOpt func(*jsOpts) error
-
- jsOpts struct {
- publisherOpts asyncPublisherOpts
- apiPrefix string
- replyPrefix string
- replyPrefixLen int
- clientTrace *ClientTrace
- }
-
- // ClientTrace can be used to trace API interactions for [JetStream].
- ClientTrace struct {
- // RequestSent is called when an API request is sent to the server.
- RequestSent func(subj string, payload []byte)
-
- // ResponseReceived is called when a response is received from the
- // server.
- ResponseReceived func(subj string, payload []byte, hdr nats.Header)
- }
- streamInfoResponse struct {
- apiResponse
- apiPaged
- *StreamInfo
- }
-
- accountInfoResponse struct {
- apiResponse
- AccountInfo
- }
-
- streamDeleteResponse struct {
- apiResponse
- Success bool `json:"success,omitempty"`
- }
-
- // StreamInfoLister is used to iterate over a channel of stream infos.
- // Err method can be used to check for errors encountered during iteration.
- // Info channel is always closed and therefore can be used in a range loop.
- StreamInfoLister interface {
- Info() <-chan *StreamInfo
- Err() error
- }
-
- // StreamNameLister is used to iterate over a channel of stream names.
- // Err method can be used to check for errors encountered during iteration.
- // Name channel is always closed and therefore can be used in a range loop.
- StreamNameLister interface {
- Name() <-chan string
- Err() error
- }
-
- apiPagedRequest struct {
- Offset int `json:"offset"`
- }
-
- streamLister struct {
- js *jetStream
- offset int
- pageInfo *apiPaged
-
- streams chan *StreamInfo
- names chan string
- err error
- }
-
- streamListResponse struct {
- apiResponse
- apiPaged
- Streams []*StreamInfo `json:"streams"`
- }
-
- streamNamesResponse struct {
- apiResponse
- apiPaged
- Streams []string `json:"streams"`
- }
-
- streamsRequest struct {
- apiPagedRequest
- Subject string `json:"subject,omitempty"`
- }
-)
-
-// defaultAPITimeout is used if context.Background() or context.TODO() is passed to API calls.
-const defaultAPITimeout = 5 * time.Second
-
-var subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`)
-
-// New returns a new JetStream instance.
-// It uses default API prefix ($JS.API) for JetStream API requests.
-// If a custom API prefix is required, use [NewWithAPIPrefix] or [NewWithDomain].
-//
-// Available options:
-// - [WithClientTrace] - enables request/response tracing.
-// - [WithPublishAsyncErrHandler] - sets error handler for async message publish.
-// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes
-// that can be inflight at one time.
-func New(nc *nats.Conn, opts ...JetStreamOpt) (JetStream, error) {
- jsOpts := jsOpts{
- apiPrefix: DefaultAPIPrefix,
- publisherOpts: asyncPublisherOpts{
- maxpa: defaultAsyncPubAckInflight,
- },
- }
- setReplyPrefix(nc, &jsOpts)
- for _, opt := range opts {
- if err := opt(&jsOpts); err != nil {
- return nil, err
- }
- }
- js := &jetStream{
- conn: nc,
- jsOpts: jsOpts,
- publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts},
- }
-
- return js, nil
-}
-
-const (
- // defaultAsyncPubAckInflight is the number of async pub acks inflight.
- defaultAsyncPubAckInflight = 4000
-)
-
-func setReplyPrefix(nc *nats.Conn, jsOpts *jsOpts) {
- jsOpts.replyPrefix = nats.InboxPrefix
- if nc.Opts.InboxPrefix != "" {
- jsOpts.replyPrefix = nc.Opts.InboxPrefix + "."
- }
- // Add 1 for the dot separator.
- jsOpts.replyPrefixLen = len(jsOpts.replyPrefix) + aReplyTokensize + 1
-
-}
-
-// NewWithAPIPrefix returns a new JetStream instance and sets the API prefix to be used in requests to JetStream API.
-// The API prefix will be used in API requests to JetStream, e.g. .STREAM.INFO..
-//
-// Available options:
-// - [WithClientTrace] - enables request/response tracing.
-// - [WithPublishAsyncErrHandler] - sets error handler for async message publish.
-// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes
-// that can be inflight at one time.
-func NewWithAPIPrefix(nc *nats.Conn, apiPrefix string, opts ...JetStreamOpt) (JetStream, error) {
- jsOpts := jsOpts{
- publisherOpts: asyncPublisherOpts{
- maxpa: defaultAsyncPubAckInflight,
- },
- }
- setReplyPrefix(nc, &jsOpts)
- for _, opt := range opts {
- if err := opt(&jsOpts); err != nil {
- return nil, err
- }
- }
- if apiPrefix == "" {
- return nil, fmt.Errorf("API prefix cannot be empty")
- }
- if !strings.HasSuffix(apiPrefix, ".") {
- jsOpts.apiPrefix = fmt.Sprintf("%s.", apiPrefix)
- }
- js := &jetStream{
- conn: nc,
- jsOpts: jsOpts,
- publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts},
- }
- return js, nil
-}
-
-// NewWithDomain returns a new JetStream instance and sets the domain name token used when sending JetStream requests.
-// The domain name token will be used in API requests to JetStream, e.g. $JS..API.STREAM.INFO..
-//
-// Available options:
-// - [WithClientTrace] - enables request/response tracing.
-// - [WithPublishAsyncErrHandler] - sets error handler for async message publish.
-// - [WithPublishAsyncMaxPending] - sets the maximum outstanding async publishes
-// that can be inflight at one time.
-func NewWithDomain(nc *nats.Conn, domain string, opts ...JetStreamOpt) (JetStream, error) {
- jsOpts := jsOpts{
- publisherOpts: asyncPublisherOpts{
- maxpa: defaultAsyncPubAckInflight,
- },
- }
- setReplyPrefix(nc, &jsOpts)
- for _, opt := range opts {
- if err := opt(&jsOpts); err != nil {
- return nil, err
- }
- }
- if domain == "" {
- return nil, errors.New("domain cannot be empty")
- }
- jsOpts.apiPrefix = fmt.Sprintf(jsDomainT, domain)
- js := &jetStream{
- conn: nc,
- jsOpts: jsOpts,
- publisher: &jetStreamClient{asyncPublisherOpts: jsOpts.publisherOpts},
- }
- return js, nil
-}
-
-// CreateStream creates a new stream with given config and returns an
-// interface to operate on it. If stream with given name already exists,
-// ErrStreamNameAlreadyInUse is returned.
-func (js *jetStream) CreateStream(ctx context.Context, cfg StreamConfig) (Stream, error) {
- if err := validateStreamName(cfg.Name); err != nil {
- return nil, err
- }
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- ncfg := cfg
- // If we have a mirror and an external domain, convert to ext.APIPrefix.
- if ncfg.Mirror != nil && ncfg.Mirror.Domain != "" {
- // Copy so we do not change the caller's version.
- ncfg.Mirror = ncfg.Mirror.copy()
- if err := ncfg.Mirror.convertDomain(); err != nil {
- return nil, err
- }
- }
-
- // Check sources for the same.
- if len(ncfg.Sources) > 0 {
- ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...)
- for i, ss := range ncfg.Sources {
- if ss.Domain != "" {
- ncfg.Sources[i] = ss.copy()
- if err := ncfg.Sources[i].convertDomain(); err != nil {
- return nil, err
- }
- }
- }
- }
-
- req, err := json.Marshal(ncfg)
- if err != nil {
- return nil, err
- }
-
- createSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamCreateT, cfg.Name))
- var resp streamInfoResponse
-
- if _, err = js.apiRequestJSON(ctx, createSubject, &resp, req); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeStreamNameInUse {
- return nil, ErrStreamNameAlreadyInUse
- }
- return nil, resp.Error
- }
-
- // check that input subject transform (if used) is reflected in the returned StreamInfo
- if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil {
- return nil, ErrStreamSubjectTransformNotSupported
- }
-
- if len(cfg.Sources) != 0 {
- if len(cfg.Sources) != len(resp.Config.Sources) {
- return nil, ErrStreamSourceNotSupported
- }
- for i := range cfg.Sources {
- if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 {
- return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported
- }
- }
- }
-
- return &stream{
- jetStream: js,
- name: cfg.Name,
- info: resp.StreamInfo,
- }, nil
-}
-
-// If we have a Domain, convert to the appropriate ext.APIPrefix.
-// This will change the stream source, so should be a copy passed in.
-func (ss *StreamSource) convertDomain() error {
- if ss.Domain == "" {
- return nil
- }
- if ss.External != nil {
- return errors.New("nats: domain and external are both set")
- }
- ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)}
- return nil
-}
-
-// Helper for copying when we do not want to change user's version.
-func (ss *StreamSource) copy() *StreamSource {
- nss := *ss
- // Check pointers
- if ss.OptStartTime != nil {
- t := *ss.OptStartTime
- nss.OptStartTime = &t
- }
- if ss.External != nil {
- ext := *ss.External
- nss.External = &ext
- }
- return &nss
-}
-
-// UpdateStream updates an existing stream. If stream does not exist,
-// ErrStreamNotFound is returned.
-func (js *jetStream) UpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) {
- if err := validateStreamName(cfg.Name); err != nil {
- return nil, err
- }
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
-
- req, err := json.Marshal(cfg)
- if err != nil {
- return nil, err
- }
-
- updateSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamUpdateT, cfg.Name))
- var resp streamInfoResponse
-
- if _, err = js.apiRequestJSON(ctx, updateSubject, &resp, req); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
- return nil, ErrStreamNotFound
- }
- return nil, resp.Error
- }
-
- // check that input subject transform (if used) is reflected in the returned StreamInfo
- if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil {
- return nil, ErrStreamSubjectTransformNotSupported
- }
-
- if len(cfg.Sources) != 0 {
- if len(cfg.Sources) != len(resp.Config.Sources) {
- return nil, ErrStreamSourceNotSupported
- }
- for i := range cfg.Sources {
- if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 {
- return nil, ErrStreamSourceMultipleFilterSubjectsNotSupported
- }
- }
- }
-
- return &stream{
- jetStream: js,
- name: cfg.Name,
- info: resp.StreamInfo,
- }, nil
-}
-
-// CreateOrUpdateStream creates a stream with given config. If stream
-// already exists, it will be updated (if possible).
-func (js *jetStream) CreateOrUpdateStream(ctx context.Context, cfg StreamConfig) (Stream, error) {
- s, err := js.UpdateStream(ctx, cfg)
- if err != nil {
- if !errors.Is(err, ErrStreamNotFound) {
- return nil, err
- }
- return js.CreateStream(ctx, cfg)
- }
-
- return s, nil
-}
-
-// Stream fetches [StreamInfo] and returns a [Stream] interface for a given stream name.
-// If stream does not exist, ErrStreamNotFound is returned.
-func (js *jetStream) Stream(ctx context.Context, name string) (Stream, error) {
- if err := validateStreamName(name); err != nil {
- return nil, err
- }
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- infoSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamInfoT, name))
-
- var resp streamInfoResponse
-
- if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
- return nil, ErrStreamNotFound
- }
- return nil, resp.Error
- }
- return &stream{
- jetStream: js,
- name: name,
- info: resp.StreamInfo,
- }, nil
-}
-
-// DeleteStream removes a stream with given name
-func (js *jetStream) DeleteStream(ctx context.Context, name string) error {
- if err := validateStreamName(name); err != nil {
- return err
- }
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- deleteSubject := apiSubj(js.apiPrefix, fmt.Sprintf(apiStreamDeleteT, name))
- var resp streamDeleteResponse
-
- if _, err := js.apiRequestJSON(ctx, deleteSubject, &resp); err != nil {
- return err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
- return ErrStreamNotFound
- }
- return resp.Error
- }
- return nil
-}
-
-// CreateOrUpdateConsumer creates a consumer on a given stream with
-// given config. If consumer already exists, it will be updated (if
-// possible). Consumer interface is returned, allowing to operate on a
-// consumer (e.g. fetch messages).
-func (js *jetStream) CreateOrUpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) {
- if err := validateStreamName(stream); err != nil {
- return nil, err
- }
- return upsertConsumer(ctx, js, stream, cfg, consumerActionCreateOrUpdate)
-}
-
-// CreateConsumer creates a consumer on a given stream with given
-// config. If consumer already exists and the provided configuration
-// differs from its configuration, ErrConsumerExists is returned. If the
-// provided configuration is the same as the existing consumer, the
-// existing consumer is returned. Consumer interface is returned,
-// allowing to operate on a consumer (e.g. fetch messages).
-func (js *jetStream) CreateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) {
- if err := validateStreamName(stream); err != nil {
- return nil, err
- }
- return upsertConsumer(ctx, js, stream, cfg, consumerActionCreate)
-}
-
-// UpdateConsumer updates an existing consumer. If consumer does not
-// exist, ErrConsumerDoesNotExist is returned. Consumer interface is
-// returned, allowing to operate on a consumer (e.g. fetch messages).
-func (js *jetStream) UpdateConsumer(ctx context.Context, stream string, cfg ConsumerConfig) (Consumer, error) {
- if err := validateStreamName(stream); err != nil {
- return nil, err
- }
- return upsertConsumer(ctx, js, stream, cfg, consumerActionUpdate)
-}
-
-// OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer
-// are managed by the library and provide a simple way to consume
-// messages from a stream. Ordered consumers are ephemeral in-memory
-// pull consumers and are resilient to deletes and restarts.
-func (js *jetStream) OrderedConsumer(ctx context.Context, stream string, cfg OrderedConsumerConfig) (Consumer, error) {
- if err := validateStreamName(stream); err != nil {
- return nil, err
- }
- oc := &orderedConsumer{
- jetStream: js,
- cfg: &cfg,
- stream: stream,
- namePrefix: nuid.Next(),
- doReset: make(chan struct{}, 1),
- }
- if cfg.OptStartSeq != 0 {
- oc.cursor.streamSeq = cfg.OptStartSeq - 1
- }
- err := oc.reset()
- if err != nil {
- return nil, err
- }
-
- return oc, nil
-}
-
-// Consumer returns an interface to an existing consumer, allowing processing
-// of messages. If consumer does not exist, ErrConsumerNotFound is
-// returned.
-func (js *jetStream) Consumer(ctx context.Context, stream string, name string) (Consumer, error) {
- if err := validateStreamName(stream); err != nil {
- return nil, err
- }
- return getConsumer(ctx, js, stream, name)
-}
-
-// DeleteConsumer removes a consumer with given name from a stream.
-// If consumer does not exist, ErrConsumerNotFound is returned.
-func (js *jetStream) DeleteConsumer(ctx context.Context, stream string, name string) error {
- if err := validateStreamName(stream); err != nil {
- return err
- }
- return deleteConsumer(ctx, js, stream, name)
-}
-
-func validateStreamName(stream string) error {
- if stream == "" {
- return ErrStreamNameRequired
- }
- if strings.ContainsAny(stream, ">*. /\\") {
- return fmt.Errorf("%w: '%s'", ErrInvalidStreamName, stream)
- }
- return nil
-}
-
-func validateSubject(subject string) error {
- if subject == "" {
- return fmt.Errorf("%w: %s", ErrInvalidSubject, "subject cannot be empty")
- }
- if subject[0] == '.' || subject[len(subject)-1] == '.' || !subjectRegexp.MatchString(subject) {
- return fmt.Errorf("%w: %s", ErrInvalidSubject, subject)
- }
- return nil
-}
-
-// AccountInfo fetches account information from the server, containing details
-// about the account associated with this JetStream connection. If account is
-// not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned.
-//
-// If the server does not have JetStream enabled, ErrJetStreamNotEnabled is
-// returned (for a single server setup). For clustered topologies, AccountInfo
-// will time out.
-func (js *jetStream) AccountInfo(ctx context.Context) (*AccountInfo, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- var resp accountInfoResponse
-
- infoSubject := apiSubj(js.apiPrefix, apiAccountInfo)
- if _, err := js.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
- if errors.Is(err, nats.ErrNoResponders) {
- return nil, ErrJetStreamNotEnabled
- }
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabledForAccount {
- return nil, ErrJetStreamNotEnabledForAccount
- }
- if resp.Error.ErrorCode == JSErrCodeJetStreamNotEnabled {
- return nil, ErrJetStreamNotEnabled
- }
- return nil, resp.Error
- }
-
- return &resp.AccountInfo, nil
-}
-
-// ListStreams returns StreamInfoLister, enabling iterating over a
-// channel of stream infos.
-func (js *jetStream) ListStreams(ctx context.Context, opts ...StreamListOpt) StreamInfoLister {
- l := &streamLister{
- js: js,
- streams: make(chan *StreamInfo),
- }
- var streamsReq streamsRequest
- for _, opt := range opts {
- if err := opt(&streamsReq); err != nil {
- l.err = err
- close(l.streams)
- return l
- }
- }
- go func() {
- defer close(l.streams)
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- for {
- page, err := l.streamInfos(ctx, streamsReq)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- l.err = err
- return
- }
- for _, info := range page {
- select {
- case l.streams <- info:
- case <-ctx.Done():
- l.err = ctx.Err()
- return
- }
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
-
- return l
-}
-
-// Info returns a channel allowing retrieval of stream infos returned by [ListStreams]
-func (s *streamLister) Info() <-chan *StreamInfo {
- return s.streams
-}
-
-// Err returns an error channel which will be populated with error from [ListStreams] or [StreamNames] request
-func (s *streamLister) Err() error {
- return s.err
-}
-
-// StreamNames returns a StreamNameLister, enabling iterating over a
-// channel of stream names.
-func (js *jetStream) StreamNames(ctx context.Context, opts ...StreamListOpt) StreamNameLister {
- l := &streamLister{
- js: js,
- names: make(chan string),
- }
- var streamsReq streamsRequest
- for _, opt := range opts {
- if err := opt(&streamsReq); err != nil {
- l.err = err
- close(l.streams)
- return l
- }
- }
- go func() {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- defer close(l.names)
- for {
- page, err := l.streamNames(ctx, streamsReq)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- l.err = err
- return
- }
- for _, info := range page {
- select {
- case l.names <- info:
- case <-ctx.Done():
- l.err = ctx.Err()
- return
- }
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
-
- return l
-}
-
-// StreamNameBySubject returns a stream name stream listening on given
-// subject. If no stream is bound to given subject, ErrStreamNotFound
-// is returned.
-func (js *jetStream) StreamNameBySubject(ctx context.Context, subject string) (string, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- if err := validateSubject(subject); err != nil {
- return "", err
- }
- streamsSubject := apiSubj(js.apiPrefix, apiStreams)
-
- r := &streamsRequest{Subject: subject}
- req, err := json.Marshal(r)
- if err != nil {
- return "", err
- }
- var resp streamNamesResponse
- _, err = js.apiRequestJSON(ctx, streamsSubject, &resp, req)
- if err != nil {
- return "", err
- }
- if resp.Error != nil {
- return "", resp.Error
- }
- if len(resp.Streams) == 0 {
- return "", ErrStreamNotFound
- }
-
- return resp.Streams[0], nil
-}
-
-// Name returns a channel allowing retrieval of stream names returned by [StreamNames]
-func (s *streamLister) Name() <-chan string {
- return s.names
-}
-
-// infos fetches the next [StreamInfo] page
-func (s *streamLister) streamInfos(ctx context.Context, streamsReq streamsRequest) ([]*StreamInfo, error) {
- if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
- return nil, ErrEndOfData
- }
-
- req := streamsRequest{
- apiPagedRequest: apiPagedRequest{
- Offset: s.offset,
- },
- Subject: streamsReq.Subject,
- }
- reqJSON, err := json.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- slSubj := apiSubj(s.js.apiPrefix, apiStreamListT)
- var resp streamListResponse
- _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, reqJSON)
- if err != nil {
- return nil, err
- }
- if resp.Error != nil {
- return nil, resp.Error
- }
-
- s.pageInfo = &resp.apiPaged
- s.offset += len(resp.Streams)
- return resp.Streams, nil
-}
-
-// streamNames fetches the next stream names page
-func (s *streamLister) streamNames(ctx context.Context, streamsReq streamsRequest) ([]string, error) {
- if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
- return nil, ErrEndOfData
- }
-
- req := streamsRequest{
- apiPagedRequest: apiPagedRequest{
- Offset: s.offset,
- },
- Subject: streamsReq.Subject,
- }
- reqJSON, err := json.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- slSubj := apiSubj(s.js.apiPrefix, apiStreams)
- var resp streamNamesResponse
- _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, reqJSON)
- if err != nil {
- return nil, err
- }
- if resp.Error != nil {
- return nil, resp.Error
- }
-
- s.pageInfo = &resp.apiPaged
- s.offset += len(resp.Streams)
- return resp.Streams, nil
-}
-
-// wrapContextWithoutDeadline wraps context without deadline with default timeout.
-// If deadline is already set, it will be returned as is, and cancel() will be nil.
-// Caller should check if cancel() is nil before calling it.
-func wrapContextWithoutDeadline(ctx context.Context) (context.Context, context.CancelFunc) {
- if _, ok := ctx.Deadline(); ok {
- return ctx, nil
- }
- return context.WithTimeout(ctx, defaultAPITimeout)
-}
-
-func (js *jetStream) cleanupReplySub() {
- if js.publisher == nil {
- return
- }
- js.publisher.Lock()
- if js.publisher.replySub != nil {
- js.publisher.replySub.Unsubscribe()
- js.publisher.replySub = nil
- }
- if js.publisher.connStatusCh != nil {
- close(js.publisher.connStatusCh)
- js.publisher.connStatusCh = nil
- }
- js.publisher.Unlock()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go
deleted file mode 100644
index a08d203..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/jetstream_options.go
+++ /dev/null
@@ -1,408 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "fmt"
- "time"
-)
-
-type pullOptFunc func(*consumeOpts) error
-
-func (fn pullOptFunc) configureConsume(opts *consumeOpts) error {
- return fn(opts)
-}
-
-func (fn pullOptFunc) configureMessages(opts *consumeOpts) error {
- return fn(opts)
-}
-
-// WithClientTrace enables request/response API calls tracing.
-func WithClientTrace(ct *ClientTrace) JetStreamOpt {
- return func(opts *jsOpts) error {
- opts.clientTrace = ct
- return nil
- }
-}
-
-// WithPublishAsyncErrHandler sets error handler for async message publish.
-func WithPublishAsyncErrHandler(cb MsgErrHandler) JetStreamOpt {
- return func(opts *jsOpts) error {
- opts.publisherOpts.aecb = cb
- return nil
- }
-}
-
-// WithPublishAsyncMaxPending sets the maximum outstanding async publishes that
-// can be inflight at one time.
-func WithPublishAsyncMaxPending(max int) JetStreamOpt {
- return func(opts *jsOpts) error {
- if max < 1 {
- return fmt.Errorf("%w: max ack pending should be >= 1", ErrInvalidOption)
- }
- opts.publisherOpts.maxpa = max
- return nil
- }
-}
-
-// WithPurgeSubject sets a specific subject for which messages on a stream will
-// be purged
-func WithPurgeSubject(subject string) StreamPurgeOpt {
- return func(req *StreamPurgeRequest) error {
- req.Subject = subject
- return nil
- }
-}
-
-// WithPurgeSequence is used to set a specific sequence number up to which (but
-// not including) messages will be purged from a stream Can be combined with
-// [WithPurgeSubject] option, but not with [WithPurgeKeep]
-func WithPurgeSequence(sequence uint64) StreamPurgeOpt {
- return func(req *StreamPurgeRequest) error {
- if req.Keep != 0 {
- return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption)
- }
- req.Sequence = sequence
- return nil
- }
-}
-
-// WithPurgeKeep sets the number of messages to be kept in the stream after
-// purge. Can be combined with [WithPurgeSubject] option, but not with
-// [WithPurgeSequence]
-func WithPurgeKeep(keep uint64) StreamPurgeOpt {
- return func(req *StreamPurgeRequest) error {
- if req.Sequence != 0 {
- return fmt.Errorf("%w: both 'keep' and 'sequence' cannot be provided in purge request", ErrInvalidOption)
- }
- req.Keep = keep
- return nil
- }
-}
-
-// WithGetMsgSubject sets the stream subject from which the message should be
-// retrieved. Server will return a first message with a seq >= to the input seq
-// that has the specified subject.
-func WithGetMsgSubject(subject string) GetMsgOpt {
- return func(req *apiMsgGetRequest) error {
- req.NextFor = subject
- return nil
- }
-}
-
-// PullMaxMessages limits the number of messages to be buffered in the client.
-// If not provided, a default of 500 messages will be used.
-// This option is exclusive with PullMaxBytes.
-type PullMaxMessages int
-
-func (max PullMaxMessages) configureConsume(opts *consumeOpts) error {
- if max <= 0 {
- return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
- }
- opts.MaxMessages = int(max)
- return nil
-}
-
-func (max PullMaxMessages) configureMessages(opts *consumeOpts) error {
- if max <= 0 {
- return fmt.Errorf("%w: maxMessages size must be at least 1", ErrInvalidOption)
- }
- opts.MaxMessages = int(max)
- return nil
-}
-
-// PullExpiry sets timeout on a single pull request, waiting until at least one
-// message is available.
-// If not provided, a default of 30 seconds will be used.
-type PullExpiry time.Duration
-
-func (exp PullExpiry) configureConsume(opts *consumeOpts) error {
- expiry := time.Duration(exp)
- if expiry < time.Second {
- return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
- }
- opts.Expires = expiry
- return nil
-}
-
-func (exp PullExpiry) configureMessages(opts *consumeOpts) error {
- expiry := time.Duration(exp)
- if expiry < time.Second {
- return fmt.Errorf("%w: expires value must be at least 1s", ErrInvalidOption)
- }
- opts.Expires = expiry
- return nil
-}
-
-// PullMaxBytes limits the number of bytes to be buffered in the client.
-// If not provided, the limit is not set (max messages will be used instead).
-// This option is exclusive with PullMaxMessages.
-type PullMaxBytes int
-
-func (max PullMaxBytes) configureConsume(opts *consumeOpts) error {
- if max <= 0 {
- return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption)
- }
- opts.MaxBytes = int(max)
- return nil
-}
-
-func (max PullMaxBytes) configureMessages(opts *consumeOpts) error {
- if max <= 0 {
- return fmt.Errorf("%w: max bytes must be greater then 0", ErrInvalidOption)
- }
- opts.MaxBytes = int(max)
- return nil
-}
-
-// PullThresholdMessages sets the message count on which Consume will trigger
-// new pull request to the server. Defaults to 50% of MaxMessages.
-type PullThresholdMessages int
-
-func (t PullThresholdMessages) configureConsume(opts *consumeOpts) error {
- opts.ThresholdMessages = int(t)
- return nil
-}
-
-func (t PullThresholdMessages) configureMessages(opts *consumeOpts) error {
- opts.ThresholdMessages = int(t)
- return nil
-}
-
-// PullThresholdBytes sets the byte count on which Consume will trigger
-// new pull request to the server. Defaults to 50% of MaxBytes (if set).
-type PullThresholdBytes int
-
-func (t PullThresholdBytes) configureConsume(opts *consumeOpts) error {
- opts.ThresholdBytes = int(t)
- return nil
-}
-
-func (t PullThresholdBytes) configureMessages(opts *consumeOpts) error {
- opts.ThresholdBytes = int(t)
- return nil
-}
-
-// PullHeartbeat sets the idle heartbeat duration for a pull subscription
-// If a client does not receive a heartbeat message from a stream for more
-// than the idle heartbeat setting, the subscription will be removed
-// and error will be passed to the message handler.
-// If not provided, a default PullExpiry / 2 will be used (capped at 30 seconds)
-type PullHeartbeat time.Duration
-
-func (hb PullHeartbeat) configureConsume(opts *consumeOpts) error {
- hbTime := time.Duration(hb)
- if hbTime < 500*time.Millisecond || hbTime > 30*time.Second {
- return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption)
- }
- opts.Heartbeat = hbTime
- return nil
-}
-
-func (hb PullHeartbeat) configureMessages(opts *consumeOpts) error {
- hbTime := time.Duration(hb)
- if hbTime < 500*time.Millisecond || hbTime > 30*time.Second {
- return fmt.Errorf("%w: idle_heartbeat value must be within 500ms-30s range", ErrInvalidOption)
- }
- opts.Heartbeat = hbTime
- return nil
-}
-
-// StopAfter sets the number of messages after which the consumer is
-// automatically stopped and no more messages are pulled from the server.
-type StopAfter int
-
-func (nMsgs StopAfter) configureConsume(opts *consumeOpts) error {
- if nMsgs <= 0 {
- return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption)
- }
- opts.StopAfter = int(nMsgs)
- return nil
-}
-
-func (nMsgs StopAfter) configureMessages(opts *consumeOpts) error {
- if nMsgs <= 0 {
- return fmt.Errorf("%w: auto stop after value cannot be less than 1", ErrInvalidOption)
- }
- opts.StopAfter = int(nMsgs)
- return nil
-}
-
-// ConsumeErrHandler sets custom error handler invoked when an error was
-// encountered while consuming messages It will be invoked for both terminal
-// (Consumer Deleted, invalid request body) and non-terminal (e.g. missing
-// heartbeats) errors.
-func ConsumeErrHandler(cb ConsumeErrHandlerFunc) PullConsumeOpt {
- return pullOptFunc(func(cfg *consumeOpts) error {
- cfg.ErrHandler = cb
- return nil
- })
-}
-
-// WithMessagesErrOnMissingHeartbeat sets whether a missing heartbeat error
-// should be reported when calling [MessagesContext.Next] (Default: true).
-func WithMessagesErrOnMissingHeartbeat(hbErr bool) PullMessagesOpt {
- return pullOptFunc(func(cfg *consumeOpts) error {
- cfg.ReportMissingHeartbeats = hbErr
- return nil
- })
-}
-
-// FetchMaxWait sets custom timeout for fetching predefined batch of messages.
-//
-// If not provided, a default of 30 seconds will be used.
-func FetchMaxWait(timeout time.Duration) FetchOpt {
- return func(req *pullRequest) error {
- if timeout <= 0 {
- return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption)
- }
- req.Expires = timeout
- return nil
- }
-}
-
-// FetchHeartbeat sets custom heartbeat for individual fetch request. If a
-// client does not receive a heartbeat message from a stream for more than 2
-// times the idle heartbeat setting, Fetch will return [ErrNoHeartbeat].
-//
-// Heartbeat value has to be lower than FetchMaxWait / 2.
-//
-// If not provided, heartbeat will is set to 5s for requests with FetchMaxWait > 10s
-// and disabled otherwise.
-func FetchHeartbeat(hb time.Duration) FetchOpt {
- return func(req *pullRequest) error {
- if hb <= 0 {
- return fmt.Errorf("%w: timeout value must be greater than 0", ErrInvalidOption)
- }
- req.Heartbeat = hb
- return nil
- }
-}
-
-// WithDeletedDetails can be used to display the information about messages
-// deleted from a stream on a stream info request
-func WithDeletedDetails(deletedDetails bool) StreamInfoOpt {
- return func(req *streamInfoRequest) error {
- req.DeletedDetails = deletedDetails
- return nil
- }
-}
-
-// WithSubjectFilter can be used to display the information about messages
-// stored on given subjects.
-// NOTE: if the subject filter matches over 100k
-// subjects, this will result in multiple requests to the server to retrieve all
-// the information, and all of the returned subjects will be kept in memory.
-func WithSubjectFilter(subject string) StreamInfoOpt {
- return func(req *streamInfoRequest) error {
- req.SubjectFilter = subject
- return nil
- }
-}
-
-// WithStreamListSubject can be used to filter results of ListStreams and
-// StreamNames requests to only streams that have given subject in their
-// configuration.
-func WithStreamListSubject(subject string) StreamListOpt {
- return func(req *streamsRequest) error {
- req.Subject = subject
- return nil
- }
-}
-
-// WithMsgID sets the message ID used for deduplication.
-func WithMsgID(id string) PublishOpt {
- return func(opts *pubOpts) error {
- opts.id = id
- return nil
- }
-}
-
-// WithExpectStream sets the expected stream the message should be published to.
-// If the message is published to a different stream server will reject the
-// message and publish will fail.
-func WithExpectStream(stream string) PublishOpt {
- return func(opts *pubOpts) error {
- opts.stream = stream
- return nil
- }
-}
-
-// WithExpectLastSequence sets the expected sequence number the last message
-// on a stream should have. If the last message has a different sequence number
-// server will reject the message and publish will fail.
-func WithExpectLastSequence(seq uint64) PublishOpt {
- return func(opts *pubOpts) error {
- opts.lastSeq = &seq
- return nil
- }
-}
-
-// WithExpectLastSequencePerSubject sets the expected sequence number the last
-// message on a subject the message is published to. If the last message on a
-// subject has a different sequence number server will reject the message and
-// publish will fail.
-func WithExpectLastSequencePerSubject(seq uint64) PublishOpt {
- return func(opts *pubOpts) error {
- opts.lastSubjectSeq = &seq
- return nil
- }
-}
-
-// WithExpectLastMsgID sets the expected message ID the last message on a stream
-// should have. If the last message has a different message ID server will
-// reject the message and publish will fail.
-func WithExpectLastMsgID(id string) PublishOpt {
- return func(opts *pubOpts) error {
- opts.lastMsgID = id
- return nil
- }
-}
-
-// WithRetryWait sets the retry wait time when ErrNoResponders is encountered.
-// Defaults to 250ms.
-func WithRetryWait(dur time.Duration) PublishOpt {
- return func(opts *pubOpts) error {
- if dur <= 0 {
- return fmt.Errorf("%w: retry wait should be more than 0", ErrInvalidOption)
- }
- opts.retryWait = dur
- return nil
- }
-}
-
-// WithRetryAttempts sets the retry number of attempts when ErrNoResponders is
-// encountered. Defaults to 2
-func WithRetryAttempts(num int) PublishOpt {
- return func(opts *pubOpts) error {
- if num < 0 {
- return fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidOption)
- }
- opts.retryAttempts = num
- return nil
- }
-}
-
-// WithStallWait sets the max wait when the producer becomes stall producing
-// messages. If a publish call is blocked for this long, ErrTooManyStalledMsgs
-// is returned.
-func WithStallWait(ttl time.Duration) PublishOpt {
- return func(opts *pubOpts) error {
- if ttl <= 0 {
- return fmt.Errorf("%w: stall wait should be more than 0", ErrInvalidOption)
- }
- opts.stallWait = ttl
- return nil
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/kv.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/kv.go
deleted file mode 100644
index 7a026a2..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/kv.go
+++ /dev/null
@@ -1,1376 +0,0 @@
-// Copyright 2023-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "errors"
- "fmt"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nats.go/internal/parser"
-)
-
-type (
- // KeyValueManager is used to manage KeyValue stores. It provides methods to
- // create, delete, and retrieve KeyValue stores.
- KeyValueManager interface {
- // KeyValue will lookup and bind to an existing KeyValue store.
- //
- // If the KeyValue store with given name does not exist,
- // ErrBucketNotFound will be returned.
- KeyValue(ctx context.Context, bucket string) (KeyValue, error)
-
- // CreateKeyValue will create a KeyValue store with the given
- // configuration.
- //
- // If a KeyValue store with the same name already exists and the
- // configuration is different, ErrBucketExists will be returned.
- CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error)
-
- // UpdateKeyValue will update an existing KeyValue store with the given
- // configuration.
- //
- // If a KeyValue store with the given name does not exist, ErrBucketNotFound
- // will be returned.
- UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error)
-
- // CreateOrUpdateKeyValue will create a KeyValue store if it does not
- // exist or update an existing KeyValue store with the given
- // configuration (if possible).
- CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error)
-
- // DeleteKeyValue will delete this KeyValue store.
- //
- // If the KeyValue store with given name does not exist,
- // ErrBucketNotFound will be returned.
- DeleteKeyValue(ctx context.Context, bucket string) error
-
- // KeyValueStoreNames is used to retrieve a list of key value store
- // names. It returns a KeyValueNamesLister exposing a channel to read
- // the names from. The lister will always close the channel when done
- // (either all names have been read or an error occurred) and therefore
- // can be used in range loops.
- KeyValueStoreNames(ctx context.Context) KeyValueNamesLister
-
- // KeyValueStores is used to retrieve a list of key value store
- // statuses. It returns a KeyValueLister exposing a channel to read the
- // statuses from. The lister will always close the channel when done
- // (either all statuses have been read or an error occurred) and
- // therefore can be used in range loops.
- KeyValueStores(ctx context.Context) KeyValueLister
- }
-
- // KeyValue contains methods to operate on a KeyValue store.
- // Using the KeyValue interface, it is possible to:
- //
- // - Get, Put, Create, Update, Delete and Purge a key
- // - Watch for updates to keys
- // - List all keys
- // - Retrieve historical values for a key
- // - Retrieve status and configuration of a key value bucket
- // - Purge all delete markers
- // - Close the KeyValue store
- KeyValue interface {
- // Get returns the latest value for the key. If the key does not exist,
- // ErrKeyNotFound will be returned.
- Get(ctx context.Context, key string) (KeyValueEntry, error)
-
- // GetRevision returns a specific revision value for the key. If the key
- // does not exist or the provided revision does not exists,
- // ErrKeyNotFound will be returned.
- GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error)
-
- // Put will place the new value for the key into the store. If the key
- // does not exist, it will be created. If the key exists, the value will
- // be updated.
- //
- // A key has to consist of alphanumeric characters, dashes, underscores,
- // equal signs, and dots.
- Put(ctx context.Context, key string, value []byte) (uint64, error)
-
- // PutString will place the string for the key into the store. If the
- // key does not exist, it will be created. If the key exists, the value
- // will be updated.
- //
- // A key has to consist of alphanumeric characters, dashes, underscores,
- // equal signs, and dots.
- PutString(ctx context.Context, key string, value string) (uint64, error)
-
- // Create will add the key/value pair if it does not exist. If the key
- // already exists, ErrKeyExists will be returned.
- //
- // A key has to consist of alphanumeric characters, dashes, underscores,
- // equal signs, and dots.
- Create(ctx context.Context, key string, value []byte) (uint64, error)
-
- // Update will update the value if the latest revision matches.
- // If the provided revision is not the latest, Update will return an error.
- Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error)
-
- // Delete will place a delete marker and leave all revisions. A history
- // of a deleted key can still be retrieved by using the History method
- // or a watch on the key. [Delete] is a non-destructive operation and
- // will not remove any previous revisions from the underlying stream.
- //
- // [LastRevision] option can be specified to only perform delete if the
- // latest revision the provided one.
- Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error
-
- // Purge will place a delete marker and remove all previous revisions.
- // Only the latest revision will be preserved (with a delete marker).
- // Unlike [Delete], Purge is a destructive operation and will remove all
- // previous revisions from the underlying streams.
- //
- // [LastRevision] option can be specified to only perform purge if the
- // latest revision the provided one.
- Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error
-
- // Watch for any updates to keys that match the keys argument which
- // could include wildcards. By default, the watcher will send the latest
- // value for each key and all future updates. Watch will send a nil
- // entry when it has received all initial values. There are a few ways
- // to configure the watcher:
- //
- // - IncludeHistory will have the key watcher send all historical values
- // for each key (up to KeyValueMaxHistory).
- // - IgnoreDeletes will have the key watcher not pass any keys with
- // delete markers.
- // - UpdatesOnly will have the key watcher only pass updates on values
- // (without latest values when started).
- // - MetaOnly will have the key watcher retrieve only the entry meta
- // data, not the entry value.
- // - ResumeFromRevision instructs the key watcher to resume from a
- // specific revision number.
- Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error)
-
- // WatchAll will watch for any updates to all keys. It can be configured
- // with the same options as Watch.
- WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error)
-
- // Keys will return all keys. DEPRECATED: Use ListKeys instead to avoid
- // memory issues.
- Keys(ctx context.Context, opts ...WatchOpt) ([]string, error)
-
- // ListKeys will return KeyLister, allowing to retrieve all keys from
- // the key value store in a streaming fashion (on a channel).
- ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error)
-
- // History will return all historical values for the key (up to
- // KeyValueMaxHistory).
- History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error)
-
- // Bucket returns the KV store name.
- Bucket() string
-
- // PurgeDeletes will remove all current delete markers. It can be
- // configured using DeleteMarkersOlderThan option to only remove delete
- // markers older than a certain duration.
- //
- // [PurgeDeletes] is a destructive operation and will remove all entries
- // with delete markers from the underlying stream.
- PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error
-
- // Status retrieves the status and configuration of a bucket.
- Status(ctx context.Context) (KeyValueStatus, error)
- }
-
- // KeyValueConfig is the configuration for a KeyValue store.
- KeyValueConfig struct {
- // Bucket is the name of the KeyValue store. Bucket name has to be
- // unique and can only contain alphanumeric characters, dashes, and
- // underscores.
- Bucket string `json:"bucket"`
-
- // Description is an optional description for the KeyValue store.
- Description string `json:"description,omitempty"`
-
- // MaxValueSize is the maximum size of a value in bytes. If not
- // specified, the default is -1 (unlimited).
- MaxValueSize int32 `json:"max_value_size,omitempty"`
-
- // History is the number of historical values to keep per key. If not
- // specified, the default is 1. Max is 64.
- History uint8 `json:"history,omitempty"`
-
- // TTL is the expiry time for keys. By default, keys do not expire.
- TTL time.Duration `json:"ttl,omitempty"`
-
- // MaxBytes is the maximum size in bytes of the KeyValue store. If not
- // specified, the default is -1 (unlimited).
- MaxBytes int64 `json:"max_bytes,omitempty"`
-
- // Storage is the type of storage to use for the KeyValue store. If not
- // specified, the default is FileStorage.
- Storage StorageType `json:"storage,omitempty"`
-
- // Replicas is the number of replicas to keep for the KeyValue store in
- // clustered jetstream. Defaults to 1, maximum is 5.
- Replicas int `json:"num_replicas,omitempty"`
-
- // Placement is used to declare where the stream should be placed via
- // tags and/or an explicit cluster name.
- Placement *Placement `json:"placement,omitempty"`
-
- // RePublish allows immediate republishing a message to the configured
- // subject after it's stored.
- RePublish *RePublish `json:"republish,omitempty"`
-
- // Mirror defines the consiguration for mirroring another KeyValue
- // store.
- Mirror *StreamSource `json:"mirror,omitempty"`
-
- // Sources defines the configuration for sources of a KeyValue store.
- Sources []*StreamSource `json:"sources,omitempty"`
-
- // Compression sets the underlying stream compression.
- // NOTE: Compression is supported for nats-server 2.10.0+
- Compression bool `json:"compression,omitempty"`
- }
-
- // KeyLister is used to retrieve a list of key value store keys. It returns
- // a channel to read the keys from. The lister will always close the channel
- // when done (either all keys have been read or an error occurred) and
- // therefore can be used in range loops. Stop can be used to stop the lister
- // when not all keys have been read.
- KeyLister interface {
- Keys() <-chan string
- Stop() error
- }
-
- // KeyValueLister is used to retrieve a list of key value stores. It returns
- // a channel to read the KV store statuses from. The lister will always
- // close the channel when done (either all stores have been retrieved or an
- // error occurred) and therefore can be used in range loops. Stop can be
- // used to stop the lister when not all KeyValue stores have been read.
- KeyValueLister interface {
- Status() <-chan KeyValueStatus
- Error() error
- }
-
- // KeyValueNamesLister is used to retrieve a list of key value store names.
- // It returns a channel to read the KV bucket names from. The lister will
- // always close the channel when done (either all stores have been retrieved
- // or an error occurred) and therefore can be used in range loops. Stop can
- // be used to stop the lister when not all bucket names have been read.
- KeyValueNamesLister interface {
- Name() <-chan string
- Error() error
- }
-
- // KeyValueStatus is run-time status about a Key-Value bucket.
- KeyValueStatus interface {
- // Bucket returns the name of the KeyValue store.
- Bucket() string
-
- // Values is how many messages are in the bucket, including historical values.
- Values() uint64
-
- // History returns the configured history kept per key.
- History() int64
-
- // TTL returns the duration for which keys are kept in the bucket.
- TTL() time.Duration
-
- // BackingStore indicates what technology is used for storage of the bucket.
- // Currently only JetStream is supported.
- BackingStore() string
-
- // Bytes returns the size of the bucket in bytes.
- Bytes() uint64
-
- // IsCompressed indicates if the data is compressed on disk.
- IsCompressed() bool
- }
-
- // KeyWatcher is what is returned when doing a watch. It can be used to
- // retrieve updates to keys. If not using UpdatesOnly option, it will also
- // send the latest value for each key. After all initial values have been
- // sent, a nil entry will be sent. Stop can be used to stop the watcher and
- // close the underlying channel. Watcher will not close the channel until
- // Stop is called or connection is closed.
- KeyWatcher interface {
- Updates() <-chan KeyValueEntry
- Stop() error
- }
-
- // KeyValueEntry is a retrieved entry for Get, List or Watch.
- KeyValueEntry interface {
- // Bucket is the bucket the data was loaded from.
- Bucket() string
-
- // Key is the name of the key that was retrieved.
- Key() string
-
- // Value is the retrieved value.
- Value() []byte
-
- // Revision is a unique sequence for this value.
- Revision() uint64
-
- // Created is the time the data was put in the bucket.
- Created() time.Time
-
- // Delta is distance from the latest value (how far the current sequence
- // is from the latest).
- Delta() uint64
-
- // Operation returns Put or Delete or Purge, depending on the manner in
- // which the current revision was created.
- Operation() KeyValueOp
- }
-)
-
-type (
- WatchOpt interface {
- configureWatcher(opts *watchOpts) error
- }
-
- watchOpts struct {
- // Do not send delete markers to the update channel.
- ignoreDeletes bool
- // Include all history per subject, not just last one.
- includeHistory bool
- // Include only updates for keys.
- updatesOnly bool
- // retrieve only the meta data of the entry
- metaOnly bool
- // resumeFromRevision is the revision to resume from.
- resumeFromRevision uint64
- }
-
- // KVDeleteOpt is used to configure delete and purge operations.
- KVDeleteOpt interface {
- configureDelete(opts *deleteOpts) error
- }
-
- deleteOpts struct {
- // Remove all previous revisions.
- purge bool
-
- // Delete only if the latest revision matches.
- revision uint64
- }
-
- // KVPurgeOpt is used to configure PurgeDeletes.
- KVPurgeOpt interface {
- configurePurge(opts *purgeOpts) error
- }
-
- purgeOpts struct {
- dmthr time.Duration // Delete markers threshold
- }
-)
-
-// kvs is the implementation of KeyValue
-type kvs struct {
- name string
- streamName string
- pre string
- putPre string
- pushJS nats.JetStreamContext
- js *jetStream
- stream Stream
- // If true, it means that APIPrefix/Domain was set in the context
- // and we need to add something to some of our high level protocols
- // (such as Put, etc..)
- useJSPfx bool
- // To know if we can use the stream direct get API
- useDirect bool
-}
-
-// KeyValueOp represents the type of KV operation (Put, Delete, Purge). It is a
-// part of KeyValueEntry.
-type KeyValueOp uint8
-
-// Available KeyValueOp values.
-const (
- // KeyValuePut is a set on a revision which creates or updates a value for a
- // key.
- KeyValuePut KeyValueOp = iota
-
- // KeyValueDelete is a set on a revision which adds a delete marker for a
- // key.
- KeyValueDelete
-
- // KeyValuePurge is a set on a revision which removes all previous revisions
- // for a key.
- KeyValuePurge
-)
-
-func (op KeyValueOp) String() string {
- switch op {
- case KeyValuePut:
- return "KeyValuePutOp"
- case KeyValueDelete:
- return "KeyValueDeleteOp"
- case KeyValuePurge:
- return "KeyValuePurgeOp"
- default:
- return "Unknown Operation"
- }
-}
-
-const (
- kvBucketNamePre = "KV_"
- kvBucketNameTmpl = "KV_%s"
- kvSubjectsTmpl = "$KV.%s.>"
- kvSubjectsPreTmpl = "$KV.%s."
- kvSubjectsPreDomainTmpl = "%s.$KV.%s."
- kvNoPending = "0"
-)
-
-const (
- KeyValueMaxHistory = 64
- AllKeys = ">"
- kvLatestRevision = 0
- kvop = "KV-Operation"
- kvdel = "DEL"
- kvpurge = "PURGE"
-)
-
-// Regex for valid keys and buckets.
-var (
- validBucketRe = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)
- validKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9]+$`)
- validSearchKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9*]*[>]?$`)
-)
-
-func (js *jetStream) KeyValue(ctx context.Context, bucket string) (KeyValue, error) {
- if !bucketValid(bucket) {
- return nil, ErrInvalidBucketName
- }
- streamName := fmt.Sprintf(kvBucketNameTmpl, bucket)
- stream, err := js.Stream(ctx, streamName)
- if err != nil {
- if errors.Is(err, ErrStreamNotFound) {
- err = ErrBucketNotFound
- }
- return nil, err
- }
- // Do some quick sanity checks that this is a correctly formed stream for KV.
- // Max msgs per subject should be > 0.
- if stream.CachedInfo().Config.MaxMsgsPerSubject < 1 {
- return nil, ErrBadBucket
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToKVS(js, pushJS, stream), nil
-}
-
-func (js *jetStream) CreateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) {
- scfg, err := js.prepareKeyValueConfig(ctx, cfg)
- if err != nil {
- return nil, err
- }
-
- stream, err := js.CreateStream(ctx, scfg)
- if err != nil {
- if errors.Is(err, ErrStreamNameAlreadyInUse) {
- // errors are joined so that backwards compatibility is retained
- // and previous checks for ErrStreamNameAlreadyInUse will still work.
- err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err)
-
- // If we have a failure to add, it could be because we have
- // a config change if the KV was created against before a bug fix
- // that changed the value of discard policy.
- // We will check if the stream exists and if the only difference
- // is the discard policy, we will update the stream.
- // The same logic applies for KVs created pre 2.9.x and
- // the AllowDirect setting.
- if stream, _ = js.Stream(ctx, scfg.Name); stream != nil {
- cfg := stream.CachedInfo().Config
- cfg.Discard = scfg.Discard
- cfg.AllowDirect = scfg.AllowDirect
- if reflect.DeepEqual(cfg, scfg) {
- stream, err = js.UpdateStream(ctx, scfg)
- }
- }
- }
- if err != nil {
- return nil, err
- }
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToKVS(js, pushJS, stream), nil
-}
-
-func (js *jetStream) UpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) {
- scfg, err := js.prepareKeyValueConfig(ctx, cfg)
- if err != nil {
- return nil, err
- }
-
- stream, err := js.UpdateStream(ctx, scfg)
- if err != nil {
- if errors.Is(err, ErrStreamNotFound) {
- err = fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket)
- }
- return nil, err
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToKVS(js, pushJS, stream), nil
-}
-
-func (js *jetStream) CreateOrUpdateKeyValue(ctx context.Context, cfg KeyValueConfig) (KeyValue, error) {
- scfg, err := js.prepareKeyValueConfig(ctx, cfg)
- if err != nil {
- return nil, err
- }
-
- stream, err := js.CreateOrUpdateStream(ctx, scfg)
- if err != nil {
- return nil, err
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToKVS(js, pushJS, stream), nil
-}
-
-func (js *jetStream) prepareKeyValueConfig(ctx context.Context, cfg KeyValueConfig) (StreamConfig, error) {
- if !bucketValid(cfg.Bucket) {
- return StreamConfig{}, ErrInvalidBucketName
- }
- if _, err := js.AccountInfo(ctx); err != nil {
- return StreamConfig{}, err
- }
-
- // Default to 1 for history. Max is 64 for now.
- history := int64(1)
- if cfg.History > 0 {
- if cfg.History > KeyValueMaxHistory {
- return StreamConfig{}, ErrHistoryTooLarge
- }
- history = int64(cfg.History)
- }
-
- replicas := cfg.Replicas
- if replicas == 0 {
- replicas = 1
- }
-
- // We will set explicitly some values so that we can do comparison
- // if we get an "already in use" error and need to check if it is same.
- maxBytes := cfg.MaxBytes
- if maxBytes == 0 {
- maxBytes = -1
- }
- maxMsgSize := cfg.MaxValueSize
- if maxMsgSize == 0 {
- maxMsgSize = -1
- }
- // When stream's MaxAge is not set, server uses 2 minutes as the default
- // for the duplicate window. If MaxAge is set, and lower than 2 minutes,
- // then the duplicate window will be set to that. If MaxAge is greater,
- // we will cap the duplicate window to 2 minutes (to be consistent with
- // previous behavior).
- duplicateWindow := 2 * time.Minute
- if cfg.TTL > 0 && cfg.TTL < duplicateWindow {
- duplicateWindow = cfg.TTL
- }
- var compression StoreCompression
- if cfg.Compression {
- compression = S2Compression
- }
- scfg := StreamConfig{
- Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket),
- Description: cfg.Description,
- MaxMsgsPerSubject: history,
- MaxBytes: maxBytes,
- MaxAge: cfg.TTL,
- MaxMsgSize: maxMsgSize,
- Storage: cfg.Storage,
- Replicas: replicas,
- Placement: cfg.Placement,
- AllowRollup: true,
- DenyDelete: true,
- Duplicates: duplicateWindow,
- MaxMsgs: -1,
- MaxConsumers: -1,
- AllowDirect: true,
- RePublish: cfg.RePublish,
- Compression: compression,
- Discard: DiscardNew,
- }
- if cfg.Mirror != nil {
- // Copy in case we need to make changes so we do not change caller's version.
- m := cfg.Mirror.copy()
- if !strings.HasPrefix(m.Name, kvBucketNamePre) {
- m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name)
- }
- scfg.Mirror = m
- scfg.MirrorDirect = true
- } else if len(cfg.Sources) > 0 {
- // For now we do not allow direct subjects for sources. If that is desired a user could use stream API directly.
- for _, ss := range cfg.Sources {
- var sourceBucketName string
- if strings.HasPrefix(ss.Name, kvBucketNamePre) {
- sourceBucketName = ss.Name[len(kvBucketNamePre):]
- } else {
- sourceBucketName = ss.Name
- ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name)
- }
-
- if ss.External == nil || sourceBucketName != cfg.Bucket {
- ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}}
- }
- scfg.Sources = append(scfg.Sources, ss)
- }
- scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}
- } else {
- scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}
- }
-
- return scfg, nil
-}
-
-// DeleteKeyValue will delete this KeyValue store (JetStream stream).
-func (js *jetStream) DeleteKeyValue(ctx context.Context, bucket string) error {
- if !bucketValid(bucket) {
- return ErrInvalidBucketName
- }
- stream := fmt.Sprintf(kvBucketNameTmpl, bucket)
- if err := js.DeleteStream(ctx, stream); err != nil {
- if errors.Is(err, ErrStreamNotFound) {
- err = errors.Join(fmt.Errorf("%w: %s", ErrBucketNotFound, bucket), err)
- }
- return err
- }
- return nil
-}
-
-// KeyValueStoreNames is used to retrieve a list of key value store names
-func (js *jetStream) KeyValueStoreNames(ctx context.Context) KeyValueNamesLister {
- res := &kvLister{
- kvNames: make(chan string),
- }
- l := &streamLister{js: js}
- streamsReq := streamsRequest{
- Subject: fmt.Sprintf(kvSubjectsTmpl, "*"),
- }
- go func() {
- defer close(res.kvNames)
- for {
- page, err := l.streamNames(ctx, streamsReq)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- res.err = err
- return
- }
- for _, name := range page {
- if !strings.HasPrefix(name, kvBucketNamePre) {
- continue
- }
- res.kvNames <- strings.TrimPrefix(name, kvBucketNamePre)
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
- return res
-}
-
-// KeyValueStores is used to retrieve a list of key value store statuses
-func (js *jetStream) KeyValueStores(ctx context.Context) KeyValueLister {
- res := &kvLister{
- kvs: make(chan KeyValueStatus),
- }
- l := &streamLister{js: js}
- streamsReq := streamsRequest{
- Subject: fmt.Sprintf(kvSubjectsTmpl, "*"),
- }
- go func() {
- defer close(res.kvs)
- for {
- page, err := l.streamInfos(ctx, streamsReq)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- res.err = err
- return
- }
- for _, info := range page {
- if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) {
- continue
- }
- res.kvs <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)}
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
- return res
-}
-
-// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus
-type KeyValueBucketStatus struct {
- nfo *StreamInfo
- bucket string
-}
-
-// Bucket the name of the bucket
-func (s *KeyValueBucketStatus) Bucket() string { return s.bucket }
-
-// Values is how many messages are in the bucket, including historical values
-func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs }
-
-// History returns the configured history kept per key
-func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject }
-
-// TTL is how long the bucket keeps values for
-func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge }
-
-// BackingStore indicates what technology is used for storage of the bucket
-func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" }
-
-// StreamInfo is the stream info retrieved to create the status
-func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo }
-
-// Bytes is the size of the stream
-func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes }
-
-// IsCompressed indicates if the data is compressed on disk
-func (s *KeyValueBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression }
-
-type kvLister struct {
- kvs chan KeyValueStatus
- kvNames chan string
- err error
-}
-
-func (kl *kvLister) Status() <-chan KeyValueStatus {
- return kl.kvs
-}
-
-func (kl *kvLister) Name() <-chan string {
- return kl.kvNames
-}
-
-func (kl *kvLister) Error() error {
- return kl.err
-}
-
-func (js *jetStream) legacyJetStream() (nats.JetStreamContext, error) {
- opts := make([]nats.JSOpt, 0)
- if js.apiPrefix != "" {
- opts = append(opts, nats.APIPrefix(js.apiPrefix))
- }
- if js.clientTrace != nil {
- opts = append(opts, nats.ClientTrace{
- RequestSent: js.clientTrace.RequestSent,
- ResponseReceived: js.clientTrace.ResponseReceived,
- })
- }
- return js.conn.JetStream(opts...)
-}
-
-func bucketValid(bucket string) bool {
- if len(bucket) == 0 {
- return false
- }
- return validBucketRe.MatchString(bucket)
-}
-
-func keyValid(key string) bool {
- if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' {
- return false
- }
- return validKeyRe.MatchString(key)
-}
-
-func searchKeyValid(key string) bool {
- if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' {
- return false
- }
- return validSearchKeyRe.MatchString(key)
-}
-
-func (kv *kvs) get(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) {
- if !keyValid(key) {
- return nil, ErrInvalidKey
- }
-
- var b strings.Builder
- b.WriteString(kv.pre)
- b.WriteString(key)
-
- var m *RawStreamMsg
- var err error
-
- if revision == kvLatestRevision {
- m, err = kv.stream.GetLastMsgForSubject(ctx, b.String())
- } else {
- m, err = kv.stream.GetMsg(ctx, revision)
- // If a sequence was provided, just make sure that the retrieved
- // message subject matches the request.
- if err == nil && m.Subject != b.String() {
- return nil, ErrKeyNotFound
- }
- }
- if err != nil {
- if errors.Is(err, ErrMsgNotFound) {
- err = ErrKeyNotFound
- }
- return nil, err
- }
-
- entry := &kve{
- bucket: kv.name,
- key: key,
- value: m.Data,
- revision: m.Sequence,
- created: m.Time,
- }
-
- // Double check here that this is not a DEL Operation marker.
- if len(m.Header) > 0 {
- switch m.Header.Get(kvop) {
- case kvdel:
- entry.op = KeyValueDelete
- return entry, ErrKeyDeleted
- case kvpurge:
- entry.op = KeyValuePurge
- return entry, ErrKeyDeleted
- }
- }
-
- return entry, nil
-}
-
-// kve is the implementation of KeyValueEntry
-type kve struct {
- bucket string
- key string
- value []byte
- revision uint64
- delta uint64
- created time.Time
- op KeyValueOp
-}
-
-func (e *kve) Bucket() string { return e.bucket }
-func (e *kve) Key() string { return e.key }
-func (e *kve) Value() []byte { return e.value }
-func (e *kve) Revision() uint64 { return e.revision }
-func (e *kve) Created() time.Time { return e.created }
-func (e *kve) Delta() uint64 { return e.delta }
-func (e *kve) Operation() KeyValueOp { return e.op }
-
-// Get returns the latest value for the key.
-func (kv *kvs) Get(ctx context.Context, key string) (KeyValueEntry, error) {
- e, err := kv.get(ctx, key, kvLatestRevision)
- if err != nil {
- if errors.Is(err, ErrKeyDeleted) {
- return nil, ErrKeyNotFound
- }
- return nil, err
- }
-
- return e, nil
-}
-
-// GetRevision returns a specific revision value for the key.
-func (kv *kvs) GetRevision(ctx context.Context, key string, revision uint64) (KeyValueEntry, error) {
- e, err := kv.get(ctx, key, revision)
- if err != nil {
- if errors.Is(err, ErrKeyDeleted) {
- return nil, ErrKeyNotFound
- }
- return nil, err
- }
-
- return e, nil
-}
-
-// Put will place the new value for the key into the store.
-func (kv *kvs) Put(ctx context.Context, key string, value []byte) (uint64, error) {
- if !keyValid(key) {
- return 0, ErrInvalidKey
- }
-
- var b strings.Builder
- if kv.useJSPfx {
- b.WriteString(kv.js.apiPrefix)
- }
- if kv.putPre != "" {
- b.WriteString(kv.putPre)
- } else {
- b.WriteString(kv.pre)
- }
- b.WriteString(key)
-
- pa, err := kv.js.Publish(ctx, b.String(), value)
- if err != nil {
- return 0, err
- }
- return pa.Sequence, err
-}
-
-// PutString will place the string for the key into the store.
-func (kv *kvs) PutString(ctx context.Context, key string, value string) (uint64, error) {
- return kv.Put(ctx, key, []byte(value))
-}
-
-// Create will add the key/value pair iff it does not exist.
-func (kv *kvs) Create(ctx context.Context, key string, value []byte) (revision uint64, err error) {
- v, err := kv.Update(ctx, key, value, 0)
- if err == nil {
- return v, nil
- }
-
- if e, err := kv.get(ctx, key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) {
- return kv.Update(ctx, key, value, e.Revision())
- }
-
- // Check if the expected last subject sequence is not zero which implies
- // the key already exists.
- if errors.Is(err, ErrKeyExists) {
- jserr := ErrKeyExists.(*jsError)
- return 0, fmt.Errorf("%w: %s", err, jserr.message)
- }
-
- return 0, err
-}
-
-// Update will update the value if the latest revision matches.
-func (kv *kvs) Update(ctx context.Context, key string, value []byte, revision uint64) (uint64, error) {
- if !keyValid(key) {
- return 0, ErrInvalidKey
- }
-
- var b strings.Builder
- if kv.useJSPfx {
- b.WriteString(kv.js.apiPrefix)
- }
- b.WriteString(kv.pre)
- b.WriteString(key)
-
- m := nats.Msg{Subject: b.String(), Header: nats.Header{}, Data: value}
- m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(revision, 10))
-
- pa, err := kv.js.PublishMsg(ctx, &m)
- if err != nil {
- return 0, err
- }
- return pa.Sequence, err
-}
-
-// Delete will place a delete marker and leave all revisions.
-func (kv *kvs) Delete(ctx context.Context, key string, opts ...KVDeleteOpt) error {
- if !keyValid(key) {
- return ErrInvalidKey
- }
-
- var b strings.Builder
- if kv.useJSPfx {
- b.WriteString(kv.js.apiPrefix)
- }
- if kv.putPre != "" {
- b.WriteString(kv.putPre)
- } else {
- b.WriteString(kv.pre)
- }
- b.WriteString(key)
-
- // DEL op marker. For watch functionality.
- m := nats.NewMsg(b.String())
-
- var o deleteOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureDelete(&o); err != nil {
- return err
- }
- }
- }
-
- if o.purge {
- m.Header.Set(kvop, kvpurge)
- m.Header.Set(MsgRollup, MsgRollupSubject)
- } else {
- m.Header.Set(kvop, kvdel)
- }
-
- if o.revision != 0 {
- m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(o.revision, 10))
- }
-
- _, err := kv.js.PublishMsg(ctx, m)
- return err
-}
-
-// Purge will place a delete marker and remove all previous revisions.
-func (kv *kvs) Purge(ctx context.Context, key string, opts ...KVDeleteOpt) error {
- return kv.Delete(ctx, key, append(opts, purge())...)
-}
-
-// purge removes all previous revisions.
-func purge() KVDeleteOpt {
- return deleteOptFn(func(opts *deleteOpts) error {
- opts.purge = true
- return nil
- })
-}
-
-// Implementation for Watch
-type watcher struct {
- mu sync.Mutex
- updates chan KeyValueEntry
- sub *nats.Subscription
- initDone bool
- initPending uint64
- received uint64
-}
-
-// Updates returns the interior channel.
-func (w *watcher) Updates() <-chan KeyValueEntry {
- if w == nil {
- return nil
- }
- return w.updates
-}
-
-// Stop will unsubscribe from the watcher.
-func (w *watcher) Stop() error {
- if w == nil {
- return nil
- }
- return w.sub.Unsubscribe()
-}
-
-// Watch for any updates to keys that match the keys argument which could include wildcards.
-// Watch will send a nil entry when it has received all initial values.
-func (kv *kvs) Watch(ctx context.Context, keys string, opts ...WatchOpt) (KeyWatcher, error) {
- if !searchKeyValid(keys) {
- return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "keys cannot be empty and must be a valid NATS subject")
- }
- var o watchOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureWatcher(&o); err != nil {
- return nil, err
- }
- }
- }
-
- // Could be a pattern so don't check for validity as we normally do.
- var b strings.Builder
- b.WriteString(kv.pre)
- b.WriteString(keys)
- keys = b.String()
-
- // We will block below on placing items on the chan. That is by design.
- w := &watcher{updates: make(chan KeyValueEntry, 256)}
-
- update := func(m *nats.Msg) {
- tokens, err := parser.GetMetadataFields(m.Reply)
- if err != nil {
- return
- }
- if len(m.Subject) <= len(kv.pre) {
- return
- }
- subj := m.Subject[len(kv.pre):]
-
- var op KeyValueOp
- if len(m.Header) > 0 {
- switch m.Header.Get(kvop) {
- case kvdel:
- op = KeyValueDelete
- case kvpurge:
- op = KeyValuePurge
- }
- }
- delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos])
- w.mu.Lock()
- defer w.mu.Unlock()
- if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) {
- entry := &kve{
- bucket: kv.name,
- key: subj,
- value: m.Data,
- revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]),
- created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
- delta: delta,
- op: op,
- }
- w.updates <- entry
- }
- // Check if done and initial values.
- if !w.initDone {
- w.received++
- // We set this on the first trip through..
- if w.initPending == 0 {
- w.initPending = delta
- }
- if w.received > w.initPending || delta == 0 {
- w.initDone = true
- w.updates <- nil
- }
- }
- }
-
- // Used ordered consumer to deliver results.
- subOpts := []nats.SubOpt{nats.BindStream(kv.streamName), nats.OrderedConsumer()}
- if !o.includeHistory {
- subOpts = append(subOpts, nats.DeliverLastPerSubject())
- }
- if o.updatesOnly {
- subOpts = append(subOpts, nats.DeliverNew())
- }
- if o.metaOnly {
- subOpts = append(subOpts, nats.HeadersOnly())
- }
- if o.resumeFromRevision > 0 {
- subOpts = append(subOpts, nats.StartSequence(o.resumeFromRevision))
- }
- subOpts = append(subOpts, nats.Context(ctx))
- // Create the sub and rest of initialization under the lock.
- // We want to prevent the race between this code and the
- // update() callback.
- w.mu.Lock()
- defer w.mu.Unlock()
- sub, err := kv.pushJS.Subscribe(keys, update, subOpts...)
- if err != nil {
- return nil, err
- }
- sub.SetClosedHandler(func(_ string) {
- close(w.updates)
- })
- // If there were no pending messages at the time of the creation
- // of the consumer, send the marker.
- // Skip if UpdatesOnly() is set, since there will never be updates initially.
- if !o.updatesOnly {
- initialPending, err := sub.InitialConsumerPending()
- if err == nil && initialPending == 0 {
- w.initDone = true
- w.updates <- nil
- }
- } else {
- // if UpdatesOnly was used, mark initialization as complete
- w.initDone = true
- }
- w.sub = sub
- return w, nil
-}
-
-// WatchAll will invoke the callback for all updates.
-func (kv *kvs) WatchAll(ctx context.Context, opts ...WatchOpt) (KeyWatcher, error) {
- return kv.Watch(ctx, AllKeys, opts...)
-}
-
-// Keys will return all keys.
-func (kv *kvs) Keys(ctx context.Context, opts ...WatchOpt) ([]string, error) {
- opts = append(opts, IgnoreDeletes(), MetaOnly())
- watcher, err := kv.WatchAll(ctx, opts...)
- if err != nil {
- return nil, err
- }
- defer watcher.Stop()
-
- var keys []string
- for entry := range watcher.Updates() {
- if entry == nil {
- break
- }
- keys = append(keys, entry.Key())
- }
- if len(keys) == 0 {
- return nil, ErrNoKeysFound
- }
- return keys, nil
-}
-
-type keyLister struct {
- watcher KeyWatcher
- keys chan string
-}
-
-// Keys will return all keys.
-func (kv *kvs) ListKeys(ctx context.Context, opts ...WatchOpt) (KeyLister, error) {
- opts = append(opts, IgnoreDeletes(), MetaOnly())
- watcher, err := kv.WatchAll(ctx, opts...)
- if err != nil {
- return nil, err
- }
- kl := &keyLister{watcher: watcher, keys: make(chan string, 256)}
-
- go func() {
- defer close(kl.keys)
- defer watcher.Stop()
- for {
- select {
- case entry := <-watcher.Updates():
- if entry == nil {
- return
- }
- kl.keys <- entry.Key()
- case <-ctx.Done():
- return
- }
- }
- }()
- return kl, nil
-}
-
-func (kl *keyLister) Keys() <-chan string {
- return kl.keys
-}
-
-func (kl *keyLister) Stop() error {
- return kl.watcher.Stop()
-}
-
-// History will return all historical values for the key.
-func (kv *kvs) History(ctx context.Context, key string, opts ...WatchOpt) ([]KeyValueEntry, error) {
- opts = append(opts, IncludeHistory())
- watcher, err := kv.Watch(ctx, key, opts...)
- if err != nil {
- return nil, err
- }
- defer watcher.Stop()
-
- var entries []KeyValueEntry
- for entry := range watcher.Updates() {
- if entry == nil {
- break
- }
- entries = append(entries, entry)
- }
- if len(entries) == 0 {
- return nil, ErrKeyNotFound
- }
- return entries, nil
-}
-
-// Bucket returns the current bucket name.
-func (kv *kvs) Bucket() string {
- return kv.name
-}
-
-const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute
-
-// PurgeDeletes will remove all current delete markers.
-func (kv *kvs) PurgeDeletes(ctx context.Context, opts ...KVPurgeOpt) error {
- var o purgeOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configurePurge(&o); err != nil {
- return err
- }
- }
- }
- watcher, err := kv.WatchAll(ctx)
- if err != nil {
- return err
- }
- defer watcher.Stop()
-
- var limit time.Time
- olderThan := o.dmthr
- // Negative value is used to instruct to always remove markers, regardless
- // of age. If set to 0 (or not set), use our default value.
- if olderThan == 0 {
- olderThan = kvDefaultPurgeDeletesMarkerThreshold
- }
- if olderThan > 0 {
- limit = time.Now().Add(-olderThan)
- }
-
- var deleteMarkers []KeyValueEntry
- for entry := range watcher.Updates() {
- if entry == nil {
- break
- }
- if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge {
- deleteMarkers = append(deleteMarkers, entry)
- }
- }
-
- var b strings.Builder
- // Do actual purges here.
- for _, entry := range deleteMarkers {
- b.WriteString(kv.pre)
- b.WriteString(entry.Key())
- purgeOpts := []StreamPurgeOpt{WithPurgeSubject(b.String())}
- if olderThan > 0 && entry.Created().After(limit) {
- purgeOpts = append(purgeOpts, WithPurgeKeep(1))
- }
- if err := kv.stream.Purge(ctx, purgeOpts...); err != nil {
- return err
- }
- b.Reset()
- }
- return nil
-}
-
-// Status retrieves the status and configuration of a bucket
-func (kv *kvs) Status(ctx context.Context) (KeyValueStatus, error) {
- nfo, err := kv.stream.Info(ctx)
- if err != nil {
- return nil, err
- }
-
- return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil
-}
-
-func mapStreamToKVS(js *jetStream, pushJS nats.JetStreamContext, stream Stream) *kvs {
- info := stream.CachedInfo()
- bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre)
- kv := &kvs{
- name: bucket,
- streamName: info.Config.Name,
- pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket),
- js: js,
- pushJS: pushJS,
- stream: stream,
- // Determine if we need to use the JS prefix in front of Put and Delete operations
- useJSPfx: js.apiPrefix != DefaultAPIPrefix,
- useDirect: info.Config.AllowDirect,
- }
-
- // If we are mirroring, we will have mirror direct on, so just use the mirror name
- // and override use
- if m := info.Config.Mirror; m != nil {
- bucket := strings.TrimPrefix(m.Name, kvBucketNamePre)
- if m.External != nil && m.External.APIPrefix != "" {
- kv.useJSPfx = false
- kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
- kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket)
- } else {
- kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
- }
- }
-
- return kv
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go
deleted file mode 100644
index 07a2557..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/kv_options.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "fmt"
- "time"
-)
-
-type watchOptFn func(opts *watchOpts) error
-
-func (opt watchOptFn) configureWatcher(opts *watchOpts) error {
- return opt(opts)
-}
-
-// IncludeHistory instructs the key watcher to include historical values as
-// well (up to KeyValueMaxHistory).
-func IncludeHistory() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- if opts.updatesOnly {
- return fmt.Errorf("%w: include history can not be used with updates only", ErrInvalidOption)
- }
- opts.includeHistory = true
- return nil
- })
-}
-
-// UpdatesOnly instructs the key watcher to only include updates on values
-// (without latest values when started).
-func UpdatesOnly() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- if opts.includeHistory {
- return fmt.Errorf("%w: updates only can not be used with include history", ErrInvalidOption)
- }
- opts.updatesOnly = true
- return nil
- })
-}
-
-// IgnoreDeletes will have the key watcher not pass any deleted keys.
-func IgnoreDeletes() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- opts.ignoreDeletes = true
- return nil
- })
-}
-
-// MetaOnly instructs the key watcher to retrieve only the entry meta data, not
-// the entry value.
-func MetaOnly() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- opts.metaOnly = true
- return nil
- })
-}
-
-// ResumeFromRevision instructs the key watcher to resume from a specific
-// revision number.
-func ResumeFromRevision(revision uint64) WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- opts.resumeFromRevision = revision
- return nil
- })
-}
-
-// DeleteMarkersOlderThan indicates that delete or purge markers older than that
-// will be deleted as part of [KeyValue.PurgeDeletes] operation, otherwise, only the data
-// will be removed but markers that are recent will be kept.
-// Note that if no option is specified, the default is 30 minutes. You can set
-// this option to a negative value to instruct to always remove the markers,
-// regardless of their age.
-type DeleteMarkersOlderThan time.Duration
-
-func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error {
- opts.dmthr = time.Duration(ttl)
- return nil
-}
-
-type deleteOptFn func(opts *deleteOpts) error
-
-func (opt deleteOptFn) configureDelete(opts *deleteOpts) error {
- return opt(opts)
-}
-
-// LastRevision deletes if the latest revision matches the provided one. If the
-// provided revision is not the latest, the delete will return an error.
-func LastRevision(revision uint64) KVDeleteOpt {
- return deleteOptFn(func(opts *deleteOpts) error {
- opts.revision = revision
- return nil
- })
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/message.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/message.go
deleted file mode 100644
index 81e1512..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/message.go
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "bytes"
- "context"
- "fmt"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nats.go/internal/parser"
-)
-
-type (
- // Msg contains methods to operate on a JetStream message. Metadata, Data,
- // Headers, Subject and Reply can be used to retrieve the specific parts of
- // the underlying message. Ack, DoubleAck, Nak, NakWithDelay, InProgress and
- // Term are various flavors of ack requests.
- Msg interface {
- // Metadata returns [MsgMetadata] for a JetStream message.
- Metadata() (*MsgMetadata, error)
-
- // Data returns the message body.
- Data() []byte
-
- // Headers returns a map of headers for a message.
- Headers() nats.Header
-
- // Subject returns a subject on which a message was published/received.
- Subject() string
-
- // Reply returns a reply subject for a message.
- Reply() string
-
- // Ack acknowledges a message. This tells the server that the message was
- // successfully processed and it can move on to the next message.
- Ack() error
-
- // DoubleAck acknowledges a message and waits for ack reply from the server.
- // While it impacts performance, it is useful for scenarios where
- // message loss is not acceptable.
- DoubleAck(context.Context) error
-
- // Nak negatively acknowledges a message. This tells the server to
- // redeliver the message.
- //
- // Nak does not adhere to AckWait or Backoff configured on the consumer
- // and triggers instant redelivery. For a delayed redelivery, use
- // NakWithDelay.
- Nak() error
-
- // NakWithDelay negatively acknowledges a message. This tells the server
- // to redeliver the message after the given delay.
- NakWithDelay(delay time.Duration) error
-
- // InProgress tells the server that this message is being worked on. It
- // resets the redelivery timer on the server.
- InProgress() error
-
- // Term tells the server to not redeliver this message, regardless of
- // the value of MaxDeliver.
- Term() error
-
- // TermWithReason tells the server to not redeliver this message, regardless of
- // the value of MaxDeliver. The provided reason will be included in JetStream
- // advisory event sent by the server.
- //
- // Note: This will only work with JetStream servers >= 2.10.4.
- // For older servers, TermWithReason will be ignored by the server and the message
- // will not be terminated.
- TermWithReason(reason string) error
- }
-
- // MsgMetadata is the JetStream metadata associated with received messages.
- MsgMetadata struct {
- // Sequence is the sequence information for the message.
- Sequence SequencePair
-
- // NumDelivered is the number of times this message was delivered to the
- // consumer.
- NumDelivered uint64
-
- // NumPending is the number of messages that match the consumer's
- // filter, but have not been delivered yet.
- NumPending uint64
-
- // Timestamp is the time the message was originally stored on a stream.
- Timestamp time.Time
-
- // Stream is the stream name this message is stored on.
- Stream string
-
- // Consumer is the consumer name this message was delivered to.
- Consumer string
-
- // Domain is the domain this message was received on.
- Domain string
- }
-
- // SequencePair includes the consumer and stream sequence numbers for a
- // message.
- SequencePair struct {
- // Consumer is the consumer sequence number for message deliveries. This
- // is the total number of messages the consumer has seen (including
- // redeliveries).
- Consumer uint64 `json:"consumer_seq"`
-
- // Stream is the stream sequence number for a message.
- Stream uint64 `json:"stream_seq"`
- }
-
- jetStreamMsg struct {
- msg *nats.Msg
- ackd bool
- js *jetStream
- sync.Mutex
- }
-
- ackOpts struct {
- nakDelay time.Duration
- termReason string
- }
-
- ackType []byte
-)
-
-const (
- controlMsg = "100"
- badRequest = "400"
- noMessages = "404"
- reqTimeout = "408"
- maxBytesExceeded = "409"
- noResponders = "503"
-)
-
-// Headers used when publishing messages.
-const (
- // MsgIdHeader is used to specify a user-defined message ID. It can be used
- // e.g. for deduplication in conjunction with the Duplicates duration on
- // ConsumerConfig or to provide optimistic concurrency safety together with
- // [ExpectedLastMsgIDHeader].
- //
- // This can be set when publishing messages using [WithMsgID] option.
- MsgIDHeader = "Nats-Msg-Id"
-
- // ExpectedStreamHeader contains stream name and is used to assure that the
- // published message is received by expected stream. Server will reject the
- // message if it is not the case.
- //
- // This can be set when publishing messages using [WithExpectStream] option.
- ExpectedStreamHeader = "Nats-Expected-Stream"
-
- // ExpectedLastSeqHeader contains the expected last sequence number of the
- // stream and can be used to apply optimistic concurrency control at stream
- // level. Server will reject the message if it is not the case.
- //
- // This can be set when publishing messages using [WithExpectLastSequence]
- // option. option.
- ExpectedLastSeqHeader = "Nats-Expected-Last-Sequence"
-
- // ExpectedLastSubjSeqHeader contains the expected last sequence number on
- // the subject and can be used to apply optimistic concurrency control at
- // subject level. Server will reject the message if it is not the case.
- //
- // This can be set when publishing messages using
- // [WithExpectLastSequencePerSubject] option.
- ExpectedLastSubjSeqHeader = "Nats-Expected-Last-Subject-Sequence"
-
- // ExpectedLastMsgIDHeader contains the expected last message ID on the
- // subject and can be used to apply optimistic concurrency control at
- // stream level. Server will reject the message if it is not the case.
- //
- // This can be set when publishing messages using [WithExpectLastMsgID]
- // option.
- ExpectedLastMsgIDHeader = "Nats-Expected-Last-Msg-Id"
-
- // MsgRollup is used to apply a purge of all prior messages in the stream
- // ("all") or at the subject ("sub") before this message.
- MsgRollup = "Nats-Rollup"
-)
-
-// Headers for republished messages and direct gets. Those headers are set by
-// the server and should not be set by the client.
-const (
- // StreamHeader contains the stream name the message was republished from or
- // the stream name the message was retrieved from using direct get.
- StreamHeader = "Nats-Stream"
-
- // SequenceHeader contains the original sequence number of the message.
- SequenceHeader = "Nats-Sequence"
-
- // TimeStampHeader contains the original timestamp of the message.
- TimeStampHeaer = "Nats-Time-Stamp"
-
- // SubjectHeader contains the original subject the message was published to.
- SubjectHeader = "Nats-Subject"
-
- // LastSequenceHeader contains the last sequence of the message having the
- // same subject, otherwise zero if this is the first message for the
- // subject.
- LastSequenceHeader = "Nats-Last-Sequence"
-)
-
-// Rollups, can be subject only or all messages.
-const (
- // MsgRollupSubject is used to purge all messages before this message on the
- // message subject.
- MsgRollupSubject = "sub"
-
- // MsgRollupAll is used to purge all messages before this message on the
- // stream.
- MsgRollupAll = "all"
-)
-
-var (
- ackAck ackType = []byte("+ACK")
- ackNak ackType = []byte("-NAK")
- ackProgress ackType = []byte("+WPI")
- ackTerm ackType = []byte("+TERM")
-)
-
-// Metadata returns [MsgMetadata] for a JetStream message.
-func (m *jetStreamMsg) Metadata() (*MsgMetadata, error) {
- if err := m.checkReply(); err != nil {
- return nil, err
- }
-
- tokens, err := parser.GetMetadataFields(m.msg.Reply)
- if err != nil {
- return nil, fmt.Errorf("%w: %s", ErrNotJSMessage, err)
- }
-
- meta := &MsgMetadata{
- Domain: tokens[parser.AckDomainTokenPos],
- NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]),
- NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]),
- Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
- Stream: tokens[parser.AckStreamTokenPos],
- Consumer: tokens[parser.AckConsumerTokenPos],
- }
- meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
- meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])
- return meta, nil
-}
-
-// Data returns the message body.
-func (m *jetStreamMsg) Data() []byte {
- return m.msg.Data
-}
-
-// Headers returns a map of headers for a message.
-func (m *jetStreamMsg) Headers() nats.Header {
- return m.msg.Header
-}
-
-// Subject returns a subject on which a message is published.
-func (m *jetStreamMsg) Subject() string {
- return m.msg.Subject
-}
-
-// Reply returns a reply subject for a JetStream message.
-func (m *jetStreamMsg) Reply() string {
- return m.msg.Reply
-}
-
-// Ack acknowledges a message. This tells the server that the message was
-// successfully processed and it can move on to the next message.
-func (m *jetStreamMsg) Ack() error {
- return m.ackReply(context.Background(), ackAck, false, ackOpts{})
-}
-
-// DoubleAck acknowledges a message and waits for ack reply from the server.
-// While it impacts performance, it is useful for scenarios where
-// message loss is not acceptable.
-func (m *jetStreamMsg) DoubleAck(ctx context.Context) error {
- return m.ackReply(ctx, ackAck, true, ackOpts{})
-}
-
-// Nak negatively acknowledges a message. This tells the server to
-// redeliver the message.
-func (m *jetStreamMsg) Nak() error {
- return m.ackReply(context.Background(), ackNak, false, ackOpts{})
-}
-
-// NakWithDelay negatively acknowledges a message. This tells the server
-// to redeliver the message after the given delay.
-func (m *jetStreamMsg) NakWithDelay(delay time.Duration) error {
- return m.ackReply(context.Background(), ackNak, false, ackOpts{nakDelay: delay})
-}
-
-// InProgress tells the server that this message is being worked on. It
-// resets the redelivery timer on the server.
-func (m *jetStreamMsg) InProgress() error {
- return m.ackReply(context.Background(), ackProgress, false, ackOpts{})
-}
-
-// Term tells the server to not redeliver this message, regardless of
-// the value of MaxDeliver.
-func (m *jetStreamMsg) Term() error {
- return m.ackReply(context.Background(), ackTerm, false, ackOpts{})
-}
-
-// TermWithReason tells the server to not redeliver this message, regardless of
-// the value of MaxDeliver. The provided reason will be included in JetStream
-// advisory event sent by the server.
-//
-// Note: This will only work with JetStream servers >= 2.10.4.
-// For older servers, TermWithReason will be ignored by the server and the message
-// will not be terminated.
-func (m *jetStreamMsg) TermWithReason(reason string) error {
- return m.ackReply(context.Background(), ackTerm, false, ackOpts{termReason: reason})
-}
-
-func (m *jetStreamMsg) ackReply(ctx context.Context, ackType ackType, sync bool, opts ackOpts) error {
- err := m.checkReply()
- if err != nil {
- return err
- }
-
- m.Lock()
- if m.ackd {
- m.Unlock()
- return ErrMsgAlreadyAckd
- }
- m.Unlock()
-
- if sync {
- var cancel context.CancelFunc
- ctx, cancel = wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- }
-
- var body []byte
- if opts.nakDelay > 0 {
- body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, opts.nakDelay.Nanoseconds()))
- } else if opts.termReason != "" {
- body = []byte(fmt.Sprintf("%s %s", ackType, opts.termReason))
- } else {
- body = ackType
- }
-
- if sync {
- _, err = m.js.conn.RequestWithContext(ctx, m.msg.Reply, body)
- } else {
- err = m.js.conn.Publish(m.msg.Reply, body)
- }
- if err != nil {
- return err
- }
-
- // Mark that the message has been acked unless it is ackProgress
- // which can be sent many times.
- if !bytes.Equal(ackType, ackProgress) {
- m.Lock()
- m.ackd = true
- m.Unlock()
- }
- return nil
-}
-
-func (m *jetStreamMsg) checkReply() error {
- if m == nil || m.msg.Sub == nil {
- return ErrMsgNotBound
- }
- if m.msg.Reply == "" {
- return ErrMsgNoReply
- }
- return nil
-}
-
-// Returns if the given message is a user message or not, and if
-// checkSts() is true, returns appropriate error based on the
-// content of the status (404, etc..)
-func checkMsg(msg *nats.Msg) (bool, error) {
- // If payload or no header, consider this a user message
- if len(msg.Data) > 0 || len(msg.Header) == 0 {
- return true, nil
- }
- // Look for status header
- val := msg.Header.Get("Status")
- descr := msg.Header.Get("Description")
- // If not present, then this is considered a user message
- if val == "" {
- return true, nil
- }
-
- switch val {
- case badRequest:
- return false, ErrBadRequest
- case noResponders:
- return false, nats.ErrNoResponders
- case noMessages:
- // 404 indicates that there are no messages.
- return false, ErrNoMessages
- case reqTimeout:
- return false, nats.ErrTimeout
- case controlMsg:
- return false, nil
- case maxBytesExceeded:
- if strings.Contains(strings.ToLower(descr), "message size exceeds maxbytes") {
- return false, ErrMaxBytesExceeded
- }
- if strings.Contains(strings.ToLower(descr), "consumer deleted") {
- return false, ErrConsumerDeleted
- }
- if strings.Contains(strings.ToLower(descr), "leadership change") {
- return false, ErrConsumerLeadershipChanged
- }
- }
- return false, fmt.Errorf("nats: %s", msg.Header.Get("Description"))
-}
-
-func parsePending(msg *nats.Msg) (int, int, error) {
- msgsLeftStr := msg.Header.Get("Nats-Pending-Messages")
- var msgsLeft int
- var err error
- if msgsLeftStr != "" {
- msgsLeft, err = strconv.Atoi(msgsLeftStr)
- if err != nil {
- return 0, 0, fmt.Errorf("nats: invalid format of Nats-Pending-Messages")
- }
- }
- bytesLeftStr := msg.Header.Get("Nats-Pending-Bytes")
- var bytesLeft int
- if bytesLeftStr != "" {
- bytesLeft, err = strconv.Atoi(bytesLeftStr)
- if err != nil {
- return 0, 0, fmt.Errorf("nats: invalid format of Nats-Pending-Bytes")
- }
- }
- return msgsLeft, bytesLeft, nil
-}
-
-// toJSMsg converts core [nats.Msg] to [jetStreamMsg], exposing JetStream-specific operations
-func (js *jetStream) toJSMsg(msg *nats.Msg) *jetStreamMsg {
- return &jetStreamMsg{
- msg: msg,
- js: js,
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/object.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/object.go
deleted file mode 100644
index a0eecff..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/object.go
+++ /dev/null
@@ -1,1607 +0,0 @@
-// Copyright 2023-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nats.go/internal/parser"
- "github.com/nats-io/nuid"
-)
-
-type (
- // ObjectStoreManager is used to manage object stores. It provides methods
- // CRUD operations on object stores.
- ObjectStoreManager interface {
- // ObjectStore will look up and bind to an existing object store
- // instance.
- //
- // If the object store with given name does not exist, ErrBucketNotFound
- // will be returned.
- ObjectStore(ctx context.Context, bucket string) (ObjectStore, error)
-
- // CreateObjectStore will create a new object store with the given
- // configuration.
- //
- // If the object store with given name already exists, ErrBucketExists
- // will be returned.
- CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error)
-
- // UpdateObjectStore will update an existing object store with the given
- // configuration.
- //
- // If the object store with given name does not exist, ErrBucketNotFound
- // will be returned.
- UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error)
-
- // CreateOrUpdateObjectStore will create a new object store with the given
- // configuration if it does not exist, or update an existing object store
- // with the given configuration.
- CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error)
-
- // DeleteObjectStore will delete the provided object store.
- //
- // If the object store with given name does not exist, ErrBucketNotFound
- // will be returned.
- DeleteObjectStore(ctx context.Context, bucket string) error
-
- // ObjectStoreNames is used to retrieve a list of bucket names.
- // It returns an ObjectStoreNamesLister exposing a channel to receive
- // the names of the object stores.
- //
- // The lister will always close the channel when done (either all names
- // have been read or an error occurred) and therefore can be used in a
- // for-range loop.
- ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister
-
- // ObjectStores is used to retrieve a list of bucket statuses.
- // It returns an ObjectStoresLister exposing a channel to receive
- // the statuses of the object stores.
- //
- // The lister will always close the channel when done (either all statuses
- // have been read or an error occurred) and therefore can be used in a
- // for-range loop.
- ObjectStores(ctx context.Context) ObjectStoresLister
- }
-
- // ObjectStore contains methods to operate on an object store.
- // Using the ObjectStore interface, it is possible to:
- //
- // - Perform CRUD operations on objects (Get, Put, Delete).
- // Get and put expose convenience methods to work with
- // byte slices, strings and files, in addition to streaming [io.Reader]
- // - Get information about an object without retrieving it.
- // - Update the metadata of an object.
- // - Add links to other objects or object stores.
- // - Watch for updates to a store
- // - List information about objects in a store
- // - Retrieve status and configuration of an object store.
- ObjectStore interface {
- // Put will place the contents from the reader into a new object. If the
- // object already exists, it will be overwritten. The object name is
- // required and is taken from the ObjectMeta.Name field.
- //
- // The reader will be read until EOF. ObjectInfo will be returned, containing
- // the object's metadata, digest and instance information.
- Put(ctx context.Context, obj ObjectMeta, reader io.Reader) (*ObjectInfo, error)
-
- // PutBytes is convenience function to put a byte slice into this object
- // store under the given name.
- //
- // ObjectInfo will be returned, containing the object's metadata, digest
- // and instance information.
- PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error)
-
- // PutString is convenience function to put a string into this object
- // store under the given name.
- //
- // ObjectInfo will be returned, containing the object's metadata, digest
- // and instance information.
- PutString(ctx context.Context, name string, data string) (*ObjectInfo, error)
-
- // PutFile is convenience function to put a file contents into this
- // object store. The name of the object will be the path of the file.
- //
- // ObjectInfo will be returned, containing the object's metadata, digest
- // and instance information.
- PutFile(ctx context.Context, file string) (*ObjectInfo, error)
-
- // Get will pull the named object from the object store. If the object
- // does not exist, ErrObjectNotFound will be returned.
- //
- // The returned ObjectResult will contain the object's metadata and a
- // reader to read the object's contents. The reader will be closed when
- // all data has been read or an error occurs.
- //
- // A GetObjectShowDeleted option can be supplied to return an object
- // even if it was marked as deleted.
- Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error)
-
- // GetBytes is a convenience function to pull an object from this object
- // store and return it as a byte slice.
- //
- // If the object does not exist, ErrObjectNotFound will be returned.
- //
- // A GetObjectShowDeleted option can be supplied to return an object
- // even if it was marked as deleted.
- GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error)
-
- // GetString is a convenience function to pull an object from this
- // object store and return it as a string.
- //
- // If the object does not exist, ErrObjectNotFound will be returned.
- //
- // A GetObjectShowDeleted option can be supplied to return an object
- // even if it was marked as deleted.
- GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error)
-
- // GetFile is a convenience function to pull an object from this object
- // store and place it in a file. If the file already exists, it will be
- // overwritten, otherwise it will be created.
- //
- // If the object does not exist, ErrObjectNotFound will be returned.
- // A GetObjectShowDeleted option can be supplied to return an object
- // even if it was marked as deleted.
- GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error
-
- // GetInfo will retrieve the current information for the object, containing
- // the object's metadata and instance information.
- //
- // If the object does not exist, ErrObjectNotFound will be returned.
- //
- // A GetObjectInfoShowDeleted option can be supplied to return an object
- // even if it was marked as deleted.
- GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error)
-
- // UpdateMeta will update the metadata for the object.
- //
- // If the object does not exist, ErrUpdateMetaDeleted will be returned.
- // If the new name is different from the old name, and an object with the
- // new name already exists, ErrObjectAlreadyExists will be returned.
- UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error
-
- // Delete will delete the named object from the object store. If the object
- // does not exist, ErrObjectNotFound will be returned. If the object is
- // already deleted, no error will be returned.
- //
- // All chunks for the object will be purged, and the object will be marked
- // as deleted.
- Delete(ctx context.Context, name string) error
-
- // AddLink will add a link to another object. A link is a reference to
- // another object. The provided name is the name of the link object.
- // The provided ObjectInfo is the info of the object being linked to.
- //
- // If an object with given name already exists, ErrObjectAlreadyExists
- // will be returned.
- // If object being linked to is deleted, ErrNoLinkToDeleted will be
- // returned.
- // If the provided object is a link, ErrNoLinkToLink will be returned.
- // If the provided object is nil or the name is empty, ErrObjectRequired
- // will be returned.
- AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error)
-
- // AddBucketLink will add a link to another object store. A link is a
- // reference to another object store. The provided name is the name of
- // the link object.
- // The provided ObjectStore is the object store being linked to.
- //
- // If an object with given name already exists, ErrObjectAlreadyExists
- // will be returned.
- // If the provided object store is nil ErrBucketRequired will be returned.
- AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error)
-
- // Seal will seal the object store, no further modifications will be allowed.
- Seal(ctx context.Context) error
-
- // Watch for any updates to objects in the store. By default, the watcher will send the latest
- // info for each object and all future updates. Watch will send a nil
- // entry when it has received all initial values. There are a few ways
- // to configure the watcher:
- //
- // - IncludeHistory will have the watcher send all historical information
- // for each object.
- // - IgnoreDeletes will have the watcher not pass any objects with
- // delete markers.
- // - UpdatesOnly will have the watcher only pass updates on objects
- // (without latest info when started).
- Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error)
-
- // List will list information about objects in the store.
- //
- // If the object store is empty, ErrNoObjectsFound will be returned.
- List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error)
-
- // Status retrieves the status and configuration of the bucket.
- Status(ctx context.Context) (ObjectStoreStatus, error)
- }
-
- // ObjectWatcher is what is returned when doing a watch. It can be used to
- // retrieve updates to objects in a bucket. If not using UpdatesOnly option,
- // it will also send the latest value for each key. After all initial values
- // have been sent, a nil entry will be sent. Stop can be used to stop the
- // watcher and close the underlying channel. Watcher will not close the
- // channel until Stop is called or connection is closed.
- ObjectWatcher interface {
- Updates() <-chan *ObjectInfo
- Stop() error
- }
-
- // ObjectStoreConfig is the configuration for the object store.
- ObjectStoreConfig struct {
- // Bucket is the name of the object store. Bucket name has to be
- // unique and can only contain alphanumeric characters, dashes, and
- // underscores.
- Bucket string `json:"bucket"`
-
- // Description is an optional description for the object store.
- Description string `json:"description,omitempty"`
-
- // TTL is the maximum age of objects in the store. If an object is not
- // updated within this time, it will be removed from the store.
- // By default, objects do not expire.
- TTL time.Duration `json:"max_age,omitempty"`
-
- // MaxBytes is the maximum size of the object store. If not specified,
- // the default is -1 (unlimited).
- MaxBytes int64 `json:"max_bytes,omitempty"`
-
- // Storage is the type of storage to use for the object store. If not
- // specified, the default is FileStorage.
- Storage StorageType `json:"storage,omitempty"`
-
- // Replicas is the number of replicas to keep for the object store in
- // clustered jetstream. Defaults to 1, maximum is 5.
- Replicas int `json:"num_replicas,omitempty"`
-
- // Placement is used to declare where the object store should be placed via
- // tags and/or an explicit cluster name.
- Placement *Placement `json:"placement,omitempty"`
-
- // Compression enables the underlying stream compression.
- // NOTE: Compression is supported for nats-server 2.10.0+
- Compression bool `json:"compression,omitempty"`
-
- // Bucket-specific metadata
- // NOTE: Metadata requires nats-server v2.10.0+
- Metadata map[string]string `json:"metadata,omitempty"`
- }
-
- // ObjectStoresLister is used to retrieve a list of object stores. It returns
- // a channel to read the bucket store statuses from. The lister will always
- // close the channel when done (either all stores have been retrieved or an
- // error occurred) and therefore can be used in range loops. Stop can be
- // used to stop the lister when not all object stores have been read.
- ObjectStoresLister interface {
- Status() <-chan ObjectStoreStatus
- Error() error
- }
-
- // ObjectStoreNamesLister is used to retrieve a list of object store names.
- // It returns a channel to read the bucket names from. The lister will
- // always close the channel when done (either all stores have been retrieved
- // or an error occurred) and therefore can be used in range loops. Stop can
- // be used to stop the lister when not all bucket names have been read.
- ObjectStoreNamesLister interface {
- Name() <-chan string
- Error() error
- }
-
- // ObjectStoreStatus is run-time status about a bucket.
- ObjectStoreStatus interface {
- // Bucket returns the name of the object store.
- Bucket() string
-
- // Description is the description supplied when creating the bucket.
- Description() string
-
- // TTL indicates how long objects are kept in the bucket.
- TTL() time.Duration
-
- // Storage indicates the underlying JetStream storage technology used to
- // store data.
- Storage() StorageType
-
- // Replicas indicates how many storage replicas are kept for the data in
- // the bucket.
- Replicas() int
-
- // Sealed indicates the stream is sealed and cannot be modified in any
- // way.
- Sealed() bool
-
- // Size is the combined size of all data in the bucket including
- // metadata, in bytes.
- Size() uint64
-
- // BackingStore indicates what technology is used for storage of the
- // bucket. Currently only JetStream is supported.
- BackingStore() string
-
- // Metadata is the user supplied metadata for the bucket.
- Metadata() map[string]string
-
- // IsCompressed indicates if the data is compressed on disk.
- IsCompressed() bool
- }
-
- // ObjectMetaOptions is used to set additional options when creating an object.
- ObjectMetaOptions struct {
- // Link contains information about a link to another object or object store.
- // It should not be set manually, but rather by using the AddLink or
- // AddBucketLink methods.
- Link *ObjectLink `json:"link,omitempty"`
-
- // ChunkSize is the maximum size of each chunk in bytes. If not specified,
- // the default is 128k.
- ChunkSize uint32 `json:"max_chunk_size,omitempty"`
- }
-
- // ObjectMeta is high level information about an object.
- ObjectMeta struct {
- // Name is the name of the object. The name is required when adding an
- // object and has to be unique within the object store.
- Name string `json:"name"`
-
- // Description is an optional description for the object.
- Description string `json:"description,omitempty"`
-
- // Headers is an optional set of user-defined headers for the object.
- Headers nats.Header `json:"headers,omitempty"`
-
- // Metadata is the user supplied metadata for the object.
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Additional options for the object.
- Opts *ObjectMetaOptions `json:"options,omitempty"`
- }
-
- // ObjectInfo contains ObjectMeta and additional information about an
- // object.
- ObjectInfo struct {
- // ObjectMeta contains high level information about the object.
- ObjectMeta
-
- // Bucket is the name of the object store.
- Bucket string `json:"bucket"`
-
- // NUID is the unique identifier for the object set when putting the
- // object into the store.
- NUID string `json:"nuid"`
-
- // Size is the size of the object in bytes. It only includes the size of
- // the object itself, not the metadata.
- Size uint64 `json:"size"`
-
- // ModTime is the last modification time of the object.
- ModTime time.Time `json:"mtime"`
-
- // Chunks is the number of chunks the object is split into. Maximum size
- // of each chunk can be specified in ObjectMetaOptions.
- Chunks uint32 `json:"chunks"`
-
- // Digest is the SHA-256 digest of the object. It is used to verify the
- // integrity of the object.
- Digest string `json:"digest,omitempty"`
-
- // Deleted indicates if the object is marked as deleted.
- Deleted bool `json:"deleted,omitempty"`
- }
-
- // ObjectLink is used to embed links to other buckets and objects.
- ObjectLink struct {
- // Bucket is the name of the object store the link is pointing to.
- Bucket string `json:"bucket"`
-
- // Name can be used to link to a single object.
- // If empty means this is a link to the whole store, like a directory.
- Name string `json:"name,omitempty"`
- }
-
- // ObjectResult will return the object info and a reader to read the object's
- // contents. The reader will be closed when all data has been read or an
- // error occurs.
- ObjectResult interface {
- io.ReadCloser
- Info() (*ObjectInfo, error)
- Error() error
- }
-
- // GetObjectOpt is used to set additional options when getting an object.
- GetObjectOpt func(opts *getObjectOpts) error
-
- // GetObjectInfoOpt is used to set additional options when getting object info.
- GetObjectInfoOpt func(opts *getObjectInfoOpts) error
-
- // ListObjectsOpt is used to set additional options when listing objects.
- ListObjectsOpt func(opts *listObjectOpts) error
-
- getObjectOpts struct {
- // Include deleted object in the result.
- showDeleted bool
- }
-
- getObjectInfoOpts struct {
- // Include deleted object in the result.
- showDeleted bool
- }
-
- listObjectOpts struct {
- // Include deleted objects in the result channel.
- showDeleted bool
- }
-
- obs struct {
- name string
- streamName string
- stream Stream
- pushJS nats.JetStreamContext
- js *jetStream
- }
-
- // ObjectResult impl.
- objResult struct {
- sync.Mutex
- info *ObjectInfo
- r io.ReadCloser
- err error
- ctx context.Context
- digest hash.Hash
- }
-)
-
-const (
- objNameTmpl = "OBJ_%s" // OBJ_ // stream name
- objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject
- objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject
- objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject
- objMetaPreTmpl = "$O.%s.M.%s" // $O..M. // meta message subject
- objNoPending = "0"
- objDefaultChunkSize = uint32(128 * 1024) // 128k
- objDigestType = "SHA-256="
- objDigestTmpl = objDigestType + "%s"
-)
-
-func (js *jetStream) CreateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) {
- scfg, err := js.prepareObjectStoreConfig(ctx, cfg)
- if err != nil {
- return nil, err
- }
-
- stream, err := js.CreateStream(ctx, scfg)
- if err != nil {
- if errors.Is(err, ErrStreamNameAlreadyInUse) {
- // errors are joined so that backwards compatibility is retained
- // and previous checks for ErrStreamNameAlreadyInUse will still work.
- err = errors.Join(fmt.Errorf("%w: %s", ErrBucketExists, cfg.Bucket), err)
- }
- return nil, err
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil
-}
-
-func (js *jetStream) UpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) {
- scfg, err := js.prepareObjectStoreConfig(ctx, cfg)
- if err != nil {
- return nil, err
- }
-
- // Attempt to update the stream.
- stream, err := js.UpdateStream(ctx, scfg)
- if err != nil {
- if errors.Is(err, ErrStreamNotFound) {
- return nil, fmt.Errorf("%w: %s", ErrBucketNotFound, cfg.Bucket)
- }
- return nil, err
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil
-}
-
-func (js *jetStream) CreateOrUpdateObjectStore(ctx context.Context, cfg ObjectStoreConfig) (ObjectStore, error) {
- scfg, err := js.prepareObjectStoreConfig(ctx, cfg)
- if err != nil {
- return nil, err
- }
-
- stream, err := js.CreateOrUpdateStream(ctx, scfg)
- if err != nil {
- return nil, err
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
-
- return mapStreamToObjectStore(js, pushJS, cfg.Bucket, stream), nil
-}
-
-func (js *jetStream) prepareObjectStoreConfig(ctx context.Context, cfg ObjectStoreConfig) (StreamConfig, error) {
- if !validBucketRe.MatchString(cfg.Bucket) {
- return StreamConfig{}, ErrInvalidStoreName
- }
-
- name := cfg.Bucket
- chunks := fmt.Sprintf(objAllChunksPreTmpl, name)
- meta := fmt.Sprintf(objAllMetaPreTmpl, name)
-
- // We will set explicitly some values so that we can do comparison
- // if we get an "already in use" error and need to check if it is same.
- // See kv
- replicas := cfg.Replicas
- if replicas == 0 {
- replicas = 1
- }
- maxBytes := cfg.MaxBytes
- if maxBytes == 0 {
- maxBytes = -1
- }
- var compression StoreCompression
- if cfg.Compression {
- compression = S2Compression
- }
- scfg := StreamConfig{
- Name: fmt.Sprintf(objNameTmpl, name),
- Description: cfg.Description,
- Subjects: []string{chunks, meta},
- MaxAge: cfg.TTL,
- MaxBytes: maxBytes,
- Storage: cfg.Storage,
- Replicas: replicas,
- Placement: cfg.Placement,
- Discard: DiscardNew,
- AllowRollup: true,
- AllowDirect: true,
- Metadata: cfg.Metadata,
- Compression: compression,
- }
-
- return scfg, nil
-}
-
-// ObjectStore will look up and bind to an existing object store instance.
-func (js *jetStream) ObjectStore(ctx context.Context, bucket string) (ObjectStore, error) {
- if !validBucketRe.MatchString(bucket) {
- return nil, ErrInvalidStoreName
- }
-
- streamName := fmt.Sprintf(objNameTmpl, bucket)
- stream, err := js.Stream(ctx, streamName)
- if err != nil {
- if errors.Is(err, ErrStreamNotFound) {
- err = ErrBucketNotFound
- }
- return nil, err
- }
- pushJS, err := js.legacyJetStream()
- if err != nil {
- return nil, err
- }
- return mapStreamToObjectStore(js, pushJS, bucket, stream), nil
-}
-
-// DeleteObjectStore will delete the underlying stream for the named object.
-func (js *jetStream) DeleteObjectStore(ctx context.Context, bucket string) error {
- stream := fmt.Sprintf(objNameTmpl, bucket)
- return js.DeleteStream(ctx, stream)
-}
-
-func encodeName(name string) string {
- return base64.URLEncoding.EncodeToString([]byte(name))
-}
-
-// Put will place the contents from the reader into this object-store.
-func (obs *obs) Put(ctx context.Context, meta ObjectMeta, r io.Reader) (*ObjectInfo, error) {
- if meta.Name == "" {
- return nil, ErrBadObjectMeta
- }
-
- if meta.Opts == nil {
- meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize}
- } else if meta.Opts.Link != nil {
- return nil, ErrLinkNotAllowed
- } else if meta.Opts.ChunkSize == 0 {
- meta.Opts.ChunkSize = objDefaultChunkSize
- }
-
- // Create the new nuid so chunks go on a new subject if the name is re-used
- newnuid := nuid.Next()
-
- // These will be used in more than one place
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid)
-
- // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem
- // Chunks on the old nuid can be cleaned up at the end
- einfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name
- if err != nil && err != ErrObjectNotFound {
- return nil, err
- }
-
- // For async error handling
- var perr error
- var mu sync.Mutex
- setErr := func(err error) {
- mu.Lock()
- defer mu.Unlock()
- perr = err
- }
- getErr := func() error {
- mu.Lock()
- defer mu.Unlock()
- return perr
- }
-
- // Create our own JS context to handle errors etc.
- pubJS, err := New(obs.js.conn, WithPublishAsyncErrHandler(func(js JetStream, _ *nats.Msg, err error) { setErr(err) }))
- if err != nil {
- return nil, err
- }
-
- defer pubJS.(*jetStream).cleanupReplySub()
-
- purgePartial := func() {
- // wait until all pubs are complete or up to default timeout before attempting purge
- select {
- case <-pubJS.PublishAsyncComplete():
- case <-ctx.Done():
- }
- _ = obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj))
- }
-
- m, h := nats.NewMsg(chunkSubj), sha256.New()
- chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0)
-
- // set up the info object. The chunk upload sets the size and digest
- info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: meta}
-
- for r != nil {
- if ctx != nil {
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- err = ctx.Err()
- } else {
- err = nats.ErrTimeout
- }
- default:
- }
- if err != nil {
- purgePartial()
- return nil, err
- }
- }
-
- // Actual read.
- // TODO(dlc) - Deadline?
- n, readErr := r.Read(chunk)
-
- // Handle all non EOF errors
- if readErr != nil && readErr != io.EOF {
- purgePartial()
- return nil, readErr
- }
-
- // Add chunk only if we received data
- if n > 0 {
- // Chunk processing.
- m.Data = chunk[:n]
- h.Write(m.Data)
-
- // Send msg itself.
- if _, err := pubJS.PublishMsgAsync(m); err != nil {
- purgePartial()
- return nil, err
- }
- if err := getErr(); err != nil {
- purgePartial()
- return nil, err
- }
- // Update totals.
- sent++
- total += uint64(n)
- }
-
- // EOF Processing.
- if readErr == io.EOF {
- // Place meta info.
- info.Size, info.Chunks = uint64(total), uint32(sent)
- info.Digest = GetObjectDigestValue(h)
- break
- }
- }
-
- // Prepare the meta message
- metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name))
- mm := nats.NewMsg(metaSubj)
- mm.Header.Set(MsgRollup, MsgRollupSubject)
- mm.Data, err = json.Marshal(info)
- if err != nil {
- if r != nil {
- purgePartial()
- }
- return nil, err
- }
-
- // Publish the meta message.
- _, err = pubJS.PublishMsgAsync(mm)
- if err != nil {
- if r != nil {
- purgePartial()
- }
- return nil, err
- }
-
- // Wait for all to be processed.
- select {
- case <-pubJS.PublishAsyncComplete():
- if err := getErr(); err != nil {
- if r != nil {
- purgePartial()
- }
- return nil, err
- }
- case <-ctx.Done():
- return nil, nats.ErrTimeout
- }
-
- info.ModTime = time.Now().UTC() // This time is not actually the correct time
-
- // Delete any original chunks.
- if einfo != nil && !einfo.Deleted {
- echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID)
- _ = obs.stream.Purge(ctx, WithPurgeSubject(echunkSubj))
- }
-
- // TODO would it be okay to do this to return the info with the correct time?
- // With the understanding that it is an extra call to the server.
- // Otherwise the time the user gets back is the client time, not the server time.
- // return obs.GetInfo(info.Name)
-
- return info, nil
-}
-
-// GetObjectDigestValue calculates the base64 value of hashed data
-func GetObjectDigestValue(data hash.Hash) string {
- sha := data.Sum(nil)
- return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:]))
-}
-
-// DecodeObjectDigest decodes base64 hash
-func DecodeObjectDigest(data string) ([]byte, error) {
- digest := strings.SplitN(data, "=", 2)
- if len(digest) != 2 {
- return nil, ErrInvalidDigestFormat
- }
- return base64.URLEncoding.DecodeString(digest[1])
-}
-
-func (info *ObjectInfo) isLink() bool {
- return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil
-}
-
-// Get will pull the object from the underlying stream.
-func (obs *obs) Get(ctx context.Context, name string, opts ...GetObjectOpt) (ObjectResult, error) {
- var o getObjectOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt(&o); err != nil {
- return nil, err
- }
- }
- }
- infoOpts := make([]GetObjectInfoOpt, 0)
- if o.showDeleted {
- infoOpts = append(infoOpts, GetObjectInfoShowDeleted())
- }
-
- // Grab meta info.
- info, err := obs.GetInfo(ctx, name, infoOpts...)
- if err != nil {
- return nil, err
- }
- if info.NUID == "" {
- return nil, ErrBadObjectMeta
- }
-
- // Check for object links. If single objects we do a pass through.
- if info.isLink() {
- if info.ObjectMeta.Opts.Link.Name == "" {
- return nil, ErrCantGetBucket
- }
-
- // is the link in the same bucket?
- lbuck := info.ObjectMeta.Opts.Link.Bucket
- if lbuck == obs.name {
- return obs.Get(ctx, info.ObjectMeta.Opts.Link.Name)
- }
-
- // different bucket
- lobs, err := obs.js.ObjectStore(ctx, lbuck)
- if err != nil {
- return nil, err
- }
- return lobs.Get(ctx, info.ObjectMeta.Opts.Link.Name)
- }
-
- result := &objResult{info: info, ctx: ctx}
- if info.Size == 0 {
- return result, nil
- }
-
- pr, pw := net.Pipe()
- result.r = pr
-
- gotErr := func(m *nats.Msg, err error) {
- pw.Close()
- m.Sub.Unsubscribe()
- result.setErr(err)
- }
-
- // For calculating sum256
- result.digest = sha256.New()
-
- processChunk := func(m *nats.Msg) {
- var err error
- if ctx != nil {
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- err = ctx.Err()
- } else {
- err = nats.ErrTimeout
- }
- default:
- }
- if err != nil {
- gotErr(m, err)
- return
- }
- }
-
- tokens, err := parser.GetMetadataFields(m.Reply)
- if err != nil {
- gotErr(m, err)
- return
- }
-
- // Write to our pipe.
- for b := m.Data; len(b) > 0; {
- n, err := pw.Write(b)
- if err != nil {
- gotErr(m, err)
- return
- }
- b = b[n:]
- }
- // Update sha256
- result.digest.Write(m.Data)
-
- // Check if we are done.
- if tokens[parser.AckNumPendingTokenPos] == objNoPending {
- pw.Close()
- m.Sub.Unsubscribe()
- }
- }
-
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
- streamName := fmt.Sprintf(objNameTmpl, obs.name)
- subscribeOpts := []nats.SubOpt{
- nats.OrderedConsumer(),
- nats.Context(ctx),
- nats.BindStream(streamName),
- }
- _, err = obs.pushJS.Subscribe(chunkSubj, processChunk, subscribeOpts...)
- if err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-// Delete will delete the object.
-func (obs *obs) Delete(ctx context.Context, name string) error {
- // Grab meta info.
- info, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted())
- if err != nil {
- return err
- }
- if info.NUID == "" {
- return ErrBadObjectMeta
- }
-
- // Place a rollup delete marker and publish the info
- info.Deleted = true
- info.Size, info.Chunks, info.Digest = 0, 0, ""
-
- if err = publishMeta(ctx, info, obs.js); err != nil {
- return err
- }
-
- // Purge chunks for the object.
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
- return obs.stream.Purge(ctx, WithPurgeSubject(chunkSubj))
-}
-
-func publishMeta(ctx context.Context, info *ObjectInfo, js *jetStream) error {
- // marshal the object into json, don't store an actual time
- info.ModTime = time.Time{}
- data, err := json.Marshal(info)
- if err != nil {
- return err
- }
-
- // Prepare and publish the message.
- mm := nats.NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name)))
- mm.Header.Set(MsgRollup, MsgRollupSubject)
- mm.Data = data
- if _, err := js.PublishMsg(ctx, mm); err != nil {
- return err
- }
-
- // set the ModTime in case it's returned to the user, even though it's not the correct time.
- info.ModTime = time.Now().UTC()
- return nil
-}
-
-// AddLink will add a link to another object if it's not deleted and not another link
-// name is the name of this link object
-// obj is what is being linked too
-func (obs *obs) AddLink(ctx context.Context, name string, obj *ObjectInfo) (*ObjectInfo, error) {
- if name == "" {
- return nil, ErrNameRequired
- }
-
- // TODO Handle stale info
-
- if obj == nil || obj.Name == "" {
- return nil, ErrObjectRequired
- }
- if obj.Deleted {
- return nil, ErrNoLinkToDeleted
- }
- if obj.isLink() {
- return nil, ErrNoLinkToLink
- }
-
- // If object with link's name is found, error.
- // If link with link's name is found, that's okay to overwrite.
- // If there was an error that was not ErrObjectNotFound, error.
- einfo, err := obs.GetInfo(ctx, name, GetObjectInfoShowDeleted())
- if einfo != nil {
- if !einfo.isLink() {
- return nil, ErrObjectAlreadyExists
- }
- } else if err != ErrObjectNotFound {
- return nil, err
- }
-
- // create the meta for the link
- meta := &ObjectMeta{
- Name: name,
- Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}},
- }
- info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta}
-
- // put the link object
- if err = publishMeta(ctx, info, obs.js); err != nil {
- return nil, err
- }
-
- return info, nil
-}
-
-// AddBucketLink will add a link to another object store.
-func (ob *obs) AddBucketLink(ctx context.Context, name string, bucket ObjectStore) (*ObjectInfo, error) {
- if name == "" {
- return nil, ErrNameRequired
- }
- if bucket == nil {
- return nil, ErrBucketRequired
- }
- bos, ok := bucket.(*obs)
- if !ok {
- return nil, ErrBucketMalformed
- }
-
- // If object with link's name is found, error.
- // If link with link's name is found, that's okay to overwrite.
- // If there was an error that was not ErrObjectNotFound, error.
- einfo, err := ob.GetInfo(ctx, name, GetObjectInfoShowDeleted())
- if einfo != nil {
- if !einfo.isLink() {
- return nil, ErrObjectAlreadyExists
- }
- } else if err != ErrObjectNotFound {
- return nil, err
- }
-
- // create the meta for the link
- meta := &ObjectMeta{
- Name: name,
- Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}},
- }
- info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta}
-
- // put the link object
- err = publishMeta(ctx, info, ob.js)
- if err != nil {
- return nil, err
- }
-
- return info, nil
-}
-
-// PutBytes is convenience function to put a byte slice into this object store.
-func (obs *obs) PutBytes(ctx context.Context, name string, data []byte) (*ObjectInfo, error) {
- return obs.Put(ctx, ObjectMeta{Name: name}, bytes.NewReader(data))
-}
-
-// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
-func (obs *obs) GetBytes(ctx context.Context, name string, opts ...GetObjectOpt) ([]byte, error) {
- result, err := obs.Get(ctx, name, opts...)
- if err != nil {
- return nil, err
- }
- defer result.Close()
-
- var b bytes.Buffer
- if _, err := b.ReadFrom(result); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-// PutString is convenience function to put a string into this object store.
-func (obs *obs) PutString(ctx context.Context, name string, data string) (*ObjectInfo, error) {
- return obs.Put(ctx, ObjectMeta{Name: name}, strings.NewReader(data))
-}
-
-// GetString is a convenience function to pull an object from this object store and return it as a string.
-func (obs *obs) GetString(ctx context.Context, name string, opts ...GetObjectOpt) (string, error) {
- result, err := obs.Get(ctx, name, opts...)
- if err != nil {
- return "", err
- }
- defer result.Close()
-
- var b bytes.Buffer
- if _, err := b.ReadFrom(result); err != nil {
- return "", err
- }
- return b.String(), nil
-}
-
-// PutFile is convenience function to put a file into an object store.
-func (obs *obs) PutFile(ctx context.Context, file string) (*ObjectInfo, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- return obs.Put(ctx, ObjectMeta{Name: file}, f)
-}
-
-// GetFile is a convenience function to pull and object and place in a file.
-func (obs *obs) GetFile(ctx context.Context, name, file string, opts ...GetObjectOpt) error {
- // Expect file to be new.
- f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600)
- if err != nil {
- return err
- }
- defer f.Close()
-
- result, err := obs.Get(ctx, name, opts...)
- if err != nil {
- os.Remove(f.Name())
- return err
- }
- defer result.Close()
-
- // Stream copy to the file.
- _, err = io.Copy(f, result)
- return err
-}
-
-// GetInfo will retrieve the current information for the object.
-func (obs *obs) GetInfo(ctx context.Context, name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) {
- // Grab last meta value we have.
- if name == "" {
- return nil, ErrNameRequired
- }
- var o getObjectInfoOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt(&o); err != nil {
- return nil, err
- }
- }
- }
-
- metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call
-
- m, err := obs.stream.GetLastMsgForSubject(ctx, metaSubj)
- if err != nil {
- if errors.Is(err, ErrMsgNotFound) {
- err = ErrObjectNotFound
- }
- if errors.Is(err, ErrStreamNotFound) {
- err = ErrBucketNotFound
- }
- return nil, err
- }
- var info ObjectInfo
- if err := json.Unmarshal(m.Data, &info); err != nil {
- return nil, ErrBadObjectMeta
- }
- if !o.showDeleted && info.Deleted {
- return nil, ErrObjectNotFound
- }
- info.ModTime = m.Time
- return &info, nil
-}
-
-// UpdateMeta will update the meta for the object.
-func (obs *obs) UpdateMeta(ctx context.Context, name string, meta ObjectMeta) error {
- // Grab the current meta.
- info, err := obs.GetInfo(ctx, name)
- if err != nil {
- if errors.Is(err, ErrObjectNotFound) {
- return ErrUpdateMetaDeleted
- }
- return err
- }
-
- // If the new name is different from the old, and it exists, error
- // If there was an error that was not ErrObjectNotFound, error.
- if name != meta.Name {
- existingInfo, err := obs.GetInfo(ctx, meta.Name, GetObjectInfoShowDeleted())
- if err != nil && !errors.Is(err, ErrObjectNotFound) {
- return err
- }
- if err == nil && !existingInfo.Deleted {
- return ErrObjectAlreadyExists
- }
- }
-
- // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize)
- // These should only be updated internally when appropriate.
- info.Name = meta.Name
- info.Description = meta.Description
- info.Headers = meta.Headers
- info.Metadata = meta.Metadata
-
- // Prepare the meta message
- if err = publishMeta(ctx, info, obs.js); err != nil {
- return err
- }
-
- // did the name of this object change? We just stored the meta under the new name
- // so delete the meta from the old name via purge stream for subject
- if name != meta.Name {
- metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name))
- return obs.stream.Purge(ctx, WithPurgeSubject(metaSubj))
- }
-
- return nil
-}
-
-// Seal will seal the object store, no further modifications will be allowed.
-func (obs *obs) Seal(ctx context.Context) error {
- si, err := obs.stream.Info(ctx)
- if err != nil {
- return err
- }
- // Seal the stream from being able to take on more messages.
- cfg := si.Config
- cfg.Sealed = true
- _, err = obs.js.UpdateStream(ctx, cfg)
- return err
-}
-
-// Implementation for Watch
-type objWatcher struct {
- updates chan *ObjectInfo
- sub *nats.Subscription
-}
-
-// Updates returns the interior channel.
-func (w *objWatcher) Updates() <-chan *ObjectInfo {
- if w == nil {
- return nil
- }
- return w.updates
-}
-
-// Stop will unsubscribe from the watcher.
-func (w *objWatcher) Stop() error {
- if w == nil {
- return nil
- }
- return w.sub.Unsubscribe()
-}
-
-// Watch for changes in the underlying store and receive meta information updates.
-func (obs *obs) Watch(ctx context.Context, opts ...WatchOpt) (ObjectWatcher, error) {
- var o watchOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureWatcher(&o); err != nil {
- return nil, err
- }
- }
- }
-
- var initDoneMarker bool
-
- w := &objWatcher{updates: make(chan *ObjectInfo, 32)}
-
- update := func(m *nats.Msg) {
- var info ObjectInfo
- if err := json.Unmarshal(m.Data, &info); err != nil {
- return // TODO(dlc) - Communicate this upwards?
- }
- meta, err := m.Metadata()
- if err != nil {
- return
- }
-
- if !o.ignoreDeletes || !info.Deleted {
- info.ModTime = meta.Timestamp
- w.updates <- &info
- }
-
- // if UpdatesOnly is set, no not send nil to the channel
- // as it would always be triggered after initializing the watcher
- if !initDoneMarker && meta.NumPending == 0 {
- initDoneMarker = true
- w.updates <- nil
- }
- }
-
- allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name)
- _, err := obs.stream.GetLastMsgForSubject(ctx, allMeta)
- // if there are no messages on the stream and we are not watching
- // updates only, send nil to the channel to indicate that the initial
- // watch is done
- if !o.updatesOnly {
- if errors.Is(err, ErrMsgNotFound) {
- initDoneMarker = true
- w.updates <- nil
- }
- } else {
- // if UpdatesOnly was used, mark initialization as complete
- initDoneMarker = true
- }
-
- // Used ordered consumer to deliver results.
- streamName := fmt.Sprintf(objNameTmpl, obs.name)
- subOpts := []nats.SubOpt{nats.OrderedConsumer(), nats.BindStream(streamName)}
- if !o.includeHistory {
- subOpts = append(subOpts, nats.DeliverLastPerSubject())
- }
- if o.updatesOnly {
- subOpts = append(subOpts, nats.DeliverNew())
- }
- subOpts = append(subOpts, nats.Context(ctx))
- sub, err := obs.pushJS.Subscribe(allMeta, update, subOpts...)
- if err != nil {
- return nil, err
- }
- w.sub = sub
- return w, nil
-}
-
-// List will list all the objects in this store.
-func (obs *obs) List(ctx context.Context, opts ...ListObjectsOpt) ([]*ObjectInfo, error) {
- var o listObjectOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt(&o); err != nil {
- return nil, err
- }
- }
- }
- watchOpts := make([]WatchOpt, 0)
- if !o.showDeleted {
- watchOpts = append(watchOpts, IgnoreDeletes())
- }
- watcher, err := obs.Watch(ctx, watchOpts...)
- if err != nil {
- return nil, err
- }
- defer watcher.Stop()
-
- var objs []*ObjectInfo
- updates := watcher.Updates()
-Updates:
- for {
- select {
- case entry := <-updates:
- if entry == nil {
- break Updates
- }
- objs = append(objs, entry)
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- }
- if len(objs) == 0 {
- return nil, ErrNoObjectsFound
- }
- return objs, nil
-}
-
-// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus
-type ObjectBucketStatus struct {
- nfo *StreamInfo
- bucket string
-}
-
-// Bucket is the name of the bucket
-func (s *ObjectBucketStatus) Bucket() string { return s.bucket }
-
-// Description is the description supplied when creating the bucket
-func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description }
-
-// TTL indicates how long objects are kept in the bucket
-func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge }
-
-// Storage indicates the underlying JetStream storage technology used to store data
-func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage }
-
-// Replicas indicates how many storage replicas are kept for the data in the bucket
-func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas }
-
-// Sealed indicates the stream is sealed and cannot be modified in any way
-func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed }
-
-// Size is the combined size of all data in the bucket including metadata, in bytes
-func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes }
-
-// BackingStore indicates what technology is used for storage of the bucket
-func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" }
-
-// Metadata is the metadata supplied when creating the bucket
-func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata }
-
-// StreamInfo is the stream info retrieved to create the status
-func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo }
-
-// IsCompressed indicates if the data is compressed on disk
-func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression }
-
-// Status retrieves run-time status about a bucket
-func (obs *obs) Status(ctx context.Context) (ObjectStoreStatus, error) {
- nfo, err := obs.stream.Info(ctx)
- if err != nil {
- return nil, err
- }
-
- status := &ObjectBucketStatus{
- nfo: nfo,
- bucket: obs.name,
- }
-
- return status, nil
-}
-
-// Read impl.
-func (o *objResult) Read(p []byte) (n int, err error) {
- o.Lock()
- defer o.Unlock()
- readDeadline := time.Now().Add(defaultAPITimeout)
- if ctx := o.ctx; ctx != nil {
- if deadline, ok := ctx.Deadline(); ok {
- readDeadline = deadline
- }
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- o.err = ctx.Err()
- } else {
- o.err = nats.ErrTimeout
- }
- default:
- }
- }
- if o.err != nil {
- return 0, o.err
- }
- if o.r == nil {
- return 0, io.EOF
- }
-
- r := o.r.(net.Conn)
- _ = r.SetReadDeadline(readDeadline)
- n, err = r.Read(p)
- if err, ok := err.(net.Error); ok && err.Timeout() {
- if ctx := o.ctx; ctx != nil {
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- return 0, ctx.Err()
- } else {
- return 0, nats.ErrTimeout
- }
- default:
- err = nil
- }
- }
- }
- if err == io.EOF {
- // Make sure the digest matches.
- sha := o.digest.Sum(nil)
- rsha, decodeErr := DecodeObjectDigest(o.info.Digest)
- if decodeErr != nil {
- o.err = decodeErr
- return 0, o.err
- }
- if !bytes.Equal(sha[:], rsha) {
- o.err = ErrDigestMismatch
- return 0, o.err
- }
- }
- return n, err
-}
-
-// Close impl.
-func (o *objResult) Close() error {
- o.Lock()
- defer o.Unlock()
- if o.r == nil {
- return nil
- }
- return o.r.Close()
-}
-
-func (o *objResult) setErr(err error) {
- o.Lock()
- defer o.Unlock()
- o.err = err
-}
-
-func (o *objResult) Info() (*ObjectInfo, error) {
- o.Lock()
- defer o.Unlock()
- return o.info, o.err
-}
-
-func (o *objResult) Error() error {
- o.Lock()
- defer o.Unlock()
- return o.err
-}
-
-// ObjectStoreNames is used to retrieve a list of bucket names
-func (js *jetStream) ObjectStoreNames(ctx context.Context) ObjectStoreNamesLister {
- res := &obsLister{
- obsNames: make(chan string),
- }
- l := &streamLister{js: js}
- streamsReq := streamsRequest{
- Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"),
- }
-
- go func() {
- defer close(res.obsNames)
- for {
- page, err := l.streamNames(ctx, streamsReq)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- res.err = err
- return
- }
- for _, name := range page {
- if !strings.HasPrefix(name, "OBJ_") {
- continue
- }
- res.obsNames <- strings.TrimPrefix(name, "OBJ_")
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
-
- return res
-}
-
-// ObjectStores is used to retrieve a list of bucket statuses
-func (js *jetStream) ObjectStores(ctx context.Context) ObjectStoresLister {
- res := &obsLister{
- obs: make(chan ObjectStoreStatus),
- }
- l := &streamLister{js: js}
- streamsReq := streamsRequest{
- Subject: fmt.Sprintf(objAllChunksPreTmpl, "*"),
- }
- go func() {
- defer close(res.obs)
- for {
- page, err := l.streamInfos(ctx, streamsReq)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- res.err = err
- return
- }
- for _, info := range page {
- if !strings.HasPrefix(info.Config.Name, "OBJ_") {
- continue
- }
- res.obs <- &ObjectBucketStatus{
- nfo: info,
- bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"),
- }
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
-
- return res
-}
-
-type obsLister struct {
- obs chan ObjectStoreStatus
- obsNames chan string
- err error
-}
-
-func (ol *obsLister) Status() <-chan ObjectStoreStatus {
- return ol.obs
-}
-
-func (ol *obsLister) Name() <-chan string {
- return ol.obsNames
-}
-
-func (ol *obsLister) Error() error {
- return ol.err
-}
-
-func mapStreamToObjectStore(js *jetStream, pushJS nats.JetStreamContext, bucket string, stream Stream) *obs {
- info := stream.CachedInfo()
-
- obs := &obs{
- name: bucket,
- js: js,
- pushJS: pushJS,
- streamName: info.Config.Name,
- stream: stream,
- }
-
- return obs
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/object_options.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/object_options.go
deleted file mode 100644
index df58364..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/object_options.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-// GetObjectShowDeleted makes [ObjectStore.Get] return object even if it was
-// marked as deleted.
-func GetObjectShowDeleted() GetObjectOpt {
- return func(opts *getObjectOpts) error {
- opts.showDeleted = true
- return nil
- }
-}
-
-// GetObjectInfoShowDeleted makes [ObjectStore.GetInfo] return object info event
-// if it was marked as deleted.
-func GetObjectInfoShowDeleted() GetObjectInfoOpt {
- return func(opts *getObjectInfoOpts) error {
- opts.showDeleted = true
- return nil
- }
-}
-
-// ListObjectsShowDeleted makes [ObjectStore.ListObjects] also return deleted
-// objects.
-func ListObjectsShowDeleted() ListObjectsOpt {
- return func(opts *listObjectOpts) error {
- opts.showDeleted = true
- return nil
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/ordered.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/ordered.go
deleted file mode 100644
index fd7fe2f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/ordered.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "errors"
- "fmt"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/nats-io/nats.go"
-)
-
-type (
- orderedConsumer struct {
- jetStream *jetStream
- cfg *OrderedConsumerConfig
- stream string
- currentConsumer *pullConsumer
- cursor cursor
- namePrefix string
- serial int
- consumerType consumerType
- doReset chan struct{}
- resetInProgress uint32
- userErrHandler ConsumeErrHandlerFunc
- stopAfter int
- stopAfterMsgsLeft chan int
- withStopAfter bool
- runningFetch *fetchResult
- sync.Mutex
- }
-
- orderedSubscription struct {
- consumer *orderedConsumer
- opts []PullMessagesOpt
- done chan struct{}
- closed uint32
- }
-
- cursor struct {
- streamSeq uint64
- deliverSeq uint64
- }
-
- consumerType int
-)
-
-const (
- consumerTypeNotSet consumerType = iota
- consumerTypeConsume
- consumerTypeFetch
-)
-
-var errOrderedSequenceMismatch = errors.New("sequence mismatch")
-
-// Consume can be used to continuously receive messages and handle them
-// with the provided callback function. Consume cannot be used concurrently
-// when using ordered consumer.
-//
-// See [Consumer.Consume] for more details.
-func (c *orderedConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) {
- if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
- err := c.reset()
- if err != nil {
- return nil, err
- }
- } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
- return nil, ErrOrderedConsumerConcurrentRequests
- }
- if c.consumerType == consumerTypeFetch {
- return nil, ErrOrderConsumerUsedAsFetch
- }
- c.consumerType = consumerTypeConsume
- consumeOpts, err := parseConsumeOpts(true, opts...)
- if err != nil {
- return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
- }
- c.userErrHandler = consumeOpts.ErrHandler
- opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial)))
- if consumeOpts.StopAfter > 0 {
- c.withStopAfter = true
- c.stopAfter = consumeOpts.StopAfter
- }
- c.stopAfterMsgsLeft = make(chan int, 1)
- if c.stopAfter > 0 {
- opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
- }
- sub := &orderedSubscription{
- consumer: c,
- done: make(chan struct{}, 1),
- }
- internalHandler := func(serial int) func(msg Msg) {
- return func(msg Msg) {
- // handler is a noop if message was delivered for a consumer with different serial
- if serial != c.serial {
- return
- }
- meta, err := msg.Metadata()
- if err != nil {
- sub, ok := c.currentConsumer.getSubscription("")
- if !ok {
- return
- }
- c.errHandler(serial)(sub, err)
- return
- }
- dseq := meta.Sequence.Consumer
- if dseq != c.cursor.deliverSeq+1 {
- sub, ok := c.currentConsumer.getSubscription("")
- if !ok {
- return
- }
- c.errHandler(serial)(sub, errOrderedSequenceMismatch)
- return
- }
- c.cursor.deliverSeq = dseq
- c.cursor.streamSeq = meta.Sequence.Stream
- handler(msg)
- }
- }
-
- _, err = c.currentConsumer.Consume(internalHandler(c.serial), opts...)
- if err != nil {
- return nil, err
- }
-
- go func() {
- for {
- select {
- case <-c.doReset:
- if err := c.reset(); err != nil {
- sub, ok := c.currentConsumer.getSubscription("")
- if !ok {
- return
- }
- c.errHandler(c.serial)(sub, err)
- }
- if c.withStopAfter {
- select {
- case c.stopAfter = <-c.stopAfterMsgsLeft:
- default:
- }
- if c.stopAfter <= 0 {
- sub.Stop()
- return
- }
- }
- if c.stopAfter > 0 {
- opts = opts[:len(opts)-2]
- } else {
- opts = opts[:len(opts)-1]
- }
-
- // overwrite the previous err handler to use the new serial
- opts = append(opts, ConsumeErrHandler(c.errHandler(c.serial)))
- if c.withStopAfter {
- opts = append(opts, consumeStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
- }
- if _, err := c.currentConsumer.Consume(internalHandler(c.serial), opts...); err != nil {
- sub, ok := c.currentConsumer.getSubscription("")
- if !ok {
- return
- }
- c.errHandler(c.serial)(sub, err)
- }
- case <-sub.done:
- return
- case msgsLeft, ok := <-c.stopAfterMsgsLeft:
- if !ok {
- close(sub.done)
- }
- c.stopAfter = msgsLeft
- return
- }
- }
- }()
- return sub, nil
-}
-
-func (c *orderedConsumer) errHandler(serial int) func(cc ConsumeContext, err error) {
- return func(cc ConsumeContext, err error) {
- c.Lock()
- defer c.Unlock()
- if c.userErrHandler != nil && !errors.Is(err, errOrderedSequenceMismatch) {
- c.userErrHandler(cc, err)
- }
- if errors.Is(err, ErrNoHeartbeat) ||
- errors.Is(err, errOrderedSequenceMismatch) ||
- errors.Is(err, ErrConsumerDeleted) ||
- errors.Is(err, ErrConsumerNotFound) {
- // only reset if serial matches the current consumer serial and there is no reset in progress
- if serial == c.serial && atomic.LoadUint32(&c.resetInProgress) == 0 {
- atomic.StoreUint32(&c.resetInProgress, 1)
- c.doReset <- struct{}{}
- }
- }
- }
-}
-
-// Messages returns MessagesContext, allowing continuously iterating
-// over messages on a stream. Messages cannot be used concurrently
-// when using ordered consumer.
-//
-// See [Consumer.Messages] for more details.
-func (c *orderedConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) {
- if (c.consumerType == consumerTypeNotSet || c.consumerType == consumerTypeConsume) && c.currentConsumer == nil {
- err := c.reset()
- if err != nil {
- return nil, err
- }
- } else if c.consumerType == consumerTypeConsume && c.currentConsumer != nil {
- return nil, ErrOrderedConsumerConcurrentRequests
- }
- if c.consumerType == consumerTypeFetch {
- return nil, ErrOrderConsumerUsedAsFetch
- }
- c.consumerType = consumerTypeConsume
- consumeOpts, err := parseMessagesOpts(true, opts...)
- if err != nil {
- return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
- }
- opts = append(opts, WithMessagesErrOnMissingHeartbeat(true))
- c.stopAfterMsgsLeft = make(chan int, 1)
- if consumeOpts.StopAfter > 0 {
- c.withStopAfter = true
- c.stopAfter = consumeOpts.StopAfter
- }
- c.userErrHandler = consumeOpts.ErrHandler
- if c.stopAfter > 0 {
- opts = append(opts, messagesStopAfterNotify(c.stopAfter, c.stopAfterMsgsLeft))
- }
- _, err = c.currentConsumer.Messages(opts...)
- if err != nil {
- return nil, err
- }
-
- sub := &orderedSubscription{
- consumer: c,
- opts: opts,
- done: make(chan struct{}, 1),
- }
-
- return sub, nil
-}
-
-func (s *orderedSubscription) Next() (Msg, error) {
- for {
- currentConsumer := s.consumer.currentConsumer
- sub, ok := currentConsumer.getSubscription("")
- if !ok {
- return nil, ErrMsgIteratorClosed
- }
- msg, err := sub.Next()
- if err != nil {
- if errors.Is(err, ErrMsgIteratorClosed) {
- s.Stop()
- return nil, err
- }
- if s.consumer.withStopAfter {
- select {
- case s.consumer.stopAfter = <-s.consumer.stopAfterMsgsLeft:
- default:
- }
- if s.consumer.stopAfter <= 0 {
- s.Stop()
- return nil, ErrMsgIteratorClosed
- }
- s.opts[len(s.opts)-1] = StopAfter(s.consumer.stopAfter)
- }
- if err := s.consumer.reset(); err != nil {
- return nil, err
- }
- _, err := s.consumer.currentConsumer.Messages(s.opts...)
- if err != nil {
- return nil, err
- }
- continue
- }
- meta, err := msg.Metadata()
- if err != nil {
- s.consumer.errHandler(s.consumer.serial)(sub, err)
- continue
- }
- serial := serialNumberFromConsumer(meta.Consumer)
- dseq := meta.Sequence.Consumer
- if dseq != s.consumer.cursor.deliverSeq+1 {
- s.consumer.errHandler(serial)(sub, errOrderedSequenceMismatch)
- continue
- }
- s.consumer.cursor.deliverSeq = dseq
- s.consumer.cursor.streamSeq = meta.Sequence.Stream
- return msg, nil
- }
-}
-
-func (s *orderedSubscription) Stop() {
- if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
- return
- }
- sub, ok := s.consumer.currentConsumer.getSubscription("")
- if !ok {
- return
- }
- s.consumer.currentConsumer.Lock()
- defer s.consumer.currentConsumer.Unlock()
- sub.Stop()
- close(s.done)
-}
-
-func (s *orderedSubscription) Drain() {
- if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
- return
- }
- sub, ok := s.consumer.currentConsumer.getSubscription("")
- if !ok {
- return
- }
- s.consumer.currentConsumer.Lock()
- defer s.consumer.currentConsumer.Unlock()
- sub.Drain()
- close(s.done)
-}
-
-// Fetch is used to retrieve up to a provided number of messages from a
-// stream. This method will always send a single request and wait until
-// either all messages are retrieved or request times out.
-//
-// It is not efficient to use Fetch with on an ordered consumer, as it will
-// reset the consumer for each subsequent Fetch call.
-// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
-func (c *orderedConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) {
- if c.consumerType == consumerTypeConsume {
- return nil, ErrOrderConsumerUsedAsConsume
- }
- c.currentConsumer.Lock()
- if c.runningFetch != nil {
- if !c.runningFetch.done {
- c.currentConsumer.Unlock()
- return nil, ErrOrderedConsumerConcurrentRequests
- }
- c.cursor.streamSeq = c.runningFetch.sseq
- }
- c.currentConsumer.Unlock()
- c.consumerType = consumerTypeFetch
- err := c.reset()
- if err != nil {
- return nil, err
- }
- msgs, err := c.currentConsumer.Fetch(batch, opts...)
- if err != nil {
- return nil, err
- }
- c.runningFetch = msgs.(*fetchResult)
- return msgs, nil
-}
-
-// FetchBytes is used to retrieve up to a provided bytes from the
-// stream. This method will always send a single request and wait until
-// provided number of bytes is exceeded or request times out.
-//
-// It is not efficient to use FetchBytes with on an ordered consumer, as it will
-// reset the consumer for each subsequent Fetch call.
-// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
-func (c *orderedConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) {
- if c.consumerType == consumerTypeConsume {
- return nil, ErrOrderConsumerUsedAsConsume
- }
- if c.runningFetch != nil {
- if !c.runningFetch.done {
- return nil, ErrOrderedConsumerConcurrentRequests
- }
- c.cursor.streamSeq = c.runningFetch.sseq
- }
- c.consumerType = consumerTypeFetch
- err := c.reset()
- if err != nil {
- return nil, err
- }
- msgs, err := c.currentConsumer.FetchBytes(maxBytes, opts...)
- if err != nil {
- return nil, err
- }
- c.runningFetch = msgs.(*fetchResult)
- return msgs, nil
-}
-
-// FetchNoWait is used to retrieve up to a provided number of messages
-// from a stream. This method will always send a single request and
-// immediately return up to a provided number of messages or wait until
-// at least one message is available or request times out.
-//
-// It is not efficient to use FetchNoWait with on an ordered consumer, as it will
-// reset the consumer for each subsequent Fetch call.
-// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
-func (c *orderedConsumer) FetchNoWait(batch int) (MessageBatch, error) {
- if c.consumerType == consumerTypeConsume {
- return nil, ErrOrderConsumerUsedAsConsume
- }
- if c.runningFetch != nil && !c.runningFetch.done {
- return nil, ErrOrderedConsumerConcurrentRequests
- }
- c.consumerType = consumerTypeFetch
- err := c.reset()
- if err != nil {
- return nil, err
- }
- return c.currentConsumer.FetchNoWait(batch)
-}
-
-// Next is used to retrieve the next message from the stream. This
-// method will block until the message is retrieved or timeout is
-// reached.
-//
-// It is not efficient to use Next with on an ordered consumer, as it will
-// reset the consumer for each subsequent Fetch call.
-// Consider using [Consumer.Consume] or [Consumer.Messages] instead.
-func (c *orderedConsumer) Next(opts ...FetchOpt) (Msg, error) {
- res, err := c.Fetch(1, opts...)
- if err != nil {
- return nil, err
- }
- msg := <-res.Messages()
- if msg != nil {
- return msg, nil
- }
- if res.Error() == nil {
- return nil, nats.ErrTimeout
- }
- return nil, res.Error()
-}
-
-func serialNumberFromConsumer(name string) int {
- if len(name) == 0 {
- return 0
- }
- serial, err := strconv.Atoi(name[len(name)-1:])
- if err != nil {
- return 0
- }
- return serial
-}
-
-func (c *orderedConsumer) reset() error {
- c.Lock()
- defer c.Unlock()
- defer atomic.StoreUint32(&c.resetInProgress, 0)
- if c.currentConsumer != nil {
- sub, ok := c.currentConsumer.getSubscription("")
- c.currentConsumer.Lock()
- if ok {
- sub.Stop()
- }
- consName := c.currentConsumer.CachedInfo().Name
- c.currentConsumer.Unlock()
- var err error
- for i := 0; ; i++ {
- if c.cfg.MaxResetAttempts > 0 && i == c.cfg.MaxResetAttempts {
- return fmt.Errorf("%w: maximum number of delete attempts reached: %s", ErrOrderedConsumerReset, err)
- }
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- err = c.jetStream.DeleteConsumer(ctx, c.stream, consName)
- cancel()
- if err != nil {
- if errors.Is(err, ErrConsumerNotFound) {
- break
- }
- if errors.Is(err, nats.ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
- continue
- }
- return err
- }
- break
- }
- }
- seq := c.cursor.streamSeq + 1
- c.cursor.deliverSeq = 0
- consumerConfig := c.getConsumerConfigForSeq(seq)
-
- var err error
- var cons Consumer
- for i := 0; ; i++ {
- if c.cfg.MaxResetAttempts > 0 && i == c.cfg.MaxResetAttempts {
- return fmt.Errorf("%w: maximum number of create consumer attempts reached: %s", ErrOrderedConsumerReset, err)
- }
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- cons, err = c.jetStream.CreateOrUpdateConsumer(ctx, c.stream, *consumerConfig)
- if err != nil {
- if errors.Is(err, ErrConsumerNotFound) {
- cancel()
- break
- }
- if errors.Is(err, nats.ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
- cancel()
- continue
- }
- cancel()
- return err
- }
- cancel()
- break
- }
- c.currentConsumer = cons.(*pullConsumer)
- return nil
-}
-
-func (c *orderedConsumer) getConsumerConfigForSeq(seq uint64) *ConsumerConfig {
- c.serial++
- name := fmt.Sprintf("%s_%d", c.namePrefix, c.serial)
- cfg := &ConsumerConfig{
- Name: name,
- DeliverPolicy: DeliverByStartSequencePolicy,
- OptStartSeq: seq,
- AckPolicy: AckNonePolicy,
- InactiveThreshold: 5 * time.Minute,
- Replicas: 1,
- HeadersOnly: c.cfg.HeadersOnly,
- MemoryStorage: true,
- }
- if len(c.cfg.FilterSubjects) == 1 {
- cfg.FilterSubject = c.cfg.FilterSubjects[0]
- } else {
- cfg.FilterSubjects = c.cfg.FilterSubjects
- }
-
- if seq != c.cfg.OptStartSeq+1 {
- return cfg
- }
-
- // initial request, some options may be modified at that point
- cfg.DeliverPolicy = c.cfg.DeliverPolicy
- if c.cfg.DeliverPolicy == DeliverLastPerSubjectPolicy ||
- c.cfg.DeliverPolicy == DeliverLastPolicy ||
- c.cfg.DeliverPolicy == DeliverNewPolicy ||
- c.cfg.DeliverPolicy == DeliverAllPolicy {
-
- cfg.OptStartSeq = 0
- }
-
- if cfg.DeliverPolicy == DeliverLastPerSubjectPolicy && len(c.cfg.FilterSubjects) == 0 {
- cfg.FilterSubjects = []string{">"}
- }
- if c.cfg.OptStartTime != nil {
- cfg.OptStartSeq = 0
- cfg.DeliverPolicy = DeliverByStartTimePolicy
- cfg.OptStartTime = c.cfg.OptStartTime
- }
- if c.cfg.InactiveThreshold != 0 {
- cfg.InactiveThreshold = c.cfg.InactiveThreshold
- }
-
- return cfg
-}
-
-func consumeStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullConsumeOpt {
- return pullOptFunc(func(opts *consumeOpts) error {
- opts.StopAfter = numMsgs
- opts.stopAfterMsgsLeft = msgsLeftAfterStop
- return nil
- })
-}
-
-func messagesStopAfterNotify(numMsgs int, msgsLeftAfterStop chan int) PullMessagesOpt {
- return pullOptFunc(func(opts *consumeOpts) error {
- opts.StopAfter = numMsgs
- opts.stopAfterMsgsLeft = msgsLeftAfterStop
- return nil
- })
-}
-
-// Info returns information about the ordered consumer.
-// Note that this method will fetch the latest instance of the
-// consumer from the server, which can be deleted by the library at any time.
-func (c *orderedConsumer) Info(ctx context.Context) (*ConsumerInfo, error) {
- c.Lock()
- defer c.Unlock()
- if c.currentConsumer == nil {
- return nil, ErrOrderedConsumerNotCreated
- }
- infoSubject := apiSubj(c.jetStream.apiPrefix, fmt.Sprintf(apiConsumerInfoT, c.stream, c.currentConsumer.name))
- var resp consumerInfoResponse
-
- if _, err := c.jetStream.apiRequestJSON(ctx, infoSubject, &resp); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeConsumerNotFound {
- return nil, ErrConsumerNotFound
- }
- return nil, resp.Error
- }
- if resp.Error == nil && resp.ConsumerInfo == nil {
- return nil, ErrConsumerNotFound
- }
-
- c.currentConsumer.info = resp.ConsumerInfo
- return resp.ConsumerInfo, nil
-}
-
-// CachedInfo returns cached information about the consumer currently
-// used by the ordered consumer. Cached info will be updated on every call
-// to [Consumer.Info] or on consumer reset.
-func (c *orderedConsumer) CachedInfo() *ConsumerInfo {
- c.Lock()
- defer c.Unlock()
- if c.currentConsumer == nil {
- return nil
- }
- return c.currentConsumer.info
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/publish.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/publish.go
deleted file mode 100644
index 1a3a4fd..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/publish.go
+++ /dev/null
@@ -1,581 +0,0 @@
-// Copyright 2022-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "crypto/sha256"
- "encoding/json"
- "errors"
- "fmt"
- "math/rand"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nuid"
-)
-
-type (
- asyncPublisherOpts struct {
- // For async publish error handling.
- aecb MsgErrHandler
- // Max async pub ack in flight
- maxpa int
- }
-
- // PublishOpt are the options that can be passed to Publish methods.
- PublishOpt func(*pubOpts) error
-
- pubOpts struct {
- id string
- lastMsgID string // Expected last msgId
- stream string // Expected stream name
- lastSeq *uint64 // Expected last sequence
- lastSubjectSeq *uint64 // Expected last sequence per subject
-
- // Publish retries for NoResponders err.
- retryWait time.Duration // Retry wait between attempts
- retryAttempts int // Retry attempts
-
- // stallWait is the max wait of a async pub ack.
- stallWait time.Duration
-
- // internal option to re-use existing paf in case of retry.
- pafRetry *pubAckFuture
- }
-
- // PubAckFuture is a future for a PubAck.
- // It can be used to wait for a PubAck or an error after an async publish.
- PubAckFuture interface {
- // Ok returns a receive only channel that can be used to get a PubAck.
- Ok() <-chan *PubAck
-
- // Err returns a receive only channel that can be used to get the error from an async publish.
- Err() <-chan error
-
- // Msg returns the message that was sent to the server.
- Msg() *nats.Msg
- }
-
- pubAckFuture struct {
- jsClient *jetStreamClient
- msg *nats.Msg
- retries int
- maxRetries int
- retryWait time.Duration
- ack *PubAck
- err error
- errCh chan error
- doneCh chan *PubAck
- }
-
- jetStreamClient struct {
- asyncPublishContext
- asyncPublisherOpts
- }
-
- // MsgErrHandler is used to process asynchronous errors from JetStream
- // PublishAsync. It will return the original message sent to the server for
- // possible retransmitting and the error encountered.
- MsgErrHandler func(JetStream, *nats.Msg, error)
-
- asyncPublishContext struct {
- sync.RWMutex
- replyPrefix string
- replySub *nats.Subscription
- acks map[string]*pubAckFuture
- stallCh chan struct{}
- doneCh chan struct{}
- rr *rand.Rand
- // channel to signal when server is disconnected or conn is closed
- connStatusCh chan (nats.Status)
- }
-
- pubAckResponse struct {
- apiResponse
- *PubAck
- }
-
- // PubAck is an ack received after successfully publishing a message.
- PubAck struct {
- // Stream is the stream name the message was published to.
- Stream string `json:"stream"`
-
- // Sequence is the stream sequence number of the message.
- Sequence uint64 `json:"seq"`
-
- // Duplicate indicates whether the message was a duplicate.
- // Duplicate can be detected using the [MsgIDHeader] and [StreamConfig.Duplicates].
- Duplicate bool `json:"duplicate,omitempty"`
-
- // Domain is the domain the message was published to.
- Domain string `json:"domain,omitempty"`
- }
-)
-
-const (
- // Default time wait between retries on Publish if err is ErrNoResponders.
- DefaultPubRetryWait = 250 * time.Millisecond
-
- // Default number of retries
- DefaultPubRetryAttempts = 2
-)
-
-const (
- statusHdr = "Status"
-
- rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
- base = 62
-)
-
-// Publish performs a synchronous publish to a stream and waits for ack
-// from server. It accepts subject name (which must be bound to a stream)
-// and message payload.
-func (js *jetStream) Publish(ctx context.Context, subj string, data []byte, opts ...PublishOpt) (*PubAck, error) {
- return js.PublishMsg(ctx, &nats.Msg{Subject: subj, Data: data}, opts...)
-}
-
-// PublishMsg performs a synchronous publish to a stream and waits for
-// ack from server. It accepts subject name (which must be bound to a
-// stream) and nats.Message.
-func (js *jetStream) PublishMsg(ctx context.Context, m *nats.Msg, opts ...PublishOpt) (*PubAck, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- o := pubOpts{
- retryWait: DefaultPubRetryWait,
- retryAttempts: DefaultPubRetryAttempts,
- }
- if len(opts) > 0 {
- if m.Header == nil {
- m.Header = nats.Header{}
- }
- for _, opt := range opts {
- if err := opt(&o); err != nil {
- return nil, err
- }
- }
- }
- if o.stallWait > 0 {
- return nil, fmt.Errorf("%w: stall wait cannot be set to sync publish", ErrInvalidOption)
- }
-
- if o.id != "" {
- m.Header.Set(MsgIDHeader, o.id)
- }
- if o.lastMsgID != "" {
- m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
- }
- if o.stream != "" {
- m.Header.Set(ExpectedStreamHeader, o.stream)
- }
- if o.lastSeq != nil {
- m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
- }
- if o.lastSubjectSeq != nil {
- m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
- }
-
- var resp *nats.Msg
- var err error
-
- resp, err = js.conn.RequestMsgWithContext(ctx, m)
-
- if err != nil {
- for r := 0; errors.Is(err, nats.ErrNoResponders) && (r < o.retryAttempts || o.retryAttempts < 0); r++ {
- // To protect against small blips in leadership changes etc, if we get a no responders here retry.
- select {
- case <-ctx.Done():
- case <-time.After(o.retryWait):
- }
- resp, err = js.conn.RequestMsgWithContext(ctx, m)
- }
- if err != nil {
- if errors.Is(err, nats.ErrNoResponders) {
- return nil, ErrNoStreamResponse
- }
- return nil, err
- }
- }
-
- var ackResp pubAckResponse
- if err := json.Unmarshal(resp.Data, &ackResp); err != nil {
- return nil, ErrInvalidJSAck
- }
- if ackResp.Error != nil {
- return nil, fmt.Errorf("nats: %w", ackResp.Error)
- }
- if ackResp.PubAck == nil || ackResp.PubAck.Stream == "" {
- return nil, ErrInvalidJSAck
- }
- return ackResp.PubAck, nil
-}
-
-// PublishAsync performs an asynchronous publish to a stream and returns
-// [PubAckFuture] interface. It accepts subject name (which must be bound
-// to a stream) and message payload.
-func (js *jetStream) PublishAsync(subj string, data []byte, opts ...PublishOpt) (PubAckFuture, error) {
- return js.PublishMsgAsync(&nats.Msg{Subject: subj, Data: data}, opts...)
-}
-
-// PublishMsgAsync performs an asynchronous publish to a stream and
-// returns [PubAckFuture] interface. It accepts subject name (which must
-// be bound to a stream) and nats.Message.
-func (js *jetStream) PublishMsgAsync(m *nats.Msg, opts ...PublishOpt) (PubAckFuture, error) {
- o := pubOpts{
- retryWait: DefaultPubRetryWait,
- retryAttempts: DefaultPubRetryAttempts,
- }
- if len(opts) > 0 {
- if m.Header == nil {
- m.Header = nats.Header{}
- }
- for _, opt := range opts {
- if err := opt(&o); err != nil {
- return nil, err
- }
- }
- }
- defaultStallWait := 200 * time.Millisecond
-
- stallWait := defaultStallWait
- if o.stallWait > 0 {
- stallWait = o.stallWait
- }
-
- if o.id != "" {
- m.Header.Set(MsgIDHeader, o.id)
- }
- if o.lastMsgID != "" {
- m.Header.Set(ExpectedLastMsgIDHeader, o.lastMsgID)
- }
- if o.stream != "" {
- m.Header.Set(ExpectedStreamHeader, o.stream)
- }
- if o.lastSeq != nil {
- m.Header.Set(ExpectedLastSeqHeader, strconv.FormatUint(*o.lastSeq, 10))
- }
- if o.lastSubjectSeq != nil {
- m.Header.Set(ExpectedLastSubjSeqHeader, strconv.FormatUint(*o.lastSubjectSeq, 10))
- }
-
- paf := o.pafRetry
- if paf == nil && m.Reply != "" {
- return nil, ErrAsyncPublishReplySubjectSet
- }
-
- var id string
-
- // register new paf if not retrying
- if paf == nil {
- var err error
- m.Reply, err = js.newAsyncReply()
- defer func() { m.Reply = "" }()
- if err != nil {
- return nil, fmt.Errorf("nats: error creating async reply handler: %s", err)
- }
- id = m.Reply[js.replyPrefixLen:]
- paf = &pubAckFuture{msg: m, jsClient: js.publisher, maxRetries: o.retryAttempts, retryWait: o.retryWait}
- numPending, maxPending := js.registerPAF(id, paf)
-
- if maxPending > 0 && numPending > maxPending {
- select {
- case <-js.asyncStall():
- case <-time.After(stallWait):
- js.clearPAF(id)
- return nil, ErrTooManyStalledMsgs
- }
- }
- } else {
- // when retrying, get the ID from existing reply subject
- id = m.Reply[js.replyPrefixLen:]
- }
-
- if err := js.conn.PublishMsg(m); err != nil {
- js.clearPAF(id)
- return nil, err
- }
-
- return paf, nil
-}
-
-// For quick token lookup etc.
-const (
- aReplyTokensize = 6
-)
-
-func (js *jetStream) newAsyncReply() (string, error) {
- js.publisher.Lock()
- if js.publisher.replySub == nil {
- // Create our wildcard reply subject.
- sha := sha256.New()
- sha.Write([]byte(nuid.Next()))
- b := sha.Sum(nil)
- for i := 0; i < aReplyTokensize; i++ {
- b[i] = rdigits[int(b[i]%base)]
- }
- js.publisher.replyPrefix = fmt.Sprintf("%s%s.", js.replyPrefix, b[:aReplyTokensize])
- sub, err := js.conn.Subscribe(fmt.Sprintf("%s*", js.publisher.replyPrefix), js.handleAsyncReply)
- if err != nil {
- js.publisher.Unlock()
- return "", err
- }
- js.publisher.replySub = sub
- js.publisher.rr = rand.New(rand.NewSource(time.Now().UnixNano()))
- }
- if js.publisher.connStatusCh == nil {
- js.publisher.connStatusCh = js.conn.StatusChanged(nats.RECONNECTING, nats.CLOSED)
- go js.resetPendingAcksOnReconnect()
- }
- var sb strings.Builder
- sb.WriteString(js.publisher.replyPrefix)
- rn := js.publisher.rr.Int63()
- var b [aReplyTokensize]byte
- for i, l := 0, rn; i < len(b); i++ {
- b[i] = rdigits[l%base]
- l /= base
- }
- sb.Write(b[:])
- js.publisher.Unlock()
- return sb.String(), nil
-}
-
-// Handle an async reply from PublishAsync.
-func (js *jetStream) handleAsyncReply(m *nats.Msg) {
- if len(m.Subject) <= js.replyPrefixLen {
- return
- }
- id := m.Subject[js.replyPrefixLen:]
-
- js.publisher.Lock()
-
- paf := js.getPAF(id)
- if paf == nil {
- js.publisher.Unlock()
- return
- }
-
- doErr := func(err error) {
- paf.err = err
- if paf.errCh != nil {
- paf.errCh <- paf.err
- }
- cb := js.publisher.asyncPublisherOpts.aecb
- js.publisher.Unlock()
- if cb != nil {
- paf.msg.Reply = ""
- cb(js, paf.msg, err)
- }
- }
-
- // Process no responders etc.
- if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
- if paf.retries < paf.maxRetries {
- paf.retries++
- paf.msg.Reply = m.Subject
- time.AfterFunc(paf.retryWait, func() {
- js.publisher.Lock()
- paf := js.getPAF(id)
- js.publisher.Unlock()
- if paf == nil {
- return
- }
- _, err := js.PublishMsgAsync(paf.msg, func(po *pubOpts) error {
- po.pafRetry = paf
- return nil
- })
- if err != nil {
- js.publisher.Lock()
- doErr(err)
- }
- })
- js.publisher.Unlock()
- return
- }
- delete(js.publisher.acks, id)
- doErr(ErrNoStreamResponse)
- return
- }
-
- // Remove
- delete(js.publisher.acks, id)
-
- // Check on anyone stalled and waiting.
- if js.publisher.stallCh != nil && len(js.publisher.acks) < js.publisher.asyncPublisherOpts.maxpa {
- close(js.publisher.stallCh)
- js.publisher.stallCh = nil
- }
- // Check on anyone waiting on done status.
- if js.publisher.doneCh != nil && len(js.publisher.acks) == 0 {
- dch := js.publisher.doneCh
- js.publisher.doneCh = nil
- // Defer here so error is processed and can be checked.
- defer close(dch)
- }
-
- var pa pubAckResponse
- if err := json.Unmarshal(m.Data, &pa); err != nil {
- doErr(ErrInvalidJSAck)
- return
- }
- if pa.Error != nil {
- doErr(pa.Error)
- return
- }
- if pa.PubAck == nil || pa.PubAck.Stream == "" {
- doErr(ErrInvalidJSAck)
- return
- }
-
- // So here we have received a proper puback.
- paf.ack = pa.PubAck
- if paf.doneCh != nil {
- paf.doneCh <- paf.ack
- }
- js.publisher.Unlock()
-}
-
-func (js *jetStream) resetPendingAcksOnReconnect() {
- js.publisher.Lock()
- connStatusCh := js.publisher.connStatusCh
- js.publisher.Unlock()
- for {
- newStatus, ok := <-connStatusCh
- if !ok || newStatus == nats.CLOSED {
- return
- }
- js.publisher.Lock()
- errCb := js.publisher.asyncPublisherOpts.aecb
- for id, paf := range js.publisher.acks {
- paf.err = nats.ErrDisconnected
- if paf.errCh != nil {
- paf.errCh <- paf.err
- }
- if errCb != nil {
- js.publisher.Unlock()
- // clear reply subject so that new one is created on republish
- paf.msg.Reply = ""
- errCb(js, paf.msg, nats.ErrDisconnected)
- js.publisher.Lock()
- }
- delete(js.publisher.acks, id)
- }
- if js.publisher.doneCh != nil {
- close(js.publisher.doneCh)
- js.publisher.doneCh = nil
- }
- js.publisher.Unlock()
- }
-}
-
-// registerPAF will register for a PubAckFuture.
-func (js *jetStream) registerPAF(id string, paf *pubAckFuture) (int, int) {
- js.publisher.Lock()
- if js.publisher.acks == nil {
- js.publisher.acks = make(map[string]*pubAckFuture)
- }
- js.publisher.acks[id] = paf
- np := len(js.publisher.acks)
- maxpa := js.publisher.asyncPublisherOpts.maxpa
- js.publisher.Unlock()
- return np, maxpa
-}
-
-// Lock should be held.
-func (js *jetStream) getPAF(id string) *pubAckFuture {
- if js.publisher.acks == nil {
- return nil
- }
- return js.publisher.acks[id]
-}
-
-// clearPAF will remove a PubAckFuture that was registered.
-func (js *jetStream) clearPAF(id string) {
- js.publisher.Lock()
- delete(js.publisher.acks, id)
- js.publisher.Unlock()
-}
-
-func (js *jetStream) asyncStall() <-chan struct{} {
- js.publisher.Lock()
- if js.publisher.stallCh == nil {
- js.publisher.stallCh = make(chan struct{})
- }
- stc := js.publisher.stallCh
- js.publisher.Unlock()
- return stc
-}
-
-func (paf *pubAckFuture) Ok() <-chan *PubAck {
- paf.jsClient.Lock()
- defer paf.jsClient.Unlock()
-
- if paf.doneCh == nil {
- paf.doneCh = make(chan *PubAck, 1)
- if paf.ack != nil {
- paf.doneCh <- paf.ack
- }
- }
-
- return paf.doneCh
-}
-
-func (paf *pubAckFuture) Err() <-chan error {
- paf.jsClient.Lock()
- defer paf.jsClient.Unlock()
-
- if paf.errCh == nil {
- paf.errCh = make(chan error, 1)
- if paf.err != nil {
- paf.errCh <- paf.err
- }
- }
-
- return paf.errCh
-}
-
-func (paf *pubAckFuture) Msg() *nats.Msg {
- paf.jsClient.RLock()
- defer paf.jsClient.RUnlock()
- return paf.msg
-}
-
-// PublishAsyncPending returns the number of async publishes outstanding
-// for this context.
-func (js *jetStream) PublishAsyncPending() int {
- js.publisher.RLock()
- defer js.publisher.RUnlock()
- return len(js.publisher.acks)
-}
-
-// PublishAsyncComplete returns a channel that will be closed when all
-// outstanding asynchronously published messages are acknowledged by the
-// server.
-func (js *jetStream) PublishAsyncComplete() <-chan struct{} {
- js.publisher.Lock()
- defer js.publisher.Unlock()
- if js.publisher.doneCh == nil {
- js.publisher.doneCh = make(chan struct{})
- }
- dch := js.publisher.doneCh
- if len(js.publisher.acks) == 0 {
- close(js.publisher.doneCh)
- js.publisher.doneCh = nil
- }
- return dch
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/pull.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/pull.go
deleted file mode 100644
index 3196ad5..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/pull.go
+++ /dev/null
@@ -1,1154 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nuid"
-)
-
-type (
- // MessagesContext supports iterating over a messages on a stream.
- // It is returned by [Consumer.Messages] method.
- MessagesContext interface {
- // Next retrieves next message on a stream. It will block until the next
- // message is available. If the context is canceled, Next will return
- // ErrMsgIteratorClosed error.
- Next() (Msg, error)
-
- // Stop unsubscribes from the stream and cancels subscription. Calling
- // Next after calling Stop will return ErrMsgIteratorClosed error.
- // All messages that are already in the buffer are discarded.
- Stop()
-
- // Drain unsubscribes from the stream and cancels subscription. All
- // messages that are already in the buffer will be available on
- // subsequent calls to Next. After the buffer is drained, Next will
- // return ErrMsgIteratorClosed error.
- Drain()
- }
-
- // ConsumeContext supports processing incoming messages from a stream.
- // It is returned by [Consumer.Consume] method.
- ConsumeContext interface {
- // Stop unsubscribes from the stream and cancels subscription.
- // No more messages will be received after calling this method.
- // All messages that are already in the buffer are discarded.
- Stop()
-
- // Drain unsubscribes from the stream and cancels subscription.
- // All messages that are already in the buffer will be processed in callback function.
- Drain()
- }
-
- // MessageHandler is a handler function used as callback in [Consume].
- MessageHandler func(msg Msg)
-
- // PullConsumeOpt represent additional options used in [Consume] for pull consumers.
- PullConsumeOpt interface {
- configureConsume(*consumeOpts) error
- }
-
- // PullMessagesOpt represent additional options used in [Messages] for pull consumers.
- PullMessagesOpt interface {
- configureMessages(*consumeOpts) error
- }
-
- pullConsumer struct {
- sync.Mutex
- jetStream *jetStream
- stream string
- durable bool
- name string
- info *ConsumerInfo
- subscriptions map[string]*pullSubscription
- }
-
- pullRequest struct {
- Expires time.Duration `json:"expires,omitempty"`
- Batch int `json:"batch,omitempty"`
- MaxBytes int `json:"max_bytes,omitempty"`
- NoWait bool `json:"no_wait,omitempty"`
- Heartbeat time.Duration `json:"idle_heartbeat,omitempty"`
- }
-
- consumeOpts struct {
- Expires time.Duration
- MaxMessages int
- MaxBytes int
- Heartbeat time.Duration
- ErrHandler ConsumeErrHandlerFunc
- ReportMissingHeartbeats bool
- ThresholdMessages int
- ThresholdBytes int
- StopAfter int
- stopAfterMsgsLeft chan int
- }
-
- ConsumeErrHandlerFunc func(consumeCtx ConsumeContext, err error)
-
- pullSubscription struct {
- sync.Mutex
- id string
- consumer *pullConsumer
- subscription *nats.Subscription
- msgs chan *nats.Msg
- errs chan error
- pending pendingMsgs
- hbMonitor *hbMonitor
- fetchInProgress uint32
- closed uint32
- draining uint32
- done chan struct{}
- connStatusChanged chan nats.Status
- fetchNext chan *pullRequest
- consumeOpts *consumeOpts
- delivered int
- }
-
- pendingMsgs struct {
- msgCount int
- byteCount int
- }
-
- MessageBatch interface {
- Messages() <-chan Msg
- Error() error
- }
-
- fetchResult struct {
- msgs chan Msg
- err error
- done bool
- sseq uint64
- }
-
- FetchOpt func(*pullRequest) error
-
- hbMonitor struct {
- timer *time.Timer
- sync.Mutex
- }
-)
-
-const (
- DefaultMaxMessages = 500
- DefaultExpires = 30 * time.Second
- unset = -1
-)
-
-func min(x, y int) int {
- if x < y {
- return x
- }
- return y
-}
-
-// Consume can be used to continuously receive messages and handle them
-// with the provided callback function. Consume cannot be used concurrently
-// when using ordered consumer.
-//
-// See [Consumer.Consume] for more details.
-func (p *pullConsumer) Consume(handler MessageHandler, opts ...PullConsumeOpt) (ConsumeContext, error) {
- if handler == nil {
- return nil, ErrHandlerRequired
- }
- consumeOpts, err := parseConsumeOpts(false, opts...)
- if err != nil {
- return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
- }
- p.Lock()
-
- subject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiRequestNextT, p.stream, p.name))
-
- // for single consume, use empty string as id
- // this is useful for ordered consumer, where only a single subscription is valid
- var consumeID string
- if len(p.subscriptions) > 0 {
- consumeID = nuid.Next()
- }
- sub := &pullSubscription{
- id: consumeID,
- consumer: p,
- errs: make(chan error, 1),
- done: make(chan struct{}, 1),
- fetchNext: make(chan *pullRequest, 1),
- consumeOpts: consumeOpts,
- }
- sub.connStatusChanged = p.jetStream.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING)
-
- sub.hbMonitor = sub.scheduleHeartbeatCheck(consumeOpts.Heartbeat)
-
- p.subscriptions[sub.id] = sub
- p.Unlock()
-
- internalHandler := func(msg *nats.Msg) {
- if sub.hbMonitor != nil {
- sub.hbMonitor.Stop()
- }
- userMsg, msgErr := checkMsg(msg)
- if !userMsg && msgErr == nil {
- if sub.hbMonitor != nil {
- sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
- }
- return
- }
- defer func() {
- sub.Lock()
- sub.checkPending()
- if sub.hbMonitor != nil {
- sub.hbMonitor.Reset(2 * consumeOpts.Heartbeat)
- }
- sub.Unlock()
- }()
- if !userMsg {
- // heartbeat message
- if msgErr == nil {
- return
- }
-
- sub.Lock()
- err := sub.handleStatusMsg(msg, msgErr)
- sub.Unlock()
-
- if err != nil {
- if atomic.LoadUint32(&sub.closed) == 1 {
- return
- }
- if sub.consumeOpts.ErrHandler != nil {
- sub.consumeOpts.ErrHandler(sub, err)
- }
- sub.Stop()
- }
- return
- }
- handler(p.jetStream.toJSMsg(msg))
- sub.Lock()
- sub.decrementPendingMsgs(msg)
- sub.incrementDeliveredMsgs()
- sub.Unlock()
-
- if sub.consumeOpts.StopAfter > 0 && sub.consumeOpts.StopAfter == sub.delivered {
- sub.Stop()
- }
- }
- inbox := p.jetStream.conn.NewInbox()
- sub.subscription, err = p.jetStream.conn.Subscribe(inbox, internalHandler)
- if err != nil {
- return nil, err
- }
- sub.subscription.SetClosedHandler(func(sid string) func(string) {
- return func(subject string) {
- p.Lock()
- defer p.Unlock()
- delete(p.subscriptions, sid)
- atomic.CompareAndSwapUint32(&sub.draining, 1, 0)
- }
- }(sub.id))
-
- sub.Lock()
- // initial pull
- sub.resetPendingMsgs()
- batchSize := sub.consumeOpts.MaxMessages
- if sub.consumeOpts.StopAfter > 0 {
- batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered)
- }
- if err := sub.pull(&pullRequest{
- Expires: consumeOpts.Expires,
- Batch: batchSize,
- MaxBytes: consumeOpts.MaxBytes,
- Heartbeat: consumeOpts.Heartbeat,
- }, subject); err != nil {
- sub.errs <- err
- }
- sub.Unlock()
-
- go func() {
- isConnected := true
- for {
- if atomic.LoadUint32(&sub.closed) == 1 {
- return
- }
- select {
- case status, ok := <-sub.connStatusChanged:
- if !ok {
- continue
- }
- if status == nats.RECONNECTING {
- if sub.hbMonitor != nil {
- sub.hbMonitor.Stop()
- }
- isConnected = false
- }
- if status == nats.CONNECTED {
- sub.Lock()
- if !isConnected {
- isConnected = true
- // try fetching consumer info several times to make sure consumer is available after reconnect
- backoffOpts := backoffOpts{
- attempts: 10,
- initialInterval: 1 * time.Second,
- disableInitialExecution: true,
- factor: 2,
- maxInterval: 10 * time.Second,
- cancel: sub.done,
- }
- err = retryWithBackoff(func(attempt int) (bool, error) {
- isClosed := atomic.LoadUint32(&sub.closed) == 1
- if isClosed {
- return false, nil
- }
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- _, err := p.Info(ctx)
- if err != nil {
- if sub.consumeOpts.ErrHandler != nil {
- err = fmt.Errorf("[%d] attempting to fetch consumer info after reconnect: %w", attempt, err)
- if attempt == backoffOpts.attempts-1 {
- err = errors.Join(err, fmt.Errorf("maximum retry attempts reached"))
- }
- sub.consumeOpts.ErrHandler(sub, err)
- }
- return true, err
- }
- return false, nil
- }, backoffOpts)
- if err != nil {
- if sub.consumeOpts.ErrHandler != nil {
- sub.consumeOpts.ErrHandler(sub, err)
- }
- sub.Unlock()
- sub.cleanup()
- return
- }
-
- sub.fetchNext <- &pullRequest{
- Expires: sub.consumeOpts.Expires,
- Batch: sub.consumeOpts.MaxMessages,
- MaxBytes: sub.consumeOpts.MaxBytes,
- Heartbeat: sub.consumeOpts.Heartbeat,
- }
- if sub.hbMonitor != nil {
- sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat)
- }
- sub.resetPendingMsgs()
- }
- sub.Unlock()
- }
- case err := <-sub.errs:
- sub.Lock()
- if sub.consumeOpts.ErrHandler != nil {
- sub.consumeOpts.ErrHandler(sub, err)
- }
- if errors.Is(err, ErrNoHeartbeat) {
- batchSize := sub.consumeOpts.MaxMessages
- if sub.consumeOpts.StopAfter > 0 {
- batchSize = min(batchSize, sub.consumeOpts.StopAfter-sub.delivered)
- }
- sub.fetchNext <- &pullRequest{
- Expires: sub.consumeOpts.Expires,
- Batch: batchSize,
- MaxBytes: sub.consumeOpts.MaxBytes,
- Heartbeat: sub.consumeOpts.Heartbeat,
- }
- if sub.hbMonitor != nil {
- sub.hbMonitor.Reset(2 * sub.consumeOpts.Heartbeat)
- }
- sub.resetPendingMsgs()
- }
- sub.Unlock()
- case <-sub.done:
- return
- }
- }
- }()
-
- go sub.pullMessages(subject)
-
- return sub, nil
-}
-
-// resetPendingMsgs resets pending message count and byte count
-// to the values set in consumeOpts
-// lock should be held before calling this method
-func (s *pullSubscription) resetPendingMsgs() {
- s.pending.msgCount = s.consumeOpts.MaxMessages
- s.pending.byteCount = s.consumeOpts.MaxBytes
-}
-
-// decrementPendingMsgs decrements pending message count and byte count
-// lock should be held before calling this method
-func (s *pullSubscription) decrementPendingMsgs(msg *nats.Msg) {
- s.pending.msgCount--
- if s.consumeOpts.MaxBytes != 0 {
- s.pending.byteCount -= msg.Size()
- }
-}
-
-// incrementDeliveredMsgs increments delivered message count
-// lock should be held before calling this method
-func (s *pullSubscription) incrementDeliveredMsgs() {
- s.delivered++
-}
-
-// checkPending verifies whether there are enough messages in
-// the buffer to trigger a new pull request.
-// lock should be held before calling this method
-func (s *pullSubscription) checkPending() {
- if (s.pending.msgCount < s.consumeOpts.ThresholdMessages ||
- (s.pending.byteCount < s.consumeOpts.ThresholdBytes && s.consumeOpts.MaxBytes != 0)) &&
- atomic.LoadUint32(&s.fetchInProgress) == 0 {
-
- var batchSize, maxBytes int
- if s.consumeOpts.MaxBytes == 0 {
- // if using messages, calculate appropriate batch size
- batchSize = s.consumeOpts.MaxMessages - s.pending.msgCount
- } else {
- // if using bytes, use the max value
- batchSize = s.consumeOpts.MaxMessages
- maxBytes = s.consumeOpts.MaxBytes - s.pending.byteCount
- }
- if s.consumeOpts.StopAfter > 0 {
- batchSize = min(batchSize, s.consumeOpts.StopAfter-s.delivered-s.pending.msgCount)
- }
- if batchSize > 0 {
- s.fetchNext <- &pullRequest{
- Expires: s.consumeOpts.Expires,
- Batch: batchSize,
- MaxBytes: maxBytes,
- Heartbeat: s.consumeOpts.Heartbeat,
- }
-
- s.pending.msgCount = s.consumeOpts.MaxMessages
- s.pending.byteCount = s.consumeOpts.MaxBytes
- }
- }
-}
-
-// Messages returns MessagesContext, allowing continuously iterating
-// over messages on a stream. Messages cannot be used concurrently
-// when using ordered consumer.
-//
-// See [Consumer.Messages] for more details.
-func (p *pullConsumer) Messages(opts ...PullMessagesOpt) (MessagesContext, error) {
- consumeOpts, err := parseMessagesOpts(false, opts...)
- if err != nil {
- return nil, fmt.Errorf("%w: %s", ErrInvalidOption, err)
- }
-
- p.Lock()
- subject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiRequestNextT, p.stream, p.name))
-
- msgs := make(chan *nats.Msg, consumeOpts.MaxMessages)
-
- // for single consume, use empty string as id
- // this is useful for ordered consumer, where only a single subscription is valid
- var consumeID string
- if len(p.subscriptions) > 0 {
- consumeID = nuid.Next()
- }
- sub := &pullSubscription{
- id: consumeID,
- consumer: p,
- done: make(chan struct{}, 1),
- msgs: msgs,
- errs: make(chan error, 1),
- fetchNext: make(chan *pullRequest, 1),
- consumeOpts: consumeOpts,
- }
- sub.connStatusChanged = p.jetStream.conn.StatusChanged(nats.CONNECTED, nats.RECONNECTING)
- inbox := p.jetStream.conn.NewInbox()
- sub.subscription, err = p.jetStream.conn.ChanSubscribe(inbox, sub.msgs)
- if err != nil {
- p.Unlock()
- return nil, err
- }
- sub.subscription.SetClosedHandler(func(sid string) func(string) {
- return func(subject string) {
- p.Lock()
- defer p.Unlock()
- if atomic.LoadUint32(&sub.draining) != 1 {
- // if we're not draining, subscription can be closed as soon
- // as closed handler is called
- // otherwise, we need to wait until all messages are drained
- // in Next
- delete(p.subscriptions, sid)
- }
- close(msgs)
- }
- }(sub.id))
-
- p.subscriptions[sub.id] = sub
- p.Unlock()
-
- go sub.pullMessages(subject)
-
- go func() {
- for {
- select {
- case status, ok := <-sub.connStatusChanged:
- if !ok {
- return
- }
- if status == nats.CONNECTED {
- sub.errs <- errConnected
- }
- if status == nats.RECONNECTING {
- sub.errs <- errDisconnected
- }
- case <-sub.done:
- return
- }
- }
- }()
-
- return sub, nil
-}
-
-var (
- errConnected = errors.New("connected")
- errDisconnected = errors.New("disconnected")
-)
-
-// Next retrieves next message on a stream. It will block until the next
-// message is available. If the context is canceled, Next will return
-// ErrMsgIteratorClosed error.
-func (s *pullSubscription) Next() (Msg, error) {
- s.Lock()
- defer s.Unlock()
- drainMode := atomic.LoadUint32(&s.draining) == 1
- closed := atomic.LoadUint32(&s.closed) == 1
- if closed && !drainMode {
- return nil, ErrMsgIteratorClosed
- }
- hbMonitor := s.scheduleHeartbeatCheck(2 * s.consumeOpts.Heartbeat)
- defer func() {
- if hbMonitor != nil {
- hbMonitor.Stop()
- }
- }()
-
- isConnected := true
- if s.consumeOpts.StopAfter > 0 && s.delivered >= s.consumeOpts.StopAfter {
- s.Stop()
- return nil, ErrMsgIteratorClosed
- }
-
- for {
- s.checkPending()
- select {
- case msg, ok := <-s.msgs:
- if !ok {
- // if msgs channel is closed, it means that subscription was either drained or stopped
- delete(s.consumer.subscriptions, s.id)
- atomic.CompareAndSwapUint32(&s.draining, 1, 0)
- return nil, ErrMsgIteratorClosed
- }
- if hbMonitor != nil {
- hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
- }
- userMsg, msgErr := checkMsg(msg)
- if !userMsg {
- // heartbeat message
- if msgErr == nil {
- continue
- }
- if err := s.handleStatusMsg(msg, msgErr); err != nil {
- s.Stop()
- return nil, err
- }
- continue
- }
- s.decrementPendingMsgs(msg)
- s.incrementDeliveredMsgs()
- return s.consumer.jetStream.toJSMsg(msg), nil
- case err := <-s.errs:
- if errors.Is(err, ErrNoHeartbeat) {
- s.pending.msgCount = 0
- s.pending.byteCount = 0
- if s.consumeOpts.ReportMissingHeartbeats {
- return nil, err
- }
- if hbMonitor != nil {
- hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
- }
- }
- if errors.Is(err, errConnected) {
- if !isConnected {
- isConnected = true
- // try fetching consumer info several times to make sure consumer is available after reconnect
- backoffOpts := backoffOpts{
- attempts: 10,
- initialInterval: 1 * time.Second,
- disableInitialExecution: true,
- factor: 2,
- maxInterval: 10 * time.Second,
- cancel: s.done,
- }
- err = retryWithBackoff(func(attempt int) (bool, error) {
- isClosed := atomic.LoadUint32(&s.closed) == 1
- if isClosed {
- return false, nil
- }
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- _, err := s.consumer.Info(ctx)
- if err != nil {
- if errors.Is(err, ErrConsumerNotFound) {
- return false, err
- }
- if attempt == backoffOpts.attempts-1 {
- return true, fmt.Errorf("could not get consumer info after server reconnect: %w", err)
- }
- return true, err
- }
- return false, nil
- }, backoffOpts)
- if err != nil {
- s.Stop()
- return nil, err
- }
-
- s.pending.msgCount = 0
- s.pending.byteCount = 0
- if hbMonitor != nil {
- hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
- }
- }
- }
- if errors.Is(err, errDisconnected) {
- if hbMonitor != nil {
- hbMonitor.Reset(2 * s.consumeOpts.Heartbeat)
- }
- isConnected = false
- }
- }
- }
-}
-
-func (s *pullSubscription) handleStatusMsg(msg *nats.Msg, msgErr error) error {
- if !errors.Is(msgErr, nats.ErrTimeout) && !errors.Is(msgErr, ErrMaxBytesExceeded) {
- if errors.Is(msgErr, ErrConsumerDeleted) || errors.Is(msgErr, ErrBadRequest) {
- return msgErr
- }
- if s.consumeOpts.ErrHandler != nil {
- s.consumeOpts.ErrHandler(s, msgErr)
- }
- if errors.Is(msgErr, ErrConsumerLeadershipChanged) {
- s.pending.msgCount = 0
- s.pending.byteCount = 0
- }
- return nil
- }
- msgsLeft, bytesLeft, err := parsePending(msg)
- if err != nil {
- return err
- }
- s.pending.msgCount -= msgsLeft
- if s.pending.msgCount < 0 {
- s.pending.msgCount = 0
- }
- if s.consumeOpts.MaxBytes > 0 {
- s.pending.byteCount -= bytesLeft
- if s.pending.byteCount < 0 {
- s.pending.byteCount = 0
- }
- }
- return nil
-}
-
-func (hb *hbMonitor) Stop() {
- hb.Mutex.Lock()
- hb.timer.Stop()
- hb.Mutex.Unlock()
-}
-
-func (hb *hbMonitor) Reset(dur time.Duration) {
- hb.Mutex.Lock()
- hb.timer.Reset(dur)
- hb.Mutex.Unlock()
-}
-
-// Stop unsubscribes from the stream and cancels subscription. Calling
-// Next after calling Stop will return ErrMsgIteratorClosed error.
-// All messages that are already in the buffer are discarded.
-func (s *pullSubscription) Stop() {
- if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
- return
- }
- close(s.done)
- if s.consumeOpts.stopAfterMsgsLeft != nil {
- if s.delivered >= s.consumeOpts.StopAfter {
- close(s.consumeOpts.stopAfterMsgsLeft)
- } else {
- s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered
- }
- }
-}
-
-// Drain unsubscribes from the stream and cancels subscription. All
-// messages that are already in the buffer will be available on
-// subsequent calls to Next. After the buffer is drained, Next will
-// return ErrMsgIteratorClosed error.
-func (s *pullSubscription) Drain() {
- if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
- return
- }
- atomic.StoreUint32(&s.draining, 1)
- close(s.done)
- if s.consumeOpts.stopAfterMsgsLeft != nil {
- if s.delivered >= s.consumeOpts.StopAfter {
- close(s.consumeOpts.stopAfterMsgsLeft)
- } else {
- s.consumeOpts.stopAfterMsgsLeft <- s.consumeOpts.StopAfter - s.delivered
- }
- }
-}
-
-// Fetch sends a single request to retrieve given number of messages.
-// It will wait up to provided expiry time if not all messages are available.
-func (p *pullConsumer) Fetch(batch int, opts ...FetchOpt) (MessageBatch, error) {
- req := &pullRequest{
- Batch: batch,
- Expires: DefaultExpires,
- Heartbeat: unset,
- }
- for _, opt := range opts {
- if err := opt(req); err != nil {
- return nil, err
- }
- }
- // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls
- // and disable it for shorter pulls
- if req.Heartbeat == unset {
- if req.Expires >= 10*time.Second {
- req.Heartbeat = 5 * time.Second
- } else {
- req.Heartbeat = 0
- }
- }
- if req.Expires < 2*req.Heartbeat {
- return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption)
- }
-
- return p.fetch(req)
-}
-
-// FetchBytes is used to retrieve up to a provided bytes from the stream.
-func (p *pullConsumer) FetchBytes(maxBytes int, opts ...FetchOpt) (MessageBatch, error) {
- req := &pullRequest{
- Batch: 1000000,
- MaxBytes: maxBytes,
- Expires: DefaultExpires,
- Heartbeat: unset,
- }
- for _, opt := range opts {
- if err := opt(req); err != nil {
- return nil, err
- }
- }
- // if heartbeat was not explicitly set, set it to 5 seconds for longer pulls
- // and disable it for shorter pulls
- if req.Heartbeat == unset {
- if req.Expires >= 10*time.Second {
- req.Heartbeat = 5 * time.Second
- } else {
- req.Heartbeat = 0
- }
- }
- if req.Expires < 2*req.Heartbeat {
- return nil, fmt.Errorf("%w: expiry time should be at least 2 times the heartbeat", ErrInvalidOption)
- }
-
- return p.fetch(req)
-}
-
-// FetchNoWait sends a single request to retrieve given number of messages.
-// FetchNoWait will only return messages that are available at the time of the
-// request. It will not wait for more messages to arrive.
-func (p *pullConsumer) FetchNoWait(batch int) (MessageBatch, error) {
- req := &pullRequest{
- Batch: batch,
- NoWait: true,
- }
-
- return p.fetch(req)
-}
-
-func (p *pullConsumer) fetch(req *pullRequest) (MessageBatch, error) {
- res := &fetchResult{
- msgs: make(chan Msg, req.Batch),
- }
- msgs := make(chan *nats.Msg, 2*req.Batch)
- subject := apiSubj(p.jetStream.apiPrefix, fmt.Sprintf(apiRequestNextT, p.stream, p.name))
-
- sub := &pullSubscription{
- consumer: p,
- done: make(chan struct{}, 1),
- msgs: msgs,
- errs: make(chan error, 1),
- }
- inbox := p.jetStream.conn.NewInbox()
- var err error
- sub.subscription, err = p.jetStream.conn.ChanSubscribe(inbox, sub.msgs)
- if err != nil {
- return nil, err
- }
- if err := sub.pull(req, subject); err != nil {
- return nil, err
- }
-
- var receivedMsgs, receivedBytes int
- hbTimer := sub.scheduleHeartbeatCheck(req.Heartbeat)
- go func(res *fetchResult) {
- defer sub.subscription.Unsubscribe()
- defer close(res.msgs)
- for {
- select {
- case msg := <-msgs:
- p.Lock()
- if hbTimer != nil {
- hbTimer.Reset(2 * req.Heartbeat)
- }
- userMsg, err := checkMsg(msg)
- if err != nil {
- errNotTimeoutOrNoMsgs := !errors.Is(err, nats.ErrTimeout) && !errors.Is(err, ErrNoMessages)
- if errNotTimeoutOrNoMsgs && !errors.Is(err, ErrMaxBytesExceeded) {
- res.err = err
- }
- res.done = true
- p.Unlock()
- return
- }
- if !userMsg {
- p.Unlock()
- continue
- }
- res.msgs <- p.jetStream.toJSMsg(msg)
- meta, err := msg.Metadata()
- if err != nil {
- res.err = fmt.Errorf("parsing message metadata: %s", err)
- }
- res.sseq = meta.Sequence.Stream
- receivedMsgs++
- if req.MaxBytes != 0 {
- receivedBytes += msg.Size()
- }
- if receivedMsgs == req.Batch || (req.MaxBytes != 0 && receivedBytes >= req.MaxBytes) {
- res.done = true
- p.Unlock()
- return
- }
- p.Unlock()
- case err := <-sub.errs:
- res.err = err
- res.done = true
- return
- case <-time.After(req.Expires + 1*time.Second):
- res.done = true
- return
- }
- }
- }(res)
- return res, nil
-}
-
-func (fr *fetchResult) Messages() <-chan Msg {
- return fr.msgs
-}
-
-func (fr *fetchResult) Error() error {
- return fr.err
-}
-
-// Next is used to retrieve the next message from the stream. This
-// method will block until the message is retrieved or timeout is
-// reached.
-func (p *pullConsumer) Next(opts ...FetchOpt) (Msg, error) {
- res, err := p.Fetch(1, opts...)
- if err != nil {
- return nil, err
- }
- msg := <-res.Messages()
- if msg != nil {
- return msg, nil
- }
- if res.Error() == nil {
- return nil, nats.ErrTimeout
- }
- return nil, res.Error()
-}
-
-func (s *pullSubscription) pullMessages(subject string) {
- for {
- select {
- case req := <-s.fetchNext:
- atomic.StoreUint32(&s.fetchInProgress, 1)
-
- if err := s.pull(req, subject); err != nil {
- if errors.Is(err, ErrMsgIteratorClosed) {
- s.cleanup()
- return
- }
- s.errs <- err
- }
- atomic.StoreUint32(&s.fetchInProgress, 0)
- case <-s.done:
- s.cleanup()
- return
- }
- }
-}
-
-func (s *pullSubscription) scheduleHeartbeatCheck(dur time.Duration) *hbMonitor {
- if dur == 0 {
- return nil
- }
- return &hbMonitor{
- timer: time.AfterFunc(2*dur, func() {
- s.errs <- ErrNoHeartbeat
- }),
- }
-}
-
-func (s *pullSubscription) cleanup() {
- // For now this function does not need to hold the lock.
- // Holding the lock here might cause a deadlock if Next()
- // is already holding the lock and waiting.
- // The fields that are read (subscription, hbMonitor)
- // are read only (Only written on creation of pullSubscription).
- if s.subscription == nil || !s.subscription.IsValid() {
- return
- }
- if s.hbMonitor != nil {
- s.hbMonitor.Stop()
- }
- drainMode := atomic.LoadUint32(&s.draining) == 1
- if drainMode {
- s.subscription.Drain()
- } else {
- s.subscription.Unsubscribe()
- }
- atomic.StoreUint32(&s.closed, 1)
-}
-
-// pull sends a pull request to the server and waits for messages using a subscription from [pullSubscription].
-// Messages will be fetched up to given batch_size or until there are no more messages or timeout is returned
-func (s *pullSubscription) pull(req *pullRequest, subject string) error {
- s.consumer.Lock()
- defer s.consumer.Unlock()
- if atomic.LoadUint32(&s.closed) == 1 {
- return ErrMsgIteratorClosed
- }
- if req.Batch < 1 {
- return fmt.Errorf("%w: batch size must be at least 1", nats.ErrInvalidArg)
- }
- reqJSON, err := json.Marshal(req)
- if err != nil {
- return err
- }
-
- reply := s.subscription.Subject
- if err := s.consumer.jetStream.conn.PublishRequest(subject, reply, reqJSON); err != nil {
- return err
- }
- return nil
-}
-
-func parseConsumeOpts(ordered bool, opts ...PullConsumeOpt) (*consumeOpts, error) {
- consumeOpts := &consumeOpts{
- MaxMessages: unset,
- MaxBytes: unset,
- Expires: DefaultExpires,
- Heartbeat: unset,
- ReportMissingHeartbeats: true,
- StopAfter: unset,
- }
- for _, opt := range opts {
- if err := opt.configureConsume(consumeOpts); err != nil {
- return nil, err
- }
- }
- if err := consumeOpts.setDefaults(ordered); err != nil {
- return nil, err
- }
- return consumeOpts, nil
-}
-
-func parseMessagesOpts(ordered bool, opts ...PullMessagesOpt) (*consumeOpts, error) {
- consumeOpts := &consumeOpts{
- MaxMessages: unset,
- MaxBytes: unset,
- Expires: DefaultExpires,
- Heartbeat: unset,
- ReportMissingHeartbeats: true,
- StopAfter: unset,
- }
- for _, opt := range opts {
- if err := opt.configureMessages(consumeOpts); err != nil {
- return nil, err
- }
- }
- if err := consumeOpts.setDefaults(ordered); err != nil {
- return nil, err
- }
- return consumeOpts, nil
-}
-
-func (consumeOpts *consumeOpts) setDefaults(ordered bool) error {
- if consumeOpts.MaxBytes != unset && consumeOpts.MaxMessages != unset {
- return fmt.Errorf("only one of MaxMessages and MaxBytes can be specified")
- }
- if consumeOpts.MaxBytes != unset {
- // when max_bytes is used, set batch size to a very large number
- consumeOpts.MaxMessages = 1000000
- } else if consumeOpts.MaxMessages != unset {
- consumeOpts.MaxBytes = 0
- } else {
- if consumeOpts.MaxBytes == unset {
- consumeOpts.MaxBytes = 0
- }
- if consumeOpts.MaxMessages == unset {
- consumeOpts.MaxMessages = DefaultMaxMessages
- }
- }
-
- if consumeOpts.ThresholdMessages == 0 {
- consumeOpts.ThresholdMessages = int(math.Ceil(float64(consumeOpts.MaxMessages) / 2))
- }
- if consumeOpts.ThresholdBytes == 0 {
- consumeOpts.ThresholdBytes = int(math.Ceil(float64(consumeOpts.MaxBytes) / 2))
- }
- if consumeOpts.Heartbeat == unset {
- if ordered {
- consumeOpts.Heartbeat = 5 * time.Second
- if consumeOpts.Expires < 10*time.Second {
- consumeOpts.Heartbeat = consumeOpts.Expires / 2
- }
- } else {
- consumeOpts.Heartbeat = consumeOpts.Expires / 2
- if consumeOpts.Heartbeat > 30*time.Second {
- consumeOpts.Heartbeat = 30 * time.Second
- }
- }
- }
- if consumeOpts.Heartbeat > consumeOpts.Expires/2 {
- return fmt.Errorf("the value of Heartbeat must be less than 50%% of expiry")
- }
- return nil
-}
-
-type backoffOpts struct {
- // total retry attempts
- // -1 for unlimited
- attempts int
- // initial interval after which first retry will be performed
- // defaults to 1s
- initialInterval time.Duration
- // determines whether first function execution should be performed immediately
- disableInitialExecution bool
- // multiplier on each attempt
- // defaults to 2
- factor float64
- // max interval between retries
- // after reaching this value, all subsequent
- // retries will be performed with this interval
- // defaults to 1 minute
- maxInterval time.Duration
- // custom backoff intervals
- // if set, overrides all other options except attempts
- // if attempts are set, then the last interval will be used
- // for all subsequent retries after reaching the limit
- customBackoff []time.Duration
- // cancel channel
- // if set, retry will be canceled when this channel is closed
- cancel <-chan struct{}
-}
-
-func retryWithBackoff(f func(int) (bool, error), opts backoffOpts) error {
- var err error
- var shouldContinue bool
- // if custom backoff is set, use it instead of other options
- if len(opts.customBackoff) > 0 {
- if opts.attempts != 0 {
- return fmt.Errorf("cannot use custom backoff intervals when attempts are set")
- }
- for i, interval := range opts.customBackoff {
- select {
- case <-opts.cancel:
- return nil
- case <-time.After(interval):
- }
- shouldContinue, err = f(i)
- if !shouldContinue {
- return err
- }
- }
- return err
- }
-
- // set default options
- if opts.initialInterval == 0 {
- opts.initialInterval = 1 * time.Second
- }
- if opts.factor == 0 {
- opts.factor = 2
- }
- if opts.maxInterval == 0 {
- opts.maxInterval = 1 * time.Minute
- }
- if opts.attempts == 0 {
- return fmt.Errorf("retry attempts have to be set when not using custom backoff intervals")
- }
- interval := opts.initialInterval
- for i := 0; ; i++ {
- if i == 0 && opts.disableInitialExecution {
- time.Sleep(interval)
- continue
- }
- shouldContinue, err = f(i)
- if !shouldContinue {
- return err
- }
- if opts.attempts > 0 && i >= opts.attempts-1 {
- break
- }
- select {
- case <-opts.cancel:
- return nil
- case <-time.After(interval):
- }
- interval = time.Duration(float64(interval) * opts.factor)
- if interval >= opts.maxInterval {
- interval = opts.maxInterval
- }
- }
- return err
-}
-
-func (c *pullConsumer) getSubscription(id string) (*pullSubscription, bool) {
- c.Lock()
- defer c.Unlock()
- sub, ok := c.subscriptions[id]
- return sub, ok
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/stream.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/stream.go
deleted file mode 100644
index 01c9d58..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/stream.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
- "time"
-
- "github.com/nats-io/nats.go"
- "github.com/nats-io/nuid"
-)
-
-type (
- // Stream contains CRUD methods on a consumer via [ConsumerManager], as well
- // as operations on an existing stream. It allows fetching and removing
- // messages from a stream, as well as purging a stream.
- Stream interface {
- ConsumerManager
-
- // Info returns StreamInfo from the server.
- Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error)
-
- // CachedInfo returns ConsumerInfo currently cached on this stream.
- // This method does not perform any network requests. The cached
- // StreamInfo is updated on every call to Info and Update.
- CachedInfo() *StreamInfo
-
- // Purge removes messages from a stream. It is a destructive operation.
- // Use with caution. See StreamPurgeOpt for available options.
- Purge(ctx context.Context, opts ...StreamPurgeOpt) error
-
- // GetMsg retrieves a raw stream message stored in JetStream by sequence number.
- GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error)
-
- // GetLastMsgForSubject retrieves the last raw stream message stored in
- // JetStream on a given subject subject.
- GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error)
-
- // DeleteMsg deletes a message from a stream.
- // On the server, the message is marked as erased, but not overwritten.
- DeleteMsg(ctx context.Context, seq uint64) error
-
- // SecureDeleteMsg deletes a message from a stream. The deleted message
- // is overwritten with random data. As a result, this operation is slower
- // than DeleteMsg.
- SecureDeleteMsg(ctx context.Context, seq uint64) error
- }
-
- // ConsumerManager provides CRUD API for managing consumers. It is
- // available as a part of [Stream] interface. CreateConsumer,
- // UpdateConsumer, CreateOrUpdateConsumer and Consumer methods return a
- // [Consumer] interface, allowing to operate on a consumer (e.g. consume
- // messages).
- ConsumerManager interface {
- // CreateOrUpdateConsumer creates a consumer on a given stream with
- // given config. If consumer already exists, it will be updated (if
- // possible). Consumer interface is returned, allowing to operate on a
- // consumer (e.g. fetch messages).
- CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
-
- // CreateConsumer creates a consumer on a given stream with given
- // config. If consumer already exists and the provided configuration
- // differs from its configuration, ErrConsumerExists is returned. If the
- // provided configuration is the same as the existing consumer, the
- // existing consumer is returned. Consumer interface is returned,
- // allowing to operate on a consumer (e.g. fetch messages).
- CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
-
- // UpdateConsumer updates an existing consumer. If consumer does not
- // exist, ErrConsumerDoesNotExist is returned. Consumer interface is
- // returned, allowing to operate on a consumer (e.g. fetch messages).
- UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error)
-
- // OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer
- // are managed by the library and provide a simple way to consume
- // messages from a stream. Ordered consumers are ephemeral in-memory
- // pull consumers and are resilient to deletes and restarts.
- OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error)
-
- // Consumer returns an interface to an existing consumer, allowing processing
- // of messages. If consumer does not exist, ErrConsumerNotFound is
- // returned.
- Consumer(ctx context.Context, consumer string) (Consumer, error)
-
- // DeleteConsumer removes a consumer with given name from a stream.
- // If consumer does not exist, ErrConsumerNotFound is returned.
- DeleteConsumer(ctx context.Context, consumer string) error
-
- // ListConsumers returns ConsumerInfoLister enabling iterating over a
- // channel of consumer infos.
- ListConsumers(context.Context) ConsumerInfoLister
-
- // ConsumerNames returns a ConsumerNameLister enabling iterating over a
- // channel of consumer names.
- ConsumerNames(context.Context) ConsumerNameLister
- }
-
- RawStreamMsg struct {
- Subject string
- Sequence uint64
- Header nats.Header
- Data []byte
- Time time.Time
- }
-
- stream struct {
- name string
- info *StreamInfo
- jetStream *jetStream
- }
-
- // StreamInfoOpt is a function setting options for [Stream.Info]
- StreamInfoOpt func(*streamInfoRequest) error
-
- streamInfoRequest struct {
- apiPaged
- DeletedDetails bool `json:"deleted_details,omitempty"`
- SubjectFilter string `json:"subjects_filter,omitempty"`
- }
-
- consumerInfoResponse struct {
- apiResponse
- *ConsumerInfo
- }
-
- // StreamPurgeOpt is a function setting options for [Stream.Purge]
- StreamPurgeOpt func(*StreamPurgeRequest) error
-
- // StreamPurgeRequest is an API request body to purge a stream.
-
- StreamPurgeRequest struct {
- // Purge up to but not including sequence.
- Sequence uint64 `json:"seq,omitempty"`
- // Subject to match against messages for the purge command.
- Subject string `json:"filter,omitempty"`
- // Number of messages to keep.
- Keep uint64 `json:"keep,omitempty"`
- }
-
- streamPurgeResponse struct {
- apiResponse
- Success bool `json:"success,omitempty"`
- Purged uint64 `json:"purged"`
- }
-
- consumerDeleteResponse struct {
- apiResponse
- Success bool `json:"success,omitempty"`
- }
-
- // GetMsgOpt is a function setting options for [Stream.GetMsg]
- GetMsgOpt func(*apiMsgGetRequest) error
-
- apiMsgGetRequest struct {
- Seq uint64 `json:"seq,omitempty"`
- LastFor string `json:"last_by_subj,omitempty"`
- NextFor string `json:"next_by_subj,omitempty"`
- }
-
- // apiMsgGetResponse is the response for a Stream get request.
- apiMsgGetResponse struct {
- apiResponse
- Message *storedMsg `json:"message,omitempty"`
- }
-
- // storedMsg is a raw message stored in JetStream.
- storedMsg struct {
- Subject string `json:"subject"`
- Sequence uint64 `json:"seq"`
- Header []byte `json:"hdrs,omitempty"`
- Data []byte `json:"data,omitempty"`
- Time time.Time `json:"time"`
- }
-
- msgDeleteRequest struct {
- Seq uint64 `json:"seq"`
- NoErase bool `json:"no_erase,omitempty"`
- }
-
- msgDeleteResponse struct {
- apiResponse
- Success bool `json:"success,omitempty"`
- }
-
- // ConsumerInfoLister is used to iterate over a channel of consumer infos.
- // Err method can be used to check for errors encountered during iteration.
- // Info channel is always closed and therefore can be used in a range loop.
- ConsumerInfoLister interface {
- Info() <-chan *ConsumerInfo
- Err() error
- }
-
- // ConsumerNameLister is used to iterate over a channel of consumer names.
- // Err method can be used to check for errors encountered during iteration.
- // Name channel is always closed and therefore can be used in a range loop.
- ConsumerNameLister interface {
- Name() <-chan string
- Err() error
- }
-
- consumerLister struct {
- js *jetStream
- offset int
- pageInfo *apiPaged
-
- consumers chan *ConsumerInfo
- names chan string
- err error
- }
-
- consumerListResponse struct {
- apiResponse
- apiPaged
- Consumers []*ConsumerInfo `json:"consumers"`
- }
-
- consumerNamesResponse struct {
- apiResponse
- apiPaged
- Consumers []string `json:"consumers"`
- }
-)
-
-// CreateOrUpdateConsumer creates a consumer on a given stream with
-// given config. If consumer already exists, it will be updated (if
-// possible). Consumer interface is returned, allowing to operate on a
-// consumer (e.g. fetch messages).
-func (s *stream) CreateOrUpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
- return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionCreateOrUpdate)
-}
-
-// CreateConsumer creates a consumer on a given stream with given
-// config. If consumer already exists and the provided configuration
-// differs from its configuration, ErrConsumerExists is returned. If the
-// provided configuration is the same as the existing consumer, the
-// existing consumer is returned. Consumer interface is returned,
-// allowing to operate on a consumer (e.g. fetch messages).
-func (s *stream) CreateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
- return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionCreate)
-}
-
-// UpdateConsumer updates an existing consumer. If consumer does not
-// exist, ErrConsumerDoesNotExist is returned. Consumer interface is
-// returned, allowing to operate on a consumer (e.g. fetch messages).
-func (s *stream) UpdateConsumer(ctx context.Context, cfg ConsumerConfig) (Consumer, error) {
- return upsertConsumer(ctx, s.jetStream, s.name, cfg, consumerActionUpdate)
-}
-
-// OrderedConsumer returns an OrderedConsumer instance. OrderedConsumer
-// are managed by the library and provide a simple way to consume
-// messages from a stream. Ordered consumers are ephemeral in-memory
-// pull consumers and are resilient to deletes and restarts.
-func (s *stream) OrderedConsumer(ctx context.Context, cfg OrderedConsumerConfig) (Consumer, error) {
- oc := &orderedConsumer{
- jetStream: s.jetStream,
- cfg: &cfg,
- stream: s.name,
- namePrefix: nuid.Next(),
- doReset: make(chan struct{}, 1),
- }
- if cfg.OptStartSeq != 0 {
- oc.cursor.streamSeq = cfg.OptStartSeq - 1
- }
- err := oc.reset()
- if err != nil {
- return nil, err
- }
-
- return oc, nil
-}
-
-// Consumer returns an interface to an existing consumer, allowing processing
-// of messages. If consumer does not exist, ErrConsumerNotFound is
-// returned.
-func (s *stream) Consumer(ctx context.Context, name string) (Consumer, error) {
- return getConsumer(ctx, s.jetStream, s.name, name)
-}
-
-// DeleteConsumer removes a consumer with given name from a stream.
-// If consumer does not exist, ErrConsumerNotFound is returned.
-func (s *stream) DeleteConsumer(ctx context.Context, name string) error {
- return deleteConsumer(ctx, s.jetStream, s.name, name)
-}
-
-// Info returns StreamInfo from the server.
-func (s *stream) Info(ctx context.Context, opts ...StreamInfoOpt) (*StreamInfo, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- var infoReq *streamInfoRequest
- for _, opt := range opts {
- if infoReq == nil {
- infoReq = &streamInfoRequest{}
- }
- if err := opt(infoReq); err != nil {
- return nil, err
- }
- }
- var req []byte
- var err error
- var subjectMap map[string]uint64
- var offset int
-
- infoSubject := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiStreamInfoT, s.name))
- var info *StreamInfo
- for {
- if infoReq != nil {
- if infoReq.SubjectFilter != "" {
- if subjectMap == nil {
- subjectMap = make(map[string]uint64)
- }
- infoReq.Offset = offset
- }
- req, err = json.Marshal(infoReq)
- if err != nil {
- return nil, err
- }
- }
- var resp streamInfoResponse
- if _, err = s.jetStream.apiRequestJSON(ctx, infoSubject, &resp, req); err != nil {
- return nil, err
- }
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeStreamNotFound {
- return nil, ErrStreamNotFound
- }
- return nil, resp.Error
- }
- info = resp.StreamInfo
- var total int
- if resp.Total != 0 {
- total = resp.Total
- }
- if len(resp.StreamInfo.State.Subjects) > 0 {
- for subj, msgs := range resp.StreamInfo.State.Subjects {
- subjectMap[subj] = msgs
- }
- offset = len(subjectMap)
- }
- if total == 0 || total <= offset {
- info.State.Subjects = nil
- // we don't want to store subjects in cache
- cached := *info
- s.info = &cached
- info.State.Subjects = subjectMap
- break
- }
- }
-
- return info, nil
-}
-
-// CachedInfo returns ConsumerInfo currently cached on this stream.
-// This method does not perform any network requests. The cached
-// StreamInfo is updated on every call to Info and Update.
-func (s *stream) CachedInfo() *StreamInfo {
- return s.info
-}
-
-// Purge removes messages from a stream. It is a destructive operation.
-// Use with caution. See StreamPurgeOpt for available options.
-func (s *stream) Purge(ctx context.Context, opts ...StreamPurgeOpt) error {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- var purgeReq StreamPurgeRequest
- for _, opt := range opts {
- if err := opt(&purgeReq); err != nil {
- return err
- }
- }
- var req []byte
- var err error
- req, err = json.Marshal(purgeReq)
- if err != nil {
- return err
- }
-
- purgeSubject := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiStreamPurgeT, s.name))
-
- var resp streamPurgeResponse
- if _, err = s.jetStream.apiRequestJSON(ctx, purgeSubject, &resp, req); err != nil {
- return err
- }
- if resp.Error != nil {
- return resp.Error
- }
-
- return nil
-}
-
-// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
-func (s *stream) GetMsg(ctx context.Context, seq uint64, opts ...GetMsgOpt) (*RawStreamMsg, error) {
- req := &apiMsgGetRequest{Seq: seq}
- for _, opt := range opts {
- if err := opt(req); err != nil {
- return nil, err
- }
- }
- return s.getMsg(ctx, req)
-}
-
-// GetLastMsgForSubject retrieves the last raw stream message stored in
-// JetStream on a given subject subject.
-func (s *stream) GetLastMsgForSubject(ctx context.Context, subject string) (*RawStreamMsg, error) {
- return s.getMsg(ctx, &apiMsgGetRequest{LastFor: subject})
-}
-
-func (s *stream) getMsg(ctx context.Context, mreq *apiMsgGetRequest) (*RawStreamMsg, error) {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- req, err := json.Marshal(mreq)
- if err != nil {
- return nil, err
- }
- var gmSubj string
-
- // handle direct gets
- if s.info.Config.AllowDirect {
- if mreq.LastFor != "" {
- gmSubj = apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiDirectMsgGetLastBySubjectT, s.name, mreq.LastFor))
- r, err := s.jetStream.apiRequest(ctx, gmSubj, nil)
- if err != nil {
- return nil, err
- }
- return convertDirectGetMsgResponseToMsg(s.name, r.msg)
- }
- gmSubj = apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiDirectMsgGetT, s.name))
- r, err := s.jetStream.apiRequest(ctx, gmSubj, req)
- if err != nil {
- return nil, err
- }
- return convertDirectGetMsgResponseToMsg(s.name, r.msg)
- }
-
- var resp apiMsgGetResponse
- dsSubj := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiMsgGetT, s.name))
- _, err = s.jetStream.apiRequestJSON(ctx, dsSubj, &resp, req)
- if err != nil {
- return nil, err
- }
-
- if resp.Error != nil {
- if resp.Error.ErrorCode == JSErrCodeMessageNotFound {
- return nil, ErrMsgNotFound
- }
- return nil, resp.Error
- }
-
- msg := resp.Message
-
- var hdr nats.Header
- if len(msg.Header) > 0 {
- hdr, err = nats.DecodeHeadersMsg(msg.Header)
- if err != nil {
- return nil, err
- }
- }
-
- return &RawStreamMsg{
- Subject: msg.Subject,
- Sequence: msg.Sequence,
- Header: hdr,
- Data: msg.Data,
- Time: msg.Time,
- }, nil
-}
-
-func convertDirectGetMsgResponseToMsg(name string, r *nats.Msg) (*RawStreamMsg, error) {
- // Check for 404/408. We would get a no-payload message and a "Status" header
- if len(r.Data) == 0 {
- val := r.Header.Get(statusHdr)
- if val != "" {
- switch val {
- case noMessages:
- return nil, ErrMsgNotFound
- default:
- desc := r.Header.Get("Description")
- if desc == "" {
- desc = "unable to get message"
- }
- return nil, fmt.Errorf("nats: %s", desc)
- }
- }
- }
- // Check for headers that give us the required information to
- // reconstruct the message.
- if len(r.Header) == 0 {
- return nil, fmt.Errorf("nats: response should have headers")
- }
- stream := r.Header.Get(StreamHeader)
- if stream == "" {
- return nil, fmt.Errorf("nats: missing stream header")
- }
-
- seqStr := r.Header.Get(SequenceHeader)
- if seqStr == "" {
- return nil, fmt.Errorf("nats: missing sequence header")
- }
- seq, err := strconv.ParseUint(seqStr, 10, 64)
- if err != nil {
- return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err)
- }
- timeStr := r.Header.Get(TimeStampHeaer)
- if timeStr == "" {
- return nil, fmt.Errorf("nats: missing timestamp header")
- }
-
- tm, err := time.Parse(time.RFC3339Nano, timeStr)
- if err != nil {
- return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err)
- }
- subj := r.Header.Get(SubjectHeader)
- if subj == "" {
- return nil, fmt.Errorf("nats: missing subject header")
- }
- return &RawStreamMsg{
- Subject: subj,
- Sequence: seq,
- Header: r.Header,
- Data: r.Data,
- Time: tm,
- }, nil
-}
-
-// DeleteMsg deletes a message from a stream.
-// On the server, the message is marked as erased, but not overwritten.
-func (s *stream) DeleteMsg(ctx context.Context, seq uint64) error {
- return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq, NoErase: true})
-}
-
-// SecureDeleteMsg deletes a message from a stream. The deleted message
-// is overwritten with random data. As a result, this operation is slower
-// than DeleteMsg.
-func (s *stream) SecureDeleteMsg(ctx context.Context, seq uint64) error {
- return s.deleteMsg(ctx, &msgDeleteRequest{Seq: seq})
-}
-
-func (s *stream) deleteMsg(ctx context.Context, req *msgDeleteRequest) error {
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- r, err := json.Marshal(req)
- if err != nil {
- return err
- }
- subj := apiSubj(s.jetStream.apiPrefix, fmt.Sprintf(apiMsgDeleteT, s.name))
- var resp msgDeleteResponse
- if _, err = s.jetStream.apiRequestJSON(ctx, subj, &resp, r); err != nil {
- return err
- }
- if !resp.Success {
- return fmt.Errorf("%w: %s", ErrMsgDeleteUnsuccessful, err)
- }
- return nil
-}
-
-// ListConsumers returns ConsumerInfoLister enabling iterating over a
-// channel of consumer infos.
-func (s *stream) ListConsumers(ctx context.Context) ConsumerInfoLister {
- l := &consumerLister{
- js: s.jetStream,
- consumers: make(chan *ConsumerInfo),
- }
- go func() {
- defer close(l.consumers)
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- for {
- page, err := l.consumerInfos(ctx, s.name)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- l.err = err
- return
- }
- for _, info := range page {
- select {
- case <-ctx.Done():
- l.err = ctx.Err()
- return
- default:
- }
- if info != nil {
- l.consumers <- info
- }
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
-
- return l
-}
-
-func (s *consumerLister) Info() <-chan *ConsumerInfo {
- return s.consumers
-}
-
-func (s *consumerLister) Err() error {
- return s.err
-}
-
-// ConsumerNames returns a ConsumerNameLister enabling iterating over a
-// channel of consumer names.
-func (s *stream) ConsumerNames(ctx context.Context) ConsumerNameLister {
- l := &consumerLister{
- js: s.jetStream,
- names: make(chan string),
- }
- go func() {
- defer close(l.names)
- ctx, cancel := wrapContextWithoutDeadline(ctx)
- if cancel != nil {
- defer cancel()
- }
- for {
- page, err := l.consumerNames(ctx, s.name)
- if err != nil && !errors.Is(err, ErrEndOfData) {
- l.err = err
- return
- }
- for _, info := range page {
- select {
- case l.names <- info:
- case <-ctx.Done():
- l.err = ctx.Err()
- return
- }
- }
- if errors.Is(err, ErrEndOfData) {
- return
- }
- }
- }()
-
- return l
-}
-
-func (s *consumerLister) Name() <-chan string {
- return s.names
-}
-
-// consumerInfos fetches the next ConsumerInfo page
-func (s *consumerLister) consumerInfos(ctx context.Context, stream string) ([]*ConsumerInfo, error) {
- if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
- return nil, ErrEndOfData
- }
-
- req, err := json.Marshal(
- apiPagedRequest{Offset: s.offset},
- )
- if err != nil {
- return nil, err
- }
-
- slSubj := apiSubj(s.js.apiPrefix, fmt.Sprintf(apiConsumerListT, stream))
- var resp consumerListResponse
- _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req)
- if err != nil {
- return nil, err
- }
- if resp.Error != nil {
- return nil, resp.Error
- }
-
- s.pageInfo = &resp.apiPaged
- s.offset += len(resp.Consumers)
- return resp.Consumers, nil
-}
-
-// consumerNames fetches the next consumer names page
-func (s *consumerLister) consumerNames(ctx context.Context, stream string) ([]string, error) {
- if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
- return nil, ErrEndOfData
- }
-
- req, err := json.Marshal(
- apiPagedRequest{Offset: s.offset},
- )
- if err != nil {
- return nil, err
- }
-
- slSubj := apiSubj(s.js.apiPrefix, fmt.Sprintf(apiConsumerNamesT, stream))
- var resp consumerNamesResponse
- _, err = s.js.apiRequestJSON(ctx, slSubj, &resp, req)
- if err != nil {
- return nil, err
- }
- if resp.Error != nil {
- return nil, resp.Error
- }
-
- s.pageInfo = &resp.apiPaged
- s.offset += len(resp.Consumers)
- return resp.Consumers, nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go
deleted file mode 100644
index dd1f9d9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jetstream/stream_config.go
+++ /dev/null
@@ -1,606 +0,0 @@
-// Copyright 2022-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jetstream
-
-import (
- "encoding/json"
- "fmt"
- "strings"
- "time"
-
- "golang.org/x/text/cases"
- "golang.org/x/text/language"
-)
-
-type (
- // StreamInfo shows config and current state for this stream.
- StreamInfo struct {
- // Config contains the configuration settings of the stream, set when
- // creating or updating the stream.
- Config StreamConfig `json:"config"`
-
- // Created is the timestamp when the stream was created.
- Created time.Time `json:"created"`
-
- // State provides the state of the stream at the time of request,
- // including metrics like the number of messages in the stream, total
- // bytes, etc.
- State StreamState `json:"state"`
-
- // Cluster contains information about the cluster to which this stream
- // belongs (if applicable).
- Cluster *ClusterInfo `json:"cluster,omitempty"`
-
- // Mirror contains information about another stream this one is
- // mirroring. Mirroring is used to create replicas of another stream's
- // data. This field is omitted if the stream is not mirroring another
- // stream.
- Mirror *StreamSourceInfo `json:"mirror,omitempty"`
-
- // Sources is a list of source streams from which this stream collects
- // data.
- Sources []*StreamSourceInfo `json:"sources,omitempty"`
-
- // TimeStamp indicates when the info was gathered by the server.
- TimeStamp time.Time `json:"ts"`
- }
-
- // StreamConfig is the configuration of a JetStream stream.
- StreamConfig struct {
- // Name is the name of the stream. It is required and must be unique
- // across the JetStream account.
- //
- // Name Names cannot contain whitespace, ., *, >, path separators
- // (forward or backwards slash), and non-printable characters.
- Name string `json:"name"`
-
- // Description is an optional description of the stream.
- Description string `json:"description,omitempty"`
-
- // Subjects is a list of subjects that the stream is listening on.
- // Wildcards are supported. Subjects cannot be set if the stream is
- // created as a mirror.
- Subjects []string `json:"subjects,omitempty"`
-
- // Retention defines the message retention policy for the stream.
- // Defaults to LimitsPolicy.
- Retention RetentionPolicy `json:"retention"`
-
- // MaxConsumers specifies the maximum number of consumers allowed for
- // the stream.
- MaxConsumers int `json:"max_consumers"`
-
- // MaxMsgs is the maximum number of messages the stream will store.
- // After reaching the limit, stream adheres to the discard policy.
- // If not set, server default is -1 (unlimited).
- MaxMsgs int64 `json:"max_msgs"`
-
- // MaxBytes is the maximum total size of messages the stream will store.
- // After reaching the limit, stream adheres to the discard policy.
- // If not set, server default is -1 (unlimited).
- MaxBytes int64 `json:"max_bytes"`
-
- // Discard defines the policy for handling messages when the stream
- // reaches its limits in terms of number of messages or total bytes.
- Discard DiscardPolicy `json:"discard"`
-
- // DiscardNewPerSubject is a flag to enable discarding new messages per
- // subject when limits are reached. Requires DiscardPolicy to be
- // DiscardNew and the MaxMsgsPerSubject to be set.
- DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"`
-
- // MaxAge is the maximum age of messages that the stream will retain.
- MaxAge time.Duration `json:"max_age"`
-
- // MaxMsgsPerSubject is the maximum number of messages per subject that
- // the stream will retain.
- MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"`
-
- // MaxMsgSize is the maximum size of any single message in the stream.
- MaxMsgSize int32 `json:"max_msg_size,omitempty"`
-
- // Storage specifies the type of storage backend used for the stream
- // (file or memory).
- Storage StorageType `json:"storage"`
-
- // Replicas is the number of stream replicas in clustered JetStream.
- // Defaults to 1, maximum is 5.
- Replicas int `json:"num_replicas"`
-
- // NoAck is a flag to disable acknowledging messages received by this
- // stream.
- //
- // If set to true, publish methods from the JetStream client will not
- // work as expected, since they rely on acknowledgements. Core NATS
- // publish methods should be used instead. Note that this will make
- // message delivery less reliable.
- NoAck bool `json:"no_ack,omitempty"`
-
- // Duplicates is the window within which to track duplicate messages.
- // If not set, server default is 2 minutes.
- Duplicates time.Duration `json:"duplicate_window,omitempty"`
-
- // Placement is used to declare where the stream should be placed via
- // tags and/or an explicit cluster name.
- Placement *Placement `json:"placement,omitempty"`
-
- // Mirror defines the configuration for mirroring another stream.
- Mirror *StreamSource `json:"mirror,omitempty"`
-
- // Sources is a list of other streams this stream sources messages from.
- Sources []*StreamSource `json:"sources,omitempty"`
-
- // Sealed streams do not allow messages to be published or deleted via limits or API,
- // sealed streams can not be unsealed via configuration update. Can only
- // be set on already created streams via the Update API.
- Sealed bool `json:"sealed,omitempty"`
-
- // DenyDelete restricts the ability to delete messages from a stream via
- // the API. Defaults to false.
- DenyDelete bool `json:"deny_delete,omitempty"`
-
- // DenyPurge restricts the ability to purge messages from a stream via
- // the API. Defaults to false.
- DenyPurge bool `json:"deny_purge,omitempty"`
-
- // AllowRollup allows the use of the Nats-Rollup header to replace all
- // contents of a stream, or subject in a stream, with a single new
- // message.
- AllowRollup bool `json:"allow_rollup_hdrs,omitempty"`
-
- // Compression specifies the message storage compression algorithm.
- // Defaults to NoCompression.
- Compression StoreCompression `json:"compression"`
-
- // FirstSeq is the initial sequence number of the first message in the
- // stream.
- FirstSeq uint64 `json:"first_seq,omitempty"`
-
- // SubjectTransform allows applying a transformation to matching
- // messages' subjects.
- SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"`
-
- // RePublish allows immediate republishing a message to the configured
- // subject after it's stored.
- RePublish *RePublish `json:"republish,omitempty"`
-
- // AllowDirect enables direct access to individual messages using direct
- // get API. Defaults to false.
- AllowDirect bool `json:"allow_direct"`
-
- // MirrorDirect enables direct access to individual messages from the
- // origin stream using direct get API. Defaults to false.
- MirrorDirect bool `json:"mirror_direct"`
-
- // ConsumerLimits defines limits of certain values that consumers can
- // set, defaults for those who don't set these settings
- ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"`
-
- // Metadata is a set of application-defined key-value pairs for
- // associating metadata on the stream. This feature requires nats-server
- // v2.10.0 or later.
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Template identifies the template that manages the Stream. DEPRECATED:
- // This feature is no longer supported.
- Template string `json:"template_owner,omitempty"`
- }
-
- // StreamSourceInfo shows information about an upstream stream
- // source/mirror.
- StreamSourceInfo struct {
- // Name is the name of the stream that is being replicated.
- Name string `json:"name"`
-
- // Lag informs how many messages behind the source/mirror operation is.
- // This will only show correctly if there is active communication
- // with stream/mirror.
- Lag uint64 `json:"lag"`
-
- // Active informs when last the mirror or sourced stream had activity.
- // Value will be -1 when there has been no activity.
- Active time.Duration `json:"active"`
-
- // FilterSubject is the subject filter defined for this source/mirror.
- FilterSubject string `json:"filter_subject,omitempty"`
-
- // SubjectTransforms is a list of subject transforms defined for this
- // source/mirror.
- SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
- }
-
- // StreamState is the state of a JetStream stream at the time of request.
- StreamState struct {
- // Msgs is the number of messages stored in the stream.
- Msgs uint64 `json:"messages"`
-
- // Bytes is the number of bytes stored in the stream.
- Bytes uint64 `json:"bytes"`
-
- // FirstSeq is the sequence number of the first message in the stream.
- FirstSeq uint64 `json:"first_seq"`
-
- // FirstTime is the timestamp of the first message in the stream.
- FirstTime time.Time `json:"first_ts"`
-
- // LastSeq is the sequence number of the last message in the stream.
- LastSeq uint64 `json:"last_seq"`
-
- // LastTime is the timestamp of the last message in the stream.
- LastTime time.Time `json:"last_ts"`
-
- // Consumers is the number of consumers on the stream.
- Consumers int `json:"consumer_count"`
-
- // Deleted is a list of sequence numbers that have been removed from the
- // stream. This field will only be returned if the stream has been
- // fetched with the DeletedDetails option.
- Deleted []uint64 `json:"deleted"`
-
- // NumDeleted is the number of messages that have been removed from the
- // stream. Only deleted messages causing a gap in stream sequence numbers
- // are counted. Messages deleted at the beginning or end of the stream
- // are not counted.
- NumDeleted int `json:"num_deleted"`
-
- // NumSubjects is the number of unique subjects the stream has received
- // messages on.
- NumSubjects uint64 `json:"num_subjects"`
-
- // Subjects is a map of subjects the stream has received messages on
- // with message count per subject. This field will only be returned if
- // the stream has been fetched with the SubjectFilter option.
- Subjects map[string]uint64 `json:"subjects"`
- }
-
- // ClusterInfo shows information about the underlying set of servers that
- // make up the stream or consumer.
- ClusterInfo struct {
- // Name is the name of the cluster.
- Name string `json:"name,omitempty"`
-
- // Leader is the server name of the RAFT leader.
- Leader string `json:"leader,omitempty"`
-
- // Replicas is the list of members of the RAFT cluster
- Replicas []*PeerInfo `json:"replicas,omitempty"`
- }
-
- // PeerInfo shows information about the peers in the cluster that are
- // supporting the stream or consumer.
- PeerInfo struct {
- // Name is the server name of the peer.
- Name string `json:"name"`
-
- // Current indicates if the peer is up to date and synchronized with the
- // leader.
- Current bool `json:"current"`
-
- // Offline indicates if the peer is considered offline by the group.
- Offline bool `json:"offline,omitempty"`
-
- // Active it the duration since this peer was last seen.
- Active time.Duration `json:"active"`
-
- // Lag is the number of uncommitted operations this peer is behind the
- // leader.
- Lag uint64 `json:"lag,omitempty"`
- }
-
- // SubjectTransformConfig is for applying a subject transform (to matching
- // messages) before doing anything else when a new message is received.
- SubjectTransformConfig struct {
- // Source is the subject pattern to match incoming messages against.
- Source string `json:"src"`
-
- // Destination is the subject pattern to remap the subject to.
- Destination string `json:"dest"`
- }
-
- // RePublish is for republishing messages once committed to a stream. The
- // original subject is remapped from the subject pattern to the destination
- // pattern.
- RePublish struct {
- // Source is the subject pattern to match incoming messages against.
- Source string `json:"src,omitempty"`
-
- // Destination is the subject pattern to republish the subject to.
- Destination string `json:"dest"`
-
- // HeadersOnly is a flag to indicate that only the headers should be
- // republished.
- HeadersOnly bool `json:"headers_only,omitempty"`
- }
-
- // Placement is used to guide placement of streams in clustered JetStream.
- Placement struct {
- // Cluster is the name of the cluster to which the stream should be
- // assigned.
- Cluster string `json:"cluster"`
-
- // Tags are used to match streams to servers in the cluster. A stream
- // will be assigned to a server with a matching tag.
- Tags []string `json:"tags,omitempty"`
- }
-
- // StreamSource dictates how streams can source from other streams.
- StreamSource struct {
- // Name is the name of the stream to source from.
- Name string `json:"name"`
-
- // OptStartSeq is the sequence number to start sourcing from.
- OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
-
- // OptStartTime is the timestamp of messages to start sourcing from.
- OptStartTime *time.Time `json:"opt_start_time,omitempty"`
-
- // FilterSubject is the subject filter used to only replicate messages
- // with matching subjects.
- FilterSubject string `json:"filter_subject,omitempty"`
-
- // SubjectTransforms is a list of subject transforms to apply to
- // matching messages.
- //
- // Subject transforms on sources and mirrors are also used as subject
- // filters with optional transformations.
- SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
-
- // External is a configuration referencing a stream source in another
- // account or JetStream domain.
- External *ExternalStream `json:"external,omitempty"`
-
- // Domain is used to configure a stream source in another JetStream
- // domain. This setting will set the External field with the appropriate
- // APIPrefix.
- Domain string `json:"-"`
- }
-
- // ExternalStream allows you to qualify access to a stream source in another
- // account.
- ExternalStream struct {
- // APIPrefix is the subject prefix that imports the other account/domain
- // $JS.API.CONSUMER.> subjects.
- APIPrefix string `json:"api"`
-
- // DeliverPrefix is the delivery subject to use for the push consumer.
- DeliverPrefix string `json:"deliver"`
- }
-
- // StreamConsumerLimits are the limits for a consumer on a stream. These can
- // be overridden on a per consumer basis.
- StreamConsumerLimits struct {
- // InactiveThreshold is a duration which instructs the server to clean
- // up the consumer if it has been inactive for the specified duration.
- InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
-
- // MaxAckPending is a maximum number of outstanding unacknowledged
- // messages for a consumer.
- MaxAckPending int `json:"max_ack_pending,omitempty"`
- }
-
- // DiscardPolicy determines how to proceed when limits of messages or bytes
- // are reached.
- DiscardPolicy int
-
- // RetentionPolicy determines how messages in a stream are retained.
- RetentionPolicy int
-
- // StorageType determines how messages are stored for retention.
- StorageType int
-
- // StoreCompression determines how messages are compressed.
- StoreCompression uint8
-)
-
-const (
- // LimitsPolicy (default) means that messages are retained until any given
- // limit is reached. This could be one of MaxMsgs, MaxBytes, or MaxAge.
- LimitsPolicy RetentionPolicy = iota
-
- // InterestPolicy specifies that when all known observables have
- // acknowledged a message it can be removed.
- InterestPolicy
-
- // WorkQueuePolicy specifies that when the first worker or subscriber
- // acknowledges the message it can be removed.
- WorkQueuePolicy
-)
-
-const (
- // DiscardOld will remove older messages to return to the limits. This is
- // the default.
- DiscardOld DiscardPolicy = iota
-
- // DiscardNew will fail to store new messages once the limits are reached.
- DiscardNew
-)
-
-const (
- limitsPolicyString = "limits"
- interestPolicyString = "interest"
- workQueuePolicyString = "workqueue"
-)
-
-func (rp RetentionPolicy) String() string {
- switch rp {
- case LimitsPolicy:
- return "Limits"
- case InterestPolicy:
- return "Interest"
- case WorkQueuePolicy:
- return "WorkQueue"
- default:
- return "Unknown Retention Policy"
- }
-}
-
-func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
- switch rp {
- case LimitsPolicy:
- return json.Marshal(limitsPolicyString)
- case InterestPolicy:
- return json.Marshal(interestPolicyString)
- case WorkQueuePolicy:
- return json.Marshal(workQueuePolicyString)
- default:
- return nil, fmt.Errorf("nats: can not marshal %v", rp)
- }
-}
-
-func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString(limitsPolicyString):
- *rp = LimitsPolicy
- case jsonString(interestPolicyString):
- *rp = InterestPolicy
- case jsonString(workQueuePolicyString):
- *rp = WorkQueuePolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-func (dp DiscardPolicy) String() string {
- switch dp {
- case DiscardOld:
- return "DiscardOld"
- case DiscardNew:
- return "DiscardNew"
- default:
- return "Unknown Discard Policy"
- }
-}
-
-func (dp DiscardPolicy) MarshalJSON() ([]byte, error) {
- switch dp {
- case DiscardOld:
- return json.Marshal("old")
- case DiscardNew:
- return json.Marshal("new")
- default:
- return nil, fmt.Errorf("nats: can not marshal %v", dp)
- }
-}
-
-func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error {
- switch strings.ToLower(string(data)) {
- case jsonString("old"):
- *dp = DiscardOld
- case jsonString("new"):
- *dp = DiscardNew
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-const (
- // FileStorage specifies on disk storage. It's the default.
- FileStorage StorageType = iota
- // MemoryStorage specifies in memory only.
- MemoryStorage
-)
-
-const (
- memoryStorageString = "memory"
- fileStorageString = "file"
-)
-
-func (st StorageType) String() string {
- caser := cases.Title(language.AmericanEnglish)
- switch st {
- case MemoryStorage:
- return caser.String(memoryStorageString)
- case FileStorage:
- return caser.String(fileStorageString)
- default:
- return "Unknown Storage Type"
- }
-}
-
-func (st StorageType) MarshalJSON() ([]byte, error) {
- switch st {
- case MemoryStorage:
- return json.Marshal(memoryStorageString)
- case FileStorage:
- return json.Marshal(fileStorageString)
- default:
- return nil, fmt.Errorf("nats: can not marshal %v", st)
- }
-}
-
-func (st *StorageType) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString(memoryStorageString):
- *st = MemoryStorage
- case jsonString(fileStorageString):
- *st = FileStorage
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-func jsonString(s string) string {
- return "\"" + s + "\""
-}
-
-const (
- // NoCompression disables compression on the stream. This is the default.
- NoCompression StoreCompression = iota
-
- // S2Compression enables S2 compression on the stream.
- S2Compression
-)
-
-func (alg StoreCompression) String() string {
- switch alg {
- case NoCompression:
- return "None"
- case S2Compression:
- return "S2"
- default:
- return "Unknown StoreCompression"
- }
-}
-
-func (alg StoreCompression) MarshalJSON() ([]byte, error) {
- var str string
- switch alg {
- case S2Compression:
- str = "s2"
- case NoCompression:
- str = "none"
- default:
- return nil, fmt.Errorf("unknown compression algorithm")
- }
- return json.Marshal(str)
-}
-
-func (alg *StoreCompression) UnmarshalJSON(b []byte) error {
- var str string
- if err := json.Unmarshal(b, &str); err != nil {
- return err
- }
- switch str {
- case "s2":
- *alg = S2Compression
- case "none":
- *alg = NoCompression
- default:
- return fmt.Errorf("unknown compression algorithm")
- }
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/js.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/js.go
deleted file mode 100644
index 5f8dfe3..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/js.go
+++ /dev/null
@@ -1,3880 +0,0 @@
-// Copyright 2020-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/json"
- "errors"
- "fmt"
- "math/rand"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/nats-io/nats.go/internal/parser"
- "github.com/nats-io/nuid"
-)
-
-// JetStream allows persistent messaging through JetStream.
-//
-// NOTE: JetStream is part of legacy API.
-// Users are encouraged to switch to the new JetStream API for enhanced capabilities and
-// simplified API. Please refer to the `jetstream` package.
-// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md
-type JetStream interface {
- // Publish publishes a message to JetStream.
- Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error)
-
- // PublishMsg publishes a Msg to JetStream.
- PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error)
-
- // PublishAsync publishes a message to JetStream and returns a PubAckFuture.
- // The data should not be changed until the PubAckFuture has been processed.
- PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error)
-
- // PublishMsgAsync publishes a Msg to JetStream and returns a PubAckFuture.
- // The message should not be changed until the PubAckFuture has been processed.
- PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error)
-
- // PublishAsyncPending returns the number of async publishes outstanding for this context.
- PublishAsyncPending() int
-
- // PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd.
- PublishAsyncComplete() <-chan struct{}
-
- // Subscribe creates an async Subscription for JetStream.
- // The stream and consumer names can be provided with the nats.Bind() option.
- // For creating an ephemeral (where the consumer name is picked by the server),
- // you can provide the stream name with nats.BindStream().
- // If no stream name is specified, the library will attempt to figure out which
- // stream the subscription is for. See important notes below for more details.
- //
- // IMPORTANT NOTES:
- // * If none of the options Bind() nor Durable() are specified, the library will
- // send a request to the server to create an ephemeral JetStream consumer,
- // which will be deleted after an Unsubscribe() or Drain(), or automatically
- // by the server after a short period of time after the NATS subscription is
- // gone.
- // * If Durable() option is specified, the library will attempt to lookup a JetStream
- // consumer with this name, and if found, will bind to it and not attempt to
- // delete it. However, if not found, the library will send a request to
- // create such durable JetStream consumer. Note that the library will delete
- // the JetStream consumer after an Unsubscribe() or Drain() only if it
- // created the durable consumer while subscribing. If the durable consumer
- // already existed prior to subscribing it won't be deleted.
- // * If Bind() option is provided, the library will attempt to lookup the
- // consumer with the given name, and if successful, bind to it. If the lookup fails,
- // then the Subscribe() call will return an error.
- Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error)
-
- // SubscribeSync creates a Subscription that can be used to process messages synchronously.
- // See important note in Subscribe()
- SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error)
-
- // ChanSubscribe creates channel based Subscription.
- // See important note in Subscribe()
- ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error)
-
- // ChanQueueSubscribe creates channel based Subscription with a queue group.
- // See important note in QueueSubscribe()
- ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error)
-
- // QueueSubscribe creates a Subscription with a queue group.
- // If no optional durable name nor binding options are specified, the queue name will be used as a durable name.
- // See important note in Subscribe()
- QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error)
-
- // QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously.
- // See important note in QueueSubscribe()
- QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error)
-
- // PullSubscribe creates a Subscription that can fetch messages.
- // See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be
- // set to an empty string.
- PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error)
-}
-
-// JetStreamContext allows JetStream messaging and stream management.
-//
-// NOTE: JetStreamContext is part of legacy API.
-// Users are encouraged to switch to the new JetStream API for enhanced capabilities and
-// simplified API. Please refer to the `jetstream` package.
-// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md
-type JetStreamContext interface {
- JetStream
- JetStreamManager
- KeyValueManager
- ObjectStoreManager
-}
-
-// Request API subjects for JetStream.
-const (
- // defaultAPIPrefix is the default prefix for the JetStream API.
- defaultAPIPrefix = "$JS.API."
-
- // jsDomainT is used to create JetStream API prefix by specifying only Domain
- jsDomainT = "$JS.%s.API."
-
- // jsExtDomainT is used to create a StreamSource External APIPrefix
- jsExtDomainT = "$JS.%s.API"
-
- // apiAccountInfo is for obtaining general information about JetStream.
- apiAccountInfo = "INFO"
-
- // apiConsumerCreateT is used to create consumers.
- // it accepts stream name and consumer name.
- apiConsumerCreateT = "CONSUMER.CREATE.%s.%s"
-
- // apiConsumerCreateT is used to create consumers.
- // it accepts stream name, consumer name and filter subject
- apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s"
-
- // apiLegacyConsumerCreateT is used to create consumers.
- // this is a legacy endpoint to support creating ephemerals before nats-server v2.9.0.
- apiLegacyConsumerCreateT = "CONSUMER.CREATE.%s"
-
- // apiDurableCreateT is used to create durable consumers.
- // this is a legacy endpoint to support creating durable consumers before nats-server v2.9.0.
- apiDurableCreateT = "CONSUMER.DURABLE.CREATE.%s.%s"
-
- // apiConsumerInfoT is used to create consumers.
- apiConsumerInfoT = "CONSUMER.INFO.%s.%s"
-
- // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode.
- apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s"
-
- // apiConsumerDeleteT is used to delete consumers.
- apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s"
-
- // apiConsumerListT is used to return all detailed consumer information
- apiConsumerListT = "CONSUMER.LIST.%s"
-
- // apiConsumerNamesT is used to return a list with all consumer names for the stream.
- apiConsumerNamesT = "CONSUMER.NAMES.%s"
-
- // apiStreams can lookup a stream by subject.
- apiStreams = "STREAM.NAMES"
-
- // apiStreamCreateT is the endpoint to create new streams.
- apiStreamCreateT = "STREAM.CREATE.%s"
-
- // apiStreamInfoT is the endpoint to get information on a stream.
- apiStreamInfoT = "STREAM.INFO.%s"
-
- // apiStreamUpdateT is the endpoint to update existing streams.
- apiStreamUpdateT = "STREAM.UPDATE.%s"
-
- // apiStreamDeleteT is the endpoint to delete streams.
- apiStreamDeleteT = "STREAM.DELETE.%s"
-
- // apiStreamPurgeT is the endpoint to purge streams.
- apiStreamPurgeT = "STREAM.PURGE.%s"
-
- // apiStreamListT is the endpoint that will return all detailed stream information
- apiStreamListT = "STREAM.LIST"
-
- // apiMsgGetT is the endpoint to get a message.
- apiMsgGetT = "STREAM.MSG.GET.%s"
-
- // apiMsgGetT is the endpoint to perform a direct get of a message.
- apiDirectMsgGetT = "DIRECT.GET.%s"
-
- // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject.
- apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s"
-
- // apiMsgDeleteT is the endpoint to remove a message.
- apiMsgDeleteT = "STREAM.MSG.DELETE.%s"
-
- // orderedHeartbeatsInterval is how fast we want HBs from the server during idle.
- orderedHeartbeatsInterval = 5 * time.Second
-
- // Scale for threshold of missed HBs or lack of activity.
- hbcThresh = 2
-
- // For ChanSubscription, we can't update sub.delivered as we do for other
- // type of subscriptions, since the channel is user provided.
- // With flow control in play, we will check for flow control on incoming
- // messages (as opposed to when they are delivered), but also from a go
- // routine. Without this, the subscription would possibly stall until
- // a new message or heartbeat/fc are received.
- chanSubFCCheckInterval = 250 * time.Millisecond
-
- // Default time wait between retries on Publish iff err is NoResponders.
- DefaultPubRetryWait = 250 * time.Millisecond
-
- // Default number of retries
- DefaultPubRetryAttempts = 2
-
- // defaultAsyncPubAckInflight is the number of async pub acks inflight.
- defaultAsyncPubAckInflight = 4000
-)
-
-// Types of control messages, so far heartbeat and flow control
-const (
- jsCtrlHB = 1
- jsCtrlFC = 2
-)
-
-// js is an internal struct from a JetStreamContext.
-type js struct {
- nc *Conn
- opts *jsOpts
-
- // For async publish context.
- mu sync.RWMutex
- rpre string
- rsub *Subscription
- pafs map[string]*pubAckFuture
- stc chan struct{}
- dch chan struct{}
- rr *rand.Rand
- connStatusCh chan (Status)
- replyPrefix string
- replyPrefixLen int
-}
-
-type jsOpts struct {
- ctx context.Context
- // For importing JetStream from other accounts.
- pre string
- // Amount of time to wait for API requests.
- wait time.Duration
- // For async publish error handling.
- aecb MsgErrHandler
- // Max async pub ack in flight
- maxpa int
- // the domain that produced the pre
- domain string
- // enables protocol tracing
- ctrace ClientTrace
- shouldTrace bool
- // purgeOpts contains optional stream purge options
- purgeOpts *StreamPurgeRequest
- // streamInfoOpts contains optional stream info options
- streamInfoOpts *StreamInfoRequest
- // streamListSubject is used for subject filtering when listing streams / stream names
- streamListSubject string
- // For direct get message requests
- directGet bool
- // For direct get next message
- directNextFor string
-
- // featureFlags are used to enable/disable specific JetStream features
- featureFlags featureFlags
-}
-
-const (
- defaultRequestWait = 5 * time.Second
- defaultAccountCheck = 20 * time.Second
-)
-
// JetStream returns a JetStreamContext for messaging and stream management.
// Errors are only returned if inconsistent options are provided.
//
// NOTE: JetStreamContext is part of legacy API.
// Users are encouraged to switch to the new JetStream API for enhanced capabilities and
// simplified API. Please refer to the `jetstream` package.
// See: https://github.com/nats-io/nats.go/blob/main/jetstream/README.md
func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) {
	js := &js{
		nc: nc,
		opts: &jsOpts{
			pre:   defaultAPIPrefix,
			wait:  defaultRequestWait,
			maxpa: defaultAsyncPubAckInflight,
		},
	}
	// Honor a custom inbox prefix if the connection configured one.
	inboxPrefix := InboxPrefix
	if js.nc.Opts.InboxPrefix != _EMPTY_ {
		inboxPrefix = js.nc.Opts.InboxPrefix + "."
	}
	js.replyPrefix = inboxPrefix
	// +1 accounts for the '.' between the random prefix and the per-publish token.
	js.replyPrefixLen = len(js.replyPrefix) + aReplyTokensize + 1

	// Apply user options last so they can override the defaults above.
	for _, opt := range opts {
		if err := opt.configureJSContext(js.opts); err != nil {
			return nil, err
		}
	}
	return js, nil
}
-
// JSOpt configures a JetStreamContext.
type JSOpt interface {
	configureJSContext(opts *jsOpts) error
}

// jsOptFn configures an option for the JetStreamContext.
// It adapts a plain function to the JSOpt interface.
type jsOptFn func(opts *jsOpts) error

func (opt jsOptFn) configureJSContext(opts *jsOpts) error {
	return opt(opts)
}
-
// featureFlags toggles optional JetStream protocol behaviors.
type featureFlags struct {
	// useDurableConsumerCreate selects the legacy durable consumer create
	// subject (see UseLegacyDurableConsumers).
	useDurableConsumerCreate bool
}
-
-// UseLegacyDurableConsumers makes JetStream use the legacy (pre nats-server v2.9.0) subjects for consumer creation.
-// If this option is used when creating JetStremContext, $JS.API.CONSUMER.DURABLE.CREATE.. will be used
-// to create a consumer with Durable provided, rather than $JS.API.CONSUMER.CREATE...
-func UseLegacyDurableConsumers() JSOpt {
- return jsOptFn(func(opts *jsOpts) error {
- opts.featureFlags.useDurableConsumerCreate = true
- return nil
- })
-}
-
// ClientTrace can be used to trace API interactions for the JetStream Context.
type ClientTrace struct {
	// RequestSent is invoked with the subject and payload of each API request.
	RequestSent func(subj string, payload []byte)
	// ResponseReceived is invoked with the subject, payload and headers of
	// each API response.
	ResponseReceived func(subj string, payload []byte, hdr Header)
}

// configureJSContext installs the trace callbacks, making ClientTrace usable
// directly as a JSOpt.
func (ct ClientTrace) configureJSContext(js *jsOpts) error {
	js.ctrace = ct
	js.shouldTrace = true
	return nil
}
-
-// Domain changes the domain part of JetStream API prefix.
-func Domain(domain string) JSOpt {
- if domain == _EMPTY_ {
- return APIPrefix(_EMPTY_)
- }
-
- return jsOptFn(func(js *jsOpts) error {
- js.domain = domain
- js.pre = fmt.Sprintf(jsDomainT, domain)
-
- return nil
- })
-
-}
-
// configureJSContext stores the purge options, making StreamPurgeRequest
// usable directly as a JSOpt.
func (s *StreamPurgeRequest) configureJSContext(js *jsOpts) error {
	js.purgeOpts = s
	return nil
}

// configureJSContext stores the stream info options, making StreamInfoRequest
// usable directly as a JSOpt.
func (s *StreamInfoRequest) configureJSContext(js *jsOpts) error {
	js.streamInfoOpts = s
	return nil
}
-
-// APIPrefix changes the default prefix used for the JetStream API.
-func APIPrefix(pre string) JSOpt {
- return jsOptFn(func(js *jsOpts) error {
- if pre == _EMPTY_ {
- return nil
- }
-
- js.pre = pre
- if !strings.HasSuffix(js.pre, ".") {
- js.pre = js.pre + "."
- }
-
- return nil
- })
-}
-
-// DirectGet is an option that can be used to make GetMsg() or GetLastMsg()
-// retrieve message directly from a group of servers (leader and replicas)
-// if the stream was created with the AllowDirect option.
-func DirectGet() JSOpt {
- return jsOptFn(func(js *jsOpts) error {
- js.directGet = true
- return nil
- })
-}
-
-// DirectGetNext is an option that can be used to make GetMsg() retrieve message
-// directly from a group of servers (leader and replicas) if the stream was
-// created with the AllowDirect option.
-// The server will find the next message matching the filter `subject` starting
-// at the start sequence (argument in GetMsg()). The filter `subject` can be a
-// wildcard.
-func DirectGetNext(subject string) JSOpt {
- return jsOptFn(func(js *jsOpts) error {
- js.directGet = true
- js.directNextFor = subject
- return nil
- })
-}
-
-// StreamListFilter is an option that can be used to configure `StreamsInfo()` and `StreamNames()` requests.
-// It allows filtering the returned streams by subject associated with each stream.
-// Wildcards can be used. For example, `StreamListFilter(FOO.*.A) will return
-// all streams which have at least one subject matching the provided pattern (e.g. FOO.TEST.A).
-func StreamListFilter(subject string) JSOpt {
- return jsOptFn(func(opts *jsOpts) error {
- opts.streamListSubject = subject
- return nil
- })
-}
-
-func (js *js) apiSubj(subj string) string {
- if js.opts.pre == _EMPTY_ {
- return subj
- }
- var b strings.Builder
- b.WriteString(js.opts.pre)
- b.WriteString(subj)
- return b.String()
-}
-
// PubOpt configures options for publishing JetStream messages.
type PubOpt interface {
	configurePublish(opts *pubOpts) error
}

// pubOptFn is a function option used to configure JetStream Publish.
// It adapts a plain function to the PubOpt interface.
type pubOptFn func(opts *pubOpts) error

func (opt pubOptFn) configurePublish(opts *pubOpts) error {
	return opt(opts)
}
-
// pubOpts accumulates the options applied to a single Publish/PublishAsync.
type pubOpts struct {
	ctx context.Context
	ttl time.Duration
	id  string  // Message ID for deduplication (Nats-Msg-Id)
	lid string  // Expected last msgId
	str string  // Expected stream name
	seq *uint64 // Expected last sequence
	lss *uint64 // Expected last sequence per subject

	// Publish retries for NoResponders err.
	rwait time.Duration // Retry wait between attempts
	rnum  int           // Retry attempts

	// stallWait is the max wait of a async pub ack.
	stallWait time.Duration
}
-
// pubAckResponse is the ack response from the JetStream API when publishing a message.
// It embeds the generic apiResponse so server errors are decoded alongside the ack.
type pubAckResponse struct {
	apiResponse
	*PubAck
}

// PubAck is an ack received after successfully publishing a message.
type PubAck struct {
	Stream    string `json:"stream"`
	Sequence  uint64 `json:"seq"`
	Duplicate bool   `json:"duplicate,omitempty"`
	Domain    string `json:"domain,omitempty"`
}
-
// Headers for published messages.
const (
	MsgIdHdr               = "Nats-Msg-Id"
	ExpectedStreamHdr      = "Nats-Expected-Stream"
	ExpectedLastSeqHdr     = "Nats-Expected-Last-Sequence"
	ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence"
	ExpectedLastMsgIdHdr   = "Nats-Expected-Last-Msg-Id"
	MsgRollup              = "Nats-Rollup"
)

// Headers for republished messages and direct gets.
const (
	JSStream       = "Nats-Stream"
	JSSequence     = "Nats-Sequence"
	JSTimeStamp    = "Nats-Time-Stamp"
	JSSubject      = "Nats-Subject"
	JSLastSequence = "Nats-Last-Sequence"
)

// MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested.
const MsgSize = "Nats-Msg-Size"

// Rollups, can be subject only or all messages.
// Used as the value of the MsgRollup header.
const (
	MsgRollupSubject = "sub"
	MsgRollupAll     = "all"
)
-
// PublishMsg publishes a Msg to a stream from JetStream.
//
// Flow: apply PubOpts, resolve the timeout/context pair, set the JetStream
// publish headers, issue a request/reply, retry on ErrNoResponders, then
// decode and validate the PubAck.
func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) {
	var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts}
	if len(opts) > 0 {
		if m.Header == nil {
			m.Header = Header{}
		}
		for _, opt := range opts {
			if err := opt.configurePublish(&o); err != nil {
				return nil, err
			}
		}
	}
	// Check for option collisions. Right now just timeout and context.
	if o.ctx != nil && o.ttl != 0 {
		return nil, ErrContextAndTimeout
	}
	if o.ttl == 0 && o.ctx == nil {
		// Neither supplied: fall back to the context's configured API wait.
		o.ttl = js.opts.wait
	}
	// StallWait only applies to async publishes.
	if o.stallWait > 0 {
		return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish")
	}

	// Translate options into the JetStream publish headers.
	if o.id != _EMPTY_ {
		m.Header.Set(MsgIdHdr, o.id)
	}
	if o.lid != _EMPTY_ {
		m.Header.Set(ExpectedLastMsgIdHdr, o.lid)
	}
	if o.str != _EMPTY_ {
		m.Header.Set(ExpectedStreamHdr, o.str)
	}
	if o.seq != nil {
		m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10))
	}
	if o.lss != nil {
		m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
	}

	var resp *Msg
	var err error

	if o.ttl > 0 {
		resp, err = js.nc.RequestMsg(m, time.Duration(o.ttl))
	} else {
		resp, err = js.nc.RequestMsgWithContext(o.ctx, m)
	}

	if err != nil {
		// Retry only on ErrNoResponders; o.rnum < 0 means retry without limit.
		// ttl tracks the remaining time budget across attempts.
		for r, ttl := 0, o.ttl; errors.Is(err, ErrNoResponders) && (r < o.rnum || o.rnum < 0); r++ {
			// To protect against small blips in leadership changes etc, if we get a no responders here retry.
			if o.ctx != nil {
				select {
				case <-o.ctx.Done():
				case <-time.After(o.rwait):
				}
			} else {
				time.Sleep(o.rwait)
			}
			if o.ttl > 0 {
				ttl -= o.rwait
				if ttl <= 0 {
					// Budget exhausted by retry waits.
					err = ErrTimeout
					break
				}
				resp, err = js.nc.RequestMsg(m, time.Duration(ttl))
			} else {
				resp, err = js.nc.RequestMsgWithContext(o.ctx, m)
			}
		}
		if err != nil {
			// Map no-responders to the JetStream-specific error.
			if errors.Is(err, ErrNoResponders) {
				err = ErrNoStreamResponse
			}
			return nil, err
		}
	}

	var pa pubAckResponse
	if err := json.Unmarshal(resp.Data, &pa); err != nil {
		return nil, ErrInvalidJSAck
	}
	if pa.Error != nil {
		return nil, pa.Error
	}
	// A valid ack must name the stream that stored the message.
	if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ {
		return nil, ErrInvalidJSAck
	}
	return pa.PubAck, nil
}
-
// Publish publishes a message to a stream from JetStream.
// It is a convenience wrapper around PublishMsg.
func (js *js) Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) {
	return js.PublishMsg(&Msg{Subject: subj, Data: data}, opts...)
}
-
// PubAckFuture is a future for a PubAck.
type PubAckFuture interface {
	// Ok returns a receive only channel that can be used to get a PubAck.
	Ok() <-chan *PubAck

	// Err returns a receive only channel that can be used to get the error from an async publish.
	Err() <-chan error

	// Msg returns the message that was sent to the server.
	Msg() *Msg
}

// pubAckFuture is the internal implementation of PubAckFuture,
// guarded by the owning js context's mutex.
type pubAckFuture struct {
	js     *js        // owning context; its mu guards all fields below
	msg    *Msg       // the published message
	pa     *PubAck    // the ack, once received
	st     time.Time  // time the publish was issued
	err    error      // terminal error, if any
	doneCh chan *PubAck
	errCh  chan error
}
-
-func (paf *pubAckFuture) Ok() <-chan *PubAck {
- paf.js.mu.Lock()
- defer paf.js.mu.Unlock()
-
- if paf.doneCh == nil {
- paf.doneCh = make(chan *PubAck, 1)
- if paf.pa != nil {
- paf.doneCh <- paf.pa
- }
- }
-
- return paf.doneCh
-}
-
-func (paf *pubAckFuture) Err() <-chan error {
- paf.js.mu.Lock()
- defer paf.js.mu.Unlock()
-
- if paf.errCh == nil {
- paf.errCh = make(chan error, 1)
- if paf.err != nil {
- paf.errCh <- paf.err
- }
- }
-
- return paf.errCh
-}
-
-func (paf *pubAckFuture) Msg() *Msg {
- paf.js.mu.RLock()
- defer paf.js.mu.RUnlock()
- return paf.msg
-}
-
// For quick token lookup etc.
// aReplyTokensize is the number of characters in the random per-publish
// token appended to the async reply prefix.
const aReplyTokensize = 6
-
// newAsyncReply returns a unique reply subject for an async publish and
// lazily sets up the shared machinery: a wildcard subscription on a random
// per-context prefix, plus a connection-status watcher that fails pending
// acks on reconnect. Returns the empty string if the subscription fails.
func (js *js) newAsyncReply() string {
	js.mu.Lock()
	if js.rsub == nil {
		// Create our wildcard reply subject.
		sha := sha256.New()
		sha.Write([]byte(nuid.Next()))
		b := sha.Sum(nil)
		// Map the first bytes of the hash onto the reply digit alphabet.
		for i := 0; i < aReplyTokensize; i++ {
			b[i] = rdigits[int(b[i]%base)]
		}
		js.rpre = fmt.Sprintf("%s%s.", js.replyPrefix, b[:aReplyTokensize])
		sub, err := js.nc.Subscribe(fmt.Sprintf("%s*", js.rpre), js.handleAsyncReply)
		if err != nil {
			js.mu.Unlock()
			return _EMPTY_
		}
		js.rsub = sub
		js.rr = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	if js.connStatusCh == nil {
		js.connStatusCh = js.nc.StatusChanged(RECONNECTING, CLOSED)
		go js.resetPendingAcksOnReconnect()
	}
	// Build prefix + random token; the token is what handleAsyncReply keys on.
	var sb strings.Builder
	sb.WriteString(js.rpre)
	rn := js.rr.Int63()
	var b [aReplyTokensize]byte
	for i, l := 0, rn; i < len(b); i++ {
		b[i] = rdigits[l%base]
		l /= base
	}
	sb.Write(b[:])
	js.mu.Unlock()
	return sb.String()
}
-
// resetPendingAcksOnReconnect watches connection status changes and, on each
// reconnect, fails every in-flight PubAckFuture with ErrDisconnected. It
// exits when the status channel is closed or the connection is CLOSED.
func (js *js) resetPendingAcksOnReconnect() {
	js.mu.Lock()
	connStatusCh := js.connStatusCh
	js.mu.Unlock()
	for {
		newStatus, ok := <-connStatusCh
		if !ok || newStatus == CLOSED {
			return
		}
		js.mu.Lock()
		errCb := js.opts.aecb
		// NOTE(review): the lock is dropped around errCb while ranging over
		// js.pafs and deleting from it — presumably safe because other
		// mutators only add/remove distinct keys, but worth confirming.
		for id, paf := range js.pafs {
			paf.err = ErrDisconnected
			if paf.errCh != nil {
				paf.errCh <- paf.err
			}
			if errCb != nil {
				// clear reply subject so that new one is created on republish
				js.mu.Unlock()
				errCb(js, paf.msg, ErrDisconnected)
				js.mu.Lock()
			}
			delete(js.pafs, id)
		}
		// Wake anyone waiting on PublishAsyncComplete.
		if js.dch != nil {
			close(js.dch)
			js.dch = nil
		}
		js.mu.Unlock()
	}
}
-
-func (js *js) cleanupReplySub() {
- js.mu.Lock()
- if js.rsub != nil {
- js.rsub.Unsubscribe()
- js.rsub = nil
- }
- if js.connStatusCh != nil {
- close(js.connStatusCh)
- js.connStatusCh = nil
- }
- js.mu.Unlock()
-}
-
-// registerPAF will register for a PubAckFuture.
-func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) {
- js.mu.Lock()
- if js.pafs == nil {
- js.pafs = make(map[string]*pubAckFuture)
- }
- paf.js = js
- js.pafs[id] = paf
- np := len(js.pafs)
- maxpa := js.opts.maxpa
- js.mu.Unlock()
- return np, maxpa
-}
-
-// Lock should be held.
-func (js *js) getPAF(id string) *pubAckFuture {
- if js.pafs == nil {
- return nil
- }
- return js.pafs[id]
-}
-
-// clearPAF will remove a PubAckFuture that was registered.
-func (js *js) clearPAF(id string) {
- js.mu.Lock()
- delete(js.pafs, id)
- js.mu.Unlock()
-}
-
-// PublishAsyncPending returns how many PubAckFutures are pending.
-func (js *js) PublishAsyncPending() int {
- js.mu.RLock()
- defer js.mu.RUnlock()
- return len(js.pafs)
-}
-
-func (js *js) asyncStall() <-chan struct{} {
- js.mu.Lock()
- if js.stc == nil {
- js.stc = make(chan struct{})
- }
- stc := js.stc
- js.mu.Unlock()
- return stc
-}
-
// Handle an async reply from PublishAsync.
// Looks up the PubAckFuture by the reply-subject token, removes it, wakes any
// stalled publishers / completion waiters, then resolves the future with
// either an error or the decoded PubAck. Every exit path releases js.mu.
func (js *js) handleAsyncReply(m *Msg) {
	if len(m.Subject) <= js.replyPrefixLen {
		return
	}
	// The token after the prefix identifies the future.
	id := m.Subject[js.replyPrefixLen:]

	js.mu.Lock()
	paf := js.getPAF(id)
	if paf == nil {
		js.mu.Unlock()
		return
	}
	// Remove
	delete(js.pafs, id)

	// Check on anyone stalled and waiting.
	if js.stc != nil && len(js.pafs) < js.opts.maxpa {
		close(js.stc)
		js.stc = nil
	}
	// Check on anyone one waiting on done status.
	if js.dch != nil && len(js.pafs) == 0 {
		dch := js.dch
		js.dch = nil
		// Defer here so error is processed and can be checked.
		defer close(dch)
	}

	// doErr resolves the future with err; it unlocks js.mu and invokes the
	// async error callback outside the lock.
	doErr := func(err error) {
		paf.err = err
		if paf.errCh != nil {
			paf.errCh <- paf.err
		}
		cb := js.opts.aecb
		js.mu.Unlock()
		if cb != nil {
			cb(paf.js, paf.msg, err)
		}
	}

	// Process no responders etc.
	if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
		doErr(ErrNoResponders)
		return
	}

	var pa pubAckResponse
	if err := json.Unmarshal(m.Data, &pa); err != nil {
		doErr(ErrInvalidJSAck)
		return
	}
	if pa.Error != nil {
		doErr(pa.Error)
		return
	}
	if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ {
		doErr(ErrInvalidJSAck)
		return
	}

	// So here we have received a proper puback.
	paf.pa = pa.PubAck
	if paf.doneCh != nil {
		paf.doneCh <- paf.pa
	}
	js.mu.Unlock()
}
-
// MsgErrHandler is used to process asynchronous errors from
// JetStream PublishAsync. It will return the original
// message sent to the server for possible retransmitting and the error encountered.
type MsgErrHandler func(JetStream, *Msg, error)
-
-// PublishAsyncErrHandler sets the error handler for async publishes in JetStream.
-func PublishAsyncErrHandler(cb MsgErrHandler) JSOpt {
- return jsOptFn(func(js *jsOpts) error {
- js.aecb = cb
- return nil
- })
-}
-
-// PublishAsyncMaxPending sets the maximum outstanding async publishes that can be inflight at one time.
-func PublishAsyncMaxPending(max int) JSOpt {
- return jsOptFn(func(js *jsOpts) error {
- if max < 1 {
- return errors.New("nats: max ack pending should be >= 1")
- }
- js.maxpa = max
- return nil
- })
-}
-
// PublishAsync publishes a message to JetStream and returns a PubAckFuture.
// It is a convenience wrapper around PublishMsgAsync.
func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) {
	return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...)
}
-
// defaultStallWait is how long PublishMsgAsync waits for an in-flight slot
// when at the max pending limit, unless overridden with StallWait.
const defaultStallWait = 200 * time.Millisecond
-
// PublishMsgAsync publishes m without waiting for the ack and returns a
// PubAckFuture that resolves when the ack (or an error) arrives on the
// context's async reply subscription. Timeouts/contexts are rejected, and the
// publish may stall (bounded by StallWait) when the max pending is reached.
func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
	var o pubOpts
	if len(opts) > 0 {
		if m.Header == nil {
			m.Header = Header{}
		}
		for _, opt := range opts {
			if err := opt.configurePublish(&o); err != nil {
				return nil, err
			}
		}
	}

	// Timeouts and contexts do not make sense for these.
	if o.ttl != 0 || o.ctx != nil {
		return nil, ErrContextAndTimeout
	}
	stallWait := defaultStallWait
	if o.stallWait > 0 {
		stallWait = o.stallWait
	}

	// FIXME(dlc) - Make common.
	if o.id != _EMPTY_ {
		m.Header.Set(MsgIdHdr, o.id)
	}
	if o.lid != _EMPTY_ {
		m.Header.Set(ExpectedLastMsgIdHdr, o.lid)
	}
	if o.str != _EMPTY_ {
		m.Header.Set(ExpectedStreamHdr, o.str)
	}
	if o.seq != nil {
		m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10))
	}
	if o.lss != nil {
		m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
	}

	// Reply
	if m.Reply != _EMPTY_ {
		return nil, errors.New("nats: reply subject should be empty")
	}
	// NOTE(review): given the guard above, reply is always empty here, so the
	// save/restore is effectively a reset of m.Reply on return.
	reply := m.Reply
	m.Reply = js.newAsyncReply()
	defer func() { m.Reply = reply }()

	if m.Reply == _EMPTY_ {
		return nil, errors.New("nats: error creating async reply handler")
	}

	// The token suffix of the reply subject keys the future.
	id := m.Reply[js.replyPrefixLen:]
	paf := &pubAckFuture{msg: m, st: time.Now()}
	numPending, maxPending := js.registerPAF(id, paf)

	if maxPending > 0 && numPending >= maxPending {
		// At capacity: wait (bounded) for an ack to free a slot.
		select {
		case <-js.asyncStall():
		case <-time.After(stallWait):
			js.clearPAF(id)
			return nil, errors.New("nats: stalled with too many outstanding async published messages")
		}
	}
	if err := js.nc.PublishMsg(m); err != nil {
		js.clearPAF(id)
		return nil, err
	}

	return paf, nil
}
-
-// PublishAsyncComplete returns a channel that will be closed when all outstanding messages have been ack'd.
-func (js *js) PublishAsyncComplete() <-chan struct{} {
- js.mu.Lock()
- defer js.mu.Unlock()
- if js.dch == nil {
- js.dch = make(chan struct{})
- }
- dch := js.dch
- if len(js.pafs) == 0 {
- close(js.dch)
- js.dch = nil
- }
- return dch
-}
-
-// MsgId sets the message ID used for deduplication.
-func MsgId(id string) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.id = id
- return nil
- })
-}
-
-// ExpectStream sets the expected stream to respond from the publish.
-func ExpectStream(stream string) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.str = stream
- return nil
- })
-}
-
-// ExpectLastSequence sets the expected sequence in the response from the publish.
-func ExpectLastSequence(seq uint64) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.seq = &seq
- return nil
- })
-}
-
-// ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish.
-func ExpectLastSequencePerSubject(seq uint64) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.lss = &seq
- return nil
- })
-}
-
-// ExpectLastMsgId sets the expected last msgId in the response from the publish.
-func ExpectLastMsgId(id string) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.lid = id
- return nil
- })
-}
-
-// RetryWait sets the retry wait time when ErrNoResponders is encountered.
-func RetryWait(dur time.Duration) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.rwait = dur
- return nil
- })
-}
-
-// RetryAttempts sets the retry number of attempts when ErrNoResponders is encountered.
-func RetryAttempts(num int) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- opts.rnum = num
- return nil
- })
-}
-
-// StallWait sets the max wait when the producer becomes stall producing messages.
-func StallWait(ttl time.Duration) PubOpt {
- return pubOptFn(func(opts *pubOpts) error {
- if ttl <= 0 {
- return fmt.Errorf("nats: stall wait should be more than 0")
- }
- opts.stallWait = ttl
- return nil
- })
-}
-
// ackOpts accumulates options applied to a message acknowledgement.
type ackOpts struct {
	ttl      time.Duration   // max time to wait for the ack to be processed
	ctx      context.Context // optional context controlling the ack
	nakDelay time.Duration   // delay before redelivery for Nak (see nakDelay)
}

// AckOpt are the options that can be passed when acknowledge a message.
type AckOpt interface {
	configureAck(opts *ackOpts) error
}
-
// MaxWait sets the maximum amount of time we will wait for a response.
type MaxWait time.Duration

// configureJSContext applies MaxWait as the context-wide API request timeout.
func (ttl MaxWait) configureJSContext(js *jsOpts) error {
	js.wait = time.Duration(ttl)
	return nil
}

// configurePull applies MaxWait as the fetch timeout for pull requests.
func (ttl MaxWait) configurePull(opts *pullOpts) error {
	opts.ttl = time.Duration(ttl)
	return nil
}
-
// AckWait sets the maximum amount of time we will wait for an ack.
type AckWait time.Duration

// configurePublish applies AckWait as the publish request timeout.
func (ttl AckWait) configurePublish(opts *pubOpts) error {
	opts.ttl = time.Duration(ttl)
	return nil
}

// configureSubscribe applies AckWait to the consumer configuration.
func (ttl AckWait) configureSubscribe(opts *subOpts) error {
	opts.cfg.AckWait = time.Duration(ttl)
	return nil
}

// configureAck applies AckWait as the ack wait timeout.
func (ttl AckWait) configureAck(opts *ackOpts) error {
	opts.ttl = time.Duration(ttl)
	return nil
}
-
// ContextOpt is an option used to set a context.Context.
// It satisfies every option interface below so one Context(ctx) value can be
// passed to JS context creation, publish, subscribe, pull and ack calls.
type ContextOpt struct {
	context.Context
}

func (ctx ContextOpt) configureJSContext(opts *jsOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configurePublish(opts *pubOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configureSubscribe(opts *subOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configurePull(opts *pullOpts) error {
	opts.ctx = ctx
	return nil
}

func (ctx ContextOpt) configureAck(opts *ackOpts) error {
	opts.ctx = ctx
	return nil
}
-
-// Context returns an option that can be used to configure a context for APIs
-// that are context aware such as those part of the JetStream interface.
-func Context(ctx context.Context) ContextOpt {
- return ContextOpt{ctx}
-}
-
// nakDelay is an AckOpt carrying the delay before a Nak'd message is
// redelivered.
type nakDelay time.Duration

func (d nakDelay) configureAck(opts *ackOpts) error {
	opts.nakDelay = time.Duration(d)
	return nil
}
-
-// Subscribe
-
// ConsumerConfig is the configuration of a JetStream consumer.
type ConsumerConfig struct {
	Durable         string          `json:"durable_name,omitempty"`
	Name            string          `json:"name,omitempty"`
	Description     string          `json:"description,omitempty"`
	DeliverPolicy   DeliverPolicy   `json:"deliver_policy"`
	OptStartSeq     uint64          `json:"opt_start_seq,omitempty"`
	OptStartTime    *time.Time      `json:"opt_start_time,omitempty"`
	AckPolicy       AckPolicy       `json:"ack_policy"`
	AckWait         time.Duration   `json:"ack_wait,omitempty"`
	MaxDeliver      int             `json:"max_deliver,omitempty"`
	BackOff         []time.Duration `json:"backoff,omitempty"`
	FilterSubject   string          `json:"filter_subject,omitempty"`
	FilterSubjects  []string        `json:"filter_subjects,omitempty"`
	ReplayPolicy    ReplayPolicy    `json:"replay_policy"`
	RateLimit       uint64          `json:"rate_limit_bps,omitempty"` // Bits per sec
	SampleFrequency string          `json:"sample_freq,omitempty"`
	MaxWaiting      int             `json:"max_waiting,omitempty"`
	MaxAckPending   int             `json:"max_ack_pending,omitempty"`
	FlowControl     bool            `json:"flow_control,omitempty"`
	Heartbeat       time.Duration   `json:"idle_heartbeat,omitempty"`
	HeadersOnly     bool            `json:"headers_only,omitempty"`

	// Pull based options.
	MaxRequestBatch    int           `json:"max_batch,omitempty"`
	MaxRequestExpires  time.Duration `json:"max_expires,omitempty"`
	MaxRequestMaxBytes int           `json:"max_bytes,omitempty"`

	// Push based consumers.
	DeliverSubject string `json:"deliver_subject,omitempty"`
	DeliverGroup   string `json:"deliver_group,omitempty"`

	// Inactivity threshold.
	InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`

	// Generally inherited by parent stream and other markers, now can be configured directly.
	Replicas int `json:"num_replicas"`
	// Force memory storage.
	MemoryStorage bool `json:"mem_storage,omitempty"`

	// Metadata is additional metadata for the Consumer.
	// Keys starting with `_nats` are reserved.
	// NOTE: Metadata requires nats-server v2.10.0+
	Metadata map[string]string `json:"metadata,omitempty"`
}
-
// ConsumerInfo is the info from a JetStream consumer.
type ConsumerInfo struct {
	Stream         string         `json:"stream_name"`
	Name           string         `json:"name"`
	Created        time.Time      `json:"created"`
	Config         ConsumerConfig `json:"config"`
	Delivered      SequenceInfo   `json:"delivered"`
	AckFloor       SequenceInfo   `json:"ack_floor"`
	NumAckPending  int            `json:"num_ack_pending"`
	NumRedelivered int            `json:"num_redelivered"`
	NumWaiting     int            `json:"num_waiting"`
	NumPending     uint64         `json:"num_pending"`
	Cluster        *ClusterInfo   `json:"cluster,omitempty"`
	// PushBound indicates a push consumer already bound to a subscription.
	PushBound bool `json:"push_bound,omitempty"`
}
-
// SequenceInfo has both the consumer and the stream sequence and last activity.
type SequenceInfo struct {
	Consumer uint64     `json:"consumer_seq"`
	Stream   uint64     `json:"stream_seq"`
	Last     *time.Time `json:"last_active,omitempty"`
}

// SequencePair includes the consumer and stream sequence info from a JetStream consumer.
type SequencePair struct {
	Consumer uint64 `json:"consumer_seq"`
	Stream   uint64 `json:"stream_seq"`
}
-
// nextRequest is for getting next messages for pull based consumers from JetStream.
type nextRequest struct {
	Expires   time.Duration `json:"expires,omitempty"`
	Batch     int           `json:"batch,omitempty"`
	NoWait    bool          `json:"no_wait,omitempty"`
	MaxBytes  int           `json:"max_bytes,omitempty"`
	Heartbeat time.Duration `json:"idle_heartbeat,omitempty"`
}
-
// jsSub includes JetStream subscription info.
type jsSub struct {
	js *js

	// For pull subscribers, this is the next message subject to send requests to.
	nms string

	psubj    string // the subject that was passed by user to the subscribe calls
	consumer string
	stream   string
	deliver  string
	pull     bool
	dc       bool // Delete JS consumer
	ackNone  bool

	// This is ConsumerInfo's Pending+Consumer.Delivered that we get from the
	// add consumer response. Note that some versions of the server gather the
	// consumer info *after* the creation of the consumer, which means that
	// some messages may have been already delivered. So the sum of the two
	// is a more accurate representation of the number of messages pending or
	// in the process of being delivered to the subscription when created.
	pending uint64

	// Ordered consumers
	ordered bool
	dseq    uint64
	sseq    uint64
	ccreq   *createConsumerRequest

	// Heartbeats and Flow Control handling from push consumers.
	hbc    *time.Timer   // heartbeat check timer
	hbi    time.Duration // heartbeat interval
	active bool
	cmeta  string // NOTE(review): presumably the last delivered reply metadata — confirm
	fcr    string
	fcd    uint64
	fciseq uint64
	csfct  *time.Timer

	// Cancellation function to cancel context on drain/unsubscribe.
	cancel func()
}
-
-// Deletes the JS Consumer.
-// No connection nor subscription lock must be held on entry.
-func (sub *Subscription) deleteConsumer() error {
- sub.mu.Lock()
- jsi := sub.jsi
- if jsi == nil {
- sub.mu.Unlock()
- return nil
- }
- if jsi.stream == _EMPTY_ || jsi.consumer == _EMPTY_ {
- sub.mu.Unlock()
- return nil
- }
- stream, consumer := jsi.stream, jsi.consumer
- js := jsi.js
- sub.mu.Unlock()
-
- return js.DeleteConsumer(stream, consumer)
-}
-
// SubOpt configures options for subscribing to JetStream consumers.
type SubOpt interface {
	configureSubscribe(opts *subOpts) error
}

// subOptFn is a function option used to configure a JetStream Subscribe.
// It adapts a plain function to the SubOpt interface.
type subOptFn func(opts *subOpts) error

func (opt subOptFn) configureSubscribe(opts *subOpts) error {
	return opt(opts)
}
-
// Subscribe creates an async Subscription for JetStream.
// The stream and consumer names can be provided with the nats.Bind() option.
// For creating an ephemeral (where the consumer name is picked by the server),
// you can provide the stream name with nats.BindStream().
// If no stream name is specified, the library will attempt to figure out which
// stream the subscription is for. See important notes below for more details.
//
// IMPORTANT NOTES:
// * If none of the options Bind() nor Durable() are specified, the library will
// send a request to the server to create an ephemeral JetStream consumer,
// which will be deleted after an Unsubscribe() or Drain(), or automatically
// by the server after a short period of time after the NATS subscription is
// gone.
// * If Durable() option is specified, the library will attempt to lookup a JetStream
// consumer with this name, and if found, will bind to it and not attempt to
// delete it. However, if not found, the library will send a request to create
// such durable JetStream consumer. The library will delete the JetStream consumer
// after an Unsubscribe() or Drain().
// * If Bind() option is provided, the library will attempt to lookup the
// consumer with the given name, and if successful, bind to it. If the lookup fails,
// then the Subscribe() call will return an error.
func (js *js) Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) {
	// An async subscription requires a callback.
	if cb == nil {
		return nil, ErrBadSubscription
	}
	return js.subscribe(subj, _EMPTY_, cb, nil, false, false, opts)
}
-
-// SubscribeSync creates a Subscription that can be used to process messages synchronously.
-// See important note in Subscribe()
-func (js *js) SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) {
- mch := make(chan *Msg, js.nc.Opts.SubChanLen)
- return js.subscribe(subj, _EMPTY_, nil, mch, true, false, opts)
-}
-
-// QueueSubscribe creates a Subscription with a queue group.
-// If no optional durable name nor binding options are specified, the queue name will be used as a durable name.
-// See important note in Subscribe()
-func (js *js) QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) {
- if cb == nil {
- return nil, ErrBadSubscription
- }
- return js.subscribe(subj, queue, cb, nil, false, false, opts)
-}
-
-// QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously.
-// See important note in QueueSubscribe()
-func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) {
- mch := make(chan *Msg, js.nc.Opts.SubChanLen)
- return js.subscribe(subj, queue, nil, mch, true, false, opts)
-}
-
-// ChanSubscribe creates channel based Subscription.
-// Using ChanSubscribe without buffered capacity is not recommended since
-// it will be prone to dropping messages with a slow consumer error. Make sure to give the channel enough
-// capacity to handle bursts in traffic, for example other Subscribe APIs use a default of 512k capacity in comparison.
-// See important note in Subscribe()
-func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) {
- return js.subscribe(subj, _EMPTY_, nil, ch, false, false, opts)
-}
-
-// ChanQueueSubscribe creates channel based Subscription with a queue group.
-// See important note in QueueSubscribe()
-func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) {
- return js.subscribe(subj, queue, nil, ch, false, false, opts)
-}
-
-// PullSubscribe creates a Subscription that can fetch messages.
-// See important note in Subscribe()
-func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) {
- mch := make(chan *Msg, js.nc.Opts.SubChanLen)
- if durable != "" {
- opts = append(opts, Durable(durable))
- }
- return js.subscribe(subj, _EMPTY_, nil, mch, true, true, opts)
-}
-
// processConsInfo validates an existing consumer's configuration against the
// user's subscribe request (subject filter, pull vs push mode, queue/deliver
// group compatibility) and returns the deliver subject to subscribe on
// (empty for pull mode).
func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode bool, subj, queue string) (string, error) {
	ccfg := &info.Config

	// Make sure this new subject matches or is a subset.
	if ccfg.FilterSubject != _EMPTY_ && subj != ccfg.FilterSubject {
		return _EMPTY_, ErrSubjectMismatch
	}

	// Prevent binding a subscription against incompatible consumer types.
	if isPullMode && ccfg.DeliverSubject != _EMPTY_ {
		return _EMPTY_, ErrPullSubscribeToPushConsumer
	} else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ {
		return _EMPTY_, ErrPullSubscribeRequired
	}

	// If pull mode, nothing else to check here.
	if isPullMode {
		return _EMPTY_, checkConfig(ccfg, userCfg)
	}

	// At this point, we know the user wants push mode, and the JS consumer is
	// really push mode.

	dg := info.Config.DeliverGroup
	if dg == _EMPTY_ {
		// Prevent an user from attempting to create a queue subscription on
		// a JS consumer that was not created with a deliver group.
		if queue != _EMPTY_ {
			return _EMPTY_, fmt.Errorf("cannot create a queue subscription for a consumer without a deliver group")
		} else if info.PushBound {
			// Need to reject a non queue subscription to a non queue consumer
			// if the consumer is already bound.
			return _EMPTY_, fmt.Errorf("consumer is already bound to a subscription")
		}
	} else {
		// If the JS consumer has a deliver group, we need to fail a non queue
		// subscription attempt:
		if queue == _EMPTY_ {
			return _EMPTY_, fmt.Errorf("cannot create a subscription for a consumer with a deliver group %q", dg)
		} else if queue != dg {
			// Here the user's queue group name does not match the one associated
			// with the JS consumer.
			return _EMPTY_, fmt.Errorf("cannot create a queue subscription %q for a consumer with a deliver group %q",
				queue, dg)
		}
	}
	// Remaining user-supplied fields must agree with the server's config.
	if err := checkConfig(ccfg, userCfg); err != nil {
		return _EMPTY_, err
	}
	return ccfg.DeliverSubject, nil
}
-
-func checkConfig(s, u *ConsumerConfig) error {
- makeErr := func(fieldName string, usrVal, srvVal any) error {
- return fmt.Errorf("configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal)
- }
-
- if u.Durable != _EMPTY_ && u.Durable != s.Durable {
- return makeErr("durable", u.Durable, s.Durable)
- }
- if u.Description != _EMPTY_ && u.Description != s.Description {
- return makeErr("description", u.Description, s.Description)
- }
- if u.DeliverPolicy != deliverPolicyNotSet && u.DeliverPolicy != s.DeliverPolicy {
- return makeErr("deliver policy", u.DeliverPolicy, s.DeliverPolicy)
- }
- if u.OptStartSeq > 0 && u.OptStartSeq != s.OptStartSeq {
- return makeErr("optional start sequence", u.OptStartSeq, s.OptStartSeq)
- }
- if u.OptStartTime != nil && !u.OptStartTime.IsZero() && !(*u.OptStartTime).Equal(*s.OptStartTime) {
- return makeErr("optional start time", u.OptStartTime, s.OptStartTime)
- }
- if u.AckPolicy != ackPolicyNotSet && u.AckPolicy != s.AckPolicy {
- return makeErr("ack policy", u.AckPolicy, s.AckPolicy)
- }
- if u.AckWait > 0 && u.AckWait != s.AckWait {
- return makeErr("ack wait", u.AckWait, s.AckWait)
- }
- if u.MaxDeliver > 0 && u.MaxDeliver != s.MaxDeliver {
- return makeErr("max deliver", u.MaxDeliver, s.MaxDeliver)
- }
- if u.ReplayPolicy != replayPolicyNotSet && u.ReplayPolicy != s.ReplayPolicy {
- return makeErr("replay policy", u.ReplayPolicy, s.ReplayPolicy)
- }
- if u.RateLimit > 0 && u.RateLimit != s.RateLimit {
- return makeErr("rate limit", u.RateLimit, s.RateLimit)
- }
- if u.SampleFrequency != _EMPTY_ && u.SampleFrequency != s.SampleFrequency {
- return makeErr("sample frequency", u.SampleFrequency, s.SampleFrequency)
- }
- if u.MaxWaiting > 0 && u.MaxWaiting != s.MaxWaiting {
- return makeErr("max waiting", u.MaxWaiting, s.MaxWaiting)
- }
- if u.MaxAckPending > 0 && u.MaxAckPending != s.MaxAckPending {
- return makeErr("max ack pending", u.MaxAckPending, s.MaxAckPending)
- }
- // For flow control, we want to fail if the user explicit wanted it, but
- // it is not set in the existing consumer. If it is not asked by the user,
- // the library still handles it and so no reason to fail.
- if u.FlowControl && !s.FlowControl {
- return makeErr("flow control", u.FlowControl, s.FlowControl)
- }
- if u.Heartbeat > 0 && u.Heartbeat != s.Heartbeat {
- return makeErr("heartbeat", u.Heartbeat, s.Heartbeat)
- }
- if u.Replicas > 0 && u.Replicas != s.Replicas {
- return makeErr("replicas", u.Replicas, s.Replicas)
- }
- if u.MemoryStorage && !s.MemoryStorage {
- return makeErr("memory storage", u.MemoryStorage, s.MemoryStorage)
- }
- return nil
-}
-
// subscribe is the common implementation behind all JetStream subscribe
// variants (push async/sync/chan and pull). It resolves the target stream,
// looks up or creates the JetStream consumer, creates the underlying NATS
// subscription on the deliver subject (or a wildcard pull-request inbox in
// pull mode), and wires up heartbeats and flow control as needed.
//
// Exactly one of cb or ch is used depending on the subscription type; isSync
// marks a SyncSubscription; opts are the accumulated SubOpt options.
func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, isPullMode bool, opts []SubOpt) (*Subscription, error) {
	cfg := ConsumerConfig{
		DeliverPolicy: deliverPolicyNotSet,
		AckPolicy:     ackPolicyNotSet,
		ReplayPolicy:  replayPolicyNotSet,
	}
	o := subOpts{cfg: &cfg}
	if len(opts) > 0 {
		for _, opt := range opts {
			if opt == nil {
				continue
			}
			if err := opt.configureSubscribe(&o); err != nil {
				return nil, err
			}
		}
	}

	// If no stream name is specified, the subject cannot be empty.
	if subj == _EMPTY_ && o.stream == _EMPTY_ {
		return nil, fmt.Errorf("nats: subject required")
	}

	// Note that these may change based on the consumer info response we may get.
	hasHeartbeats := o.cfg.Heartbeat > 0
	hasFC := o.cfg.FlowControl

	// Some checks for pull subscribers
	if isPullMode {
		// No deliver subject should be provided
		if o.cfg.DeliverSubject != _EMPTY_ {
			return nil, ErrPullSubscribeToPushConsumer
		}
	}

	// Some check/setting specific to queue subs
	if queue != _EMPTY_ {
		// Queue subscriber cannot have HB or FC (since messages will be randomly dispatched
		// to members). We may in the future have a separate NATS subscription that all members
		// would subscribe to and server would send on.
		if o.cfg.Heartbeat > 0 || o.cfg.FlowControl {
			// Not making this a public ErrXXX in case we allow in the future.
			return nil, fmt.Errorf("nats: queue subscription doesn't support idle heartbeat nor flow control")
		}

		// If this is a queue subscription and no consumer nor durable name was specified,
		// then we will use the queue name as a durable name.
		if o.consumer == _EMPTY_ && o.cfg.Durable == _EMPTY_ {
			if err := checkConsumerName(queue); err != nil {
				return nil, err
			}
			o.cfg.Durable = queue
		}
	}

	var (
		err           error
		shouldCreate  bool
		info          *ConsumerInfo
		deliver       string
		stream        = o.stream
		consumer      = o.consumer
		isDurable     = o.cfg.Durable != _EMPTY_
		consumerBound = o.bound
		ctx           = o.ctx
		skipCInfo     = o.skipCInfo
		notFoundErr   bool // consumer lookup returned "not found"
		lookupErr     bool // consumer lookup failed for transient reasons (JS disabled/timeout)
		nc            = js.nc
		nms           string
		hbi           time.Duration
		ccreq         *createConsumerRequest // In case we need to hold onto it for ordered consumers.
		maxap         int
	)

	// Do some quick checks here for ordered consumers. We do these here instead of spread out
	// in the individual SubOpts.
	if o.ordered {
		// Make sure we are not durable.
		if isDurable {
			return nil, fmt.Errorf("nats: durable can not be set for an ordered consumer")
		}
		// Check ack policy.
		if o.cfg.AckPolicy != ackPolicyNotSet {
			return nil, fmt.Errorf("nats: ack policy can not be set for an ordered consumer")
		}
		// Check max deliver.
		if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 {
			return nil, fmt.Errorf("nats: max deliver can not be set for an ordered consumer")
		}
		// No deliver subject, we pick our own.
		if o.cfg.DeliverSubject != _EMPTY_ {
			return nil, fmt.Errorf("nats: deliver subject can not be set for an ordered consumer")
		}
		// Queue groups not allowed.
		// NOTE(review): error text reads "queues not be set" — likely meant
		// "queues can not be set"; left unchanged here since it is a
		// user-visible string.
		if queue != _EMPTY_ {
			return nil, fmt.Errorf("nats: queues not be set for an ordered consumer")
		}
		// Check for bound consumers.
		if consumer != _EMPTY_ {
			return nil, fmt.Errorf("nats: can not bind existing consumer for an ordered consumer")
		}
		// Check for pull mode.
		if isPullMode {
			return nil, fmt.Errorf("nats: can not use pull mode for an ordered consumer")
		}
		// Setup how we need it to be here.
		o.cfg.FlowControl = true
		o.cfg.AckPolicy = AckNonePolicy
		o.cfg.MaxDeliver = 1
		o.cfg.AckWait = 22 * time.Hour // Just set to something known, not utilized.
		// Force R1 and MemoryStorage for these.
		o.cfg.Replicas = 1
		o.cfg.MemoryStorage = true

		if !hasHeartbeats {
			o.cfg.Heartbeat = orderedHeartbeatsInterval
		}
		hasFC, hasHeartbeats = true, true
		o.mack = true // To avoid auto-ack wrapping call below.
		hbi = o.cfg.Heartbeat
	}

	// In case a consumer has not been set explicitly, then the
	// durable name will be used as the consumer name.
	if consumer == _EMPTY_ {
		consumer = o.cfg.Durable
	}

	// Find the stream mapped to the subject if not bound to a stream already.
	if stream == _EMPTY_ {
		stream, err = js.StreamNameBySubject(subj)
		if err != nil {
			return nil, err
		}
	}

	// With an explicit durable name, we can lookup the consumer first
	// to which it should be attaching to.
	// If SkipConsumerLookup was used, do not call consumer info.
	if consumer != _EMPTY_ && !o.skipCInfo {
		info, err = js.ConsumerInfo(stream, consumer)
		notFoundErr = errors.Is(err, ErrConsumerNotFound)
		lookupErr = err == ErrJetStreamNotEnabled || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded)
	}

	switch {
	case info != nil:
		// Attaching to an existing consumer: validate compatibility and
		// adopt its heartbeat/flow-control/max-ack-pending settings.
		deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue)
		if err != nil {
			return nil, err
		}
		icfg := &info.Config
		hasFC, hbi = icfg.FlowControl, icfg.Heartbeat
		hasHeartbeats = hbi > 0
		maxap = icfg.MaxAckPending
	case (err != nil && !notFoundErr) || (notFoundErr && consumerBound):
		// If the consumer is being bound and we got an error on pull subscribe then allow the error.
		if !(isPullMode && lookupErr && consumerBound) {
			return nil, err
		}
	case skipCInfo:
		// When skipping consumer info, need to rely on the manually passed sub options
		// to match the expected behavior from the subscription.
		hasFC, hbi = o.cfg.FlowControl, o.cfg.Heartbeat
		hasHeartbeats = hbi > 0
		maxap = o.cfg.MaxAckPending
		deliver = o.cfg.DeliverSubject
		if consumerBound {
			break
		}

		// When not bound to a consumer already, proceed to create.
		fallthrough
	default:
		// Attempt to create consumer if not found nor using Bind.
		shouldCreate = true
		if o.cfg.DeliverSubject != _EMPTY_ {
			deliver = o.cfg.DeliverSubject
		} else if !isPullMode {
			deliver = nc.NewInbox()
			cfg.DeliverSubject = deliver
		}
		// Do filtering always, server will clear as needed.
		cfg.FilterSubject = subj

		// Pass the queue to the consumer config
		if queue != _EMPTY_ {
			cfg.DeliverGroup = queue
		}

		// If not set, default to deliver all
		if cfg.DeliverPolicy == deliverPolicyNotSet {
			cfg.DeliverPolicy = DeliverAllPolicy
		}
		// If not set, default to ack explicit.
		if cfg.AckPolicy == ackPolicyNotSet {
			cfg.AckPolicy = AckExplicitPolicy
		}
		// If not set, default to instant
		if cfg.ReplayPolicy == replayPolicyNotSet {
			cfg.ReplayPolicy = ReplayInstantPolicy
		}

		// If we have acks at all and the MaxAckPending is not set go ahead
		// and set to the internal max for channel based consumers
		if cfg.MaxAckPending == 0 && ch != nil && cfg.AckPolicy != AckNonePolicy {
			cfg.MaxAckPending = cap(ch)
		}
		// Create request here.
		ccreq = &createConsumerRequest{
			Stream: stream,
			Config: &cfg,
		}
		hbi = cfg.Heartbeat
	}

	if isPullMode {
		nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, consumer)
		deliver = nc.NewInbox()
		// for pull consumers, create a wildcard subscription to differentiate pull requests
		deliver += ".*"
	}

	// In case this has a context, then create a child context that
	// is possible to cancel via unsubscribe / drain.
	var cancel func()
	if ctx != nil {
		ctx, cancel = context.WithCancel(ctx)
	}

	jsi := &jsSub{
		js:       js,
		stream:   stream,
		consumer: consumer,
		deliver:  deliver,
		hbi:      hbi,
		ordered:  o.ordered,
		ccreq:    ccreq,
		dseq:     1,
		pull:     isPullMode,
		nms:      nms,
		psubj:    subj,
		cancel:   cancel,
		ackNone:  o.cfg.AckPolicy == AckNonePolicy,
	}

	// Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy
	if cb != nil && !o.mack && o.cfg.AckPolicy != AckNonePolicy {
		ocb := cb
		cb = func(m *Msg) { ocb(m); m.Ack() }
	}
	sub, err := nc.subscribe(deliver, queue, cb, ch, isSync, jsi)
	if err != nil {
		return nil, err
	}

	// If we fail and we had the sub we need to cleanup, but can't just do a straight Unsubscribe or Drain.
	// We need to clear the jsi so we do not remove any durables etc.
	cleanUpSub := func() {
		if sub != nil {
			sub.mu.Lock()
			sub.jsi = nil
			sub.mu.Unlock()
			sub.Unsubscribe()
		}
	}

	// If we are creating or updating let's process that request.
	consName := o.cfg.Name
	if shouldCreate {
		// NOTE(review): "" below is equivalent to the _EMPTY_ constant used
		// elsewhere in this file; kept as-is to avoid touching code in a
		// documentation pass.
		if cfg.Durable != "" {
			consName = cfg.Durable
		} else if consName == "" {
			consName = getHash(nuid.Next())
		}
		info, err := js.upsertConsumer(stream, consName, ccreq.Config)
		if err != nil {
			var apiErr *APIError
			if ok := errors.As(err, &apiErr); !ok {
				cleanUpSub()
				return nil, err
			}
			if consumer == _EMPTY_ ||
				(apiErr.ErrorCode != JSErrCodeConsumerAlreadyExists && apiErr.ErrorCode != JSErrCodeConsumerNameExists) {
				cleanUpSub()
				if errors.Is(apiErr, ErrStreamNotFound) {
					return nil, ErrStreamNotFound
				}
				return nil, err
			}
			// We will not be using this sub here if we were push based.
			if !isPullMode {
				cleanUpSub()
			}

			// The consumer already exists: fall back to attaching to it.
			info, err = js.ConsumerInfo(stream, consumer)
			if err != nil {
				return nil, err
			}
			deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue)
			if err != nil {
				return nil, err
			}

			if !isPullMode {
				// We can't reuse the channel, so if one was passed, we need to create a new one.
				if isSync {
					ch = make(chan *Msg, cap(ch))
				} else if ch != nil {
					// User provided (ChanSubscription), simply try to drain it.
					for done := false; !done; {
						select {
						case <-ch:
						default:
							done = true
						}
					}
				}
				jsi.deliver = deliver
				jsi.hbi = info.Config.Heartbeat

				// Recreate the subscription here.
				sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, isSync, jsi)
				if err != nil {
					return nil, err
				}
				hasFC = info.Config.FlowControl
				hasHeartbeats = info.Config.Heartbeat > 0
			}
		} else {
			// Since the library created the JS consumer, it will delete it on Unsubscribe()/Drain()
			sub.mu.Lock()
			sub.jsi.dc = true
			sub.jsi.pending = info.NumPending + info.Delivered.Consumer
			// If this is an ephemeral, we did not have a consumer name, we get it from the info
			// after the AddConsumer returns.
			if consumer == _EMPTY_ {
				sub.jsi.consumer = info.Name
				if isPullMode {
					sub.jsi.nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, info.Name)
				}
			}
			sub.mu.Unlock()
		}
		// Capture max ack pending from the info response here which covers both
		// success and failure followed by consumer lookup.
		maxap = info.Config.MaxAckPending
	}

	// If maxap is greater than the default sub's pending limit, use that.
	if maxap > DefaultSubPendingMsgsLimit {
		// For bytes limit, use the min of maxp*1MB or DefaultSubPendingBytesLimit
		bl := maxap * 1024 * 1024
		if bl < DefaultSubPendingBytesLimit {
			bl = DefaultSubPendingBytesLimit
		}
		if err := sub.SetPendingLimits(maxap, bl); err != nil {
			return nil, err
		}
	}

	// Do heartbeats last if needed.
	if hasHeartbeats {
		sub.scheduleHeartbeatCheck()
	}
	// For ChanSubscriptions, if we know that there is flow control, we will
	// start a go routine that evaluates the number of delivered messages
	// and process flow control.
	if sub.Type() == ChanSubscription && hasFC {
		sub.chanSubcheckForFlowControlResponse()
	}

	// Wait for context to get canceled if there is one.
	if ctx != nil {
		go func() {
			<-ctx.Done()
			sub.Unsubscribe()
		}()
	}

	return sub, nil
}
-
-// InitialConsumerPending returns the number of messages pending to be
-// delivered to the consumer when the subscription was created.
-func (sub *Subscription) InitialConsumerPending() (uint64, error) {
- sub.mu.Lock()
- defer sub.mu.Unlock()
- if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ {
- return 0, fmt.Errorf("%w: not a JetStream subscription", ErrTypeSubscription)
- }
- return sub.jsi.pending, nil
-}
-
// This long-lived routine is used per ChanSubscription to check
// on the number of delivered messages and check for flow control response.
// It re-arms itself through a time.AfterFunc timer and, when a flow control
// reply is due, publishes it outside of the subscription lock.
func (sub *Subscription) chanSubcheckForFlowControlResponse() {
	sub.mu.Lock()
	// We don't use defer since if we need to send an RC reply, we need
	// to do it outside the sub's lock. So doing explicit unlock...
	if sub.closed {
		sub.mu.Unlock()
		return
	}
	var fcReply string
	var nc *Conn

	jsi := sub.jsi
	if jsi.csfct == nil {
		// First invocation: create the recurring check timer.
		jsi.csfct = time.AfterFunc(chanSubFCCheckInterval, sub.chanSubcheckForFlowControlResponse)
	} else {
		fcReply = sub.checkForFlowControlResponse()
		nc = sub.conn
		// Do the reset here under the lock, it's ok...
		jsi.csfct.Reset(chanSubFCCheckInterval)
	}
	sub.mu.Unlock()
	// This call will return an error (which we don't care here)
	// if nc is nil or fcReply is empty.
	nc.Publish(fcReply, nil)
}
-
// ErrConsumerSequenceMismatch reports that a received heartbeat carried a
// consumer sequence different from the one expected by the client, meaning
// the consumer has fallen behind the stream.
type ErrConsumerSequenceMismatch struct {
	// StreamResumeSequence is the stream sequence from where the consumer
	// should resume consuming from the stream.
	StreamResumeSequence uint64

	// ConsumerSequence is the sequence of the consumer that is behind.
	ConsumerSequence uint64

	// LastConsumerSequence is the sequence of the consumer when the heartbeat
	// was received.
	LastConsumerSequence uint64
}

// Error implements the error interface.
func (ecs *ErrConsumerSequenceMismatch) Error() string {
	behind := ecs.LastConsumerSequence - ecs.ConsumerSequence
	return fmt.Sprintf(
		"nats: sequence mismatch for consumer at sequence %d (%d sequences behind), should restart consumer from stream sequence %d",
		ecs.ConsumerSequence, behind, ecs.StreamResumeSequence,
	)
}
-
-// isJSControlMessage will return true if this is an empty control status message
-// and indicate what type of control message it is, say jsCtrlHB or jsCtrlFC
-func isJSControlMessage(msg *Msg) (bool, int) {
- if len(msg.Data) > 0 || msg.Header.Get(statusHdr) != controlMsg {
- return false, 0
- }
- val := msg.Header.Get(descrHdr)
- if strings.HasPrefix(val, "Idle") {
- return true, jsCtrlHB
- }
- if strings.HasPrefix(val, "Flow") {
- return true, jsCtrlFC
- }
- return true, 0
-}
-
// Keeps track of the incoming message's reply subject so that the consumer's
// state (deliver sequence, etc..) can be checked against heartbeats.
// We will also bump the incoming data message sequence that is used in FC cases.
// Runs under the subscription lock
func (sub *Subscription) trackSequences(reply string) {
	// For flow control, keep track of incoming message sequence.
	sub.jsi.fciseq++
	// The reply subject embeds the ack metadata (stream/consumer sequences)
	// that heartbeats are later compared against.
	sub.jsi.cmeta = reply
}
-
// Check to make sure messages are arriving in order.
// Returns true if the sub had to be replaced. Will cause upper layers to return.
// The caller has verified that sub.jsi != nil and that this is not a control message.
// Lock should be held.
func (sub *Subscription) checkOrderedMsgs(m *Msg) bool {
	// Ignore msgs with no reply like HBs and flow control, they are handled elsewhere.
	if m.Reply == _EMPTY_ {
		return false
	}

	// Normal message here.
	tokens, err := parser.GetMetadataFields(m.Reply)
	if err != nil {
		return false
	}
	// Stream and consumer sequences embedded in the ack reply subject.
	sseq, dseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]), parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])

	jsi := sub.jsi
	if dseq != jsi.dseq {
		// Gap detected: recreate the ordered consumer starting at the
		// stream sequence following the last one we saw.
		sub.resetOrderedConsumer(jsi.sseq + 1)
		return true
	}
	// Update our tracking here.
	jsi.dseq, jsi.sseq = dseq+1, sseq
	return false
}
-
// Update and replace sid.
// Lock should be held on entry but will be unlocked to prevent lock inversion.
func (sub *Subscription) applyNewSID() (osid int64) {
	nc := sub.conn
	// Release the sub lock before taking the connection's subsMu to avoid
	// inverting the lock order.
	sub.mu.Unlock()

	nc.subsMu.Lock()
	osid = sub.sid
	delete(nc.subs, osid)
	// Place new one.
	nc.ssid++
	nsid := nc.ssid
	nc.subs[nsid] = sub
	nc.subsMu.Unlock()

	// Re-acquire the sub lock before returning, as the caller expects.
	sub.mu.Lock()
	sub.sid = nsid
	return osid
}
-
// We are here if we have detected a gap with an ordered consumer.
// We will create a new consumer and rewire the low level subscription.
// Lock should be held.
func (sub *Subscription) resetOrderedConsumer(sseq uint64) {
	nc := sub.conn
	if sub.jsi == nil || nc == nil || sub.closed {
		return
	}

	var maxStr string
	// If there was an AUTO_UNSUB done, we need to adjust the new value
	// to send after the SUB for the new sid.
	if sub.max > 0 {
		if sub.jsi.fciseq < sub.max {
			adjustedMax := sub.max - sub.jsi.fciseq
			maxStr = strconv.Itoa(int(adjustedMax))
		} else {
			// We are already at the max, so we should just unsub the
			// existing sub and be done
			go func(sid int64) {
				nc.mu.Lock()
				nc.bw.appendString(fmt.Sprintf(unsubProto, sid, _EMPTY_))
				nc.kickFlusher()
				nc.mu.Unlock()
			}(sub.sid)
			return
		}
	}

	// Quick unsubscribe. Since we know this is a simple push subscriber we do in place.
	osid := sub.applyNewSID()

	// Grab new inbox.
	newDeliver := nc.NewInbox()
	sub.Subject = newDeliver

	// Snapshot the new sid under sub lock.
	nsid := sub.sid

	// We are still in the low level readLoop for the connection so we need
	// to spin a go routine to try to create the new consumer.
	go func() {
		// Unsubscribe and subscribe with new inbox and sid.
		// Remap a new low level sub into this sub since its client accessible.
		// This is done here in this go routine to prevent lock inversion.
		nc.mu.Lock()
		nc.bw.appendString(fmt.Sprintf(unsubProto, osid, _EMPTY_))
		nc.bw.appendString(fmt.Sprintf(subProto, newDeliver, _EMPTY_, nsid))
		if maxStr != _EMPTY_ {
			nc.bw.appendString(fmt.Sprintf(unsubProto, nsid, maxStr))
		}
		nc.kickFlusher()
		nc.mu.Unlock()

		// pushErr surfaces err through the async error callback and tears
		// down the subscription.
		pushErr := func(err error) {
			nc.handleConsumerSequenceMismatch(sub, fmt.Errorf("%w: recreating ordered consumer", err))
			nc.unsubscribe(sub, 0, true)
		}

		sub.mu.Lock()
		jsi := sub.jsi
		// Reset some items in jsi.
		jsi.dseq = 1
		jsi.cmeta = _EMPTY_
		jsi.fcr, jsi.fcd = _EMPTY_, 0
		jsi.deliver = newDeliver
		// Reset consumer request for starting policy.
		cfg := jsi.ccreq.Config
		cfg.DeliverSubject = newDeliver
		cfg.DeliverPolicy = DeliverByStartSequencePolicy
		cfg.OptStartSeq = sseq
		// In case the consumer was created with a start time, we need to clear it
		// since we are now using a start sequence.
		cfg.OptStartTime = nil

		js := jsi.js
		sub.mu.Unlock()

		// NOTE(review): the lock is released and immediately re-acquired
		// here; the two critical sections could likely be merged. Kept as-is
		// to preserve the exact locking behavior.
		sub.mu.Lock()
		// Attempt to delete the existing consumer.
		// We don't wait for the response since even if it's unsuccessful,
		// inactivity threshold will kick in and delete it.
		if jsi.consumer != _EMPTY_ {
			go js.DeleteConsumer(jsi.stream, jsi.consumer)
		}
		jsi.consumer = ""
		sub.mu.Unlock()
		consName := getHash(nuid.Next())
		cinfo, err := js.upsertConsumer(jsi.stream, consName, cfg)
		if err != nil {
			var apiErr *APIError
			if errors.Is(err, ErrJetStreamNotEnabled) || errors.Is(err, ErrTimeout) || errors.Is(err, context.DeadlineExceeded) {
				// if creating consumer failed, retry
				return
			} else if errors.As(err, &apiErr) && apiErr.ErrorCode == JSErrCodeInsufficientResourcesErr {
				// retry for insufficient resources, as it may mean that client is connected to a running
				// server in cluster while the server hosting R1 JetStream resources is restarting
				return
			}
			pushErr(err)
			return
		}

		sub.mu.Lock()
		jsi.consumer = cinfo.Name
		sub.mu.Unlock()
	}()
}
-
-// For jetstream subscriptions, returns the number of delivered messages.
-// For ChanSubscription, this value is computed based on the known number
-// of messages added to the channel minus the current size of that channel.
-// Lock held on entry
-func (sub *Subscription) getJSDelivered() uint64 {
- if sub.typ == ChanSubscription {
- return sub.jsi.fciseq - uint64(len(sub.mch))
- }
- return sub.delivered
-}
-
-// checkForFlowControlResponse will check to see if we should send a flow control response
-// based on the subscription current delivered index and the target.
-// Runs under subscription lock
-func (sub *Subscription) checkForFlowControlResponse() string {
- // Caller has verified that there is a sub.jsi and fc
- jsi := sub.jsi
- jsi.active = true
- if sub.getJSDelivered() >= jsi.fcd {
- fcr := jsi.fcr
- jsi.fcr, jsi.fcd = _EMPTY_, 0
- return fcr
- }
- return _EMPTY_
-}
-
// Record an inbound flow control message.
// Runs under subscription lock
func (sub *Subscription) scheduleFlowControlResponse(reply string) {
	// Reply on this subject once the current incoming sequence (fciseq)
	// has been fully delivered.
	sub.jsi.fcr, sub.jsi.fcd = reply, sub.jsi.fciseq
}
-
// Checks for activity from our consumer.
// If we do not think we are active send an async error.
func (sub *Subscription) activityCheck() {
	sub.mu.Lock()
	jsi := sub.jsi
	if jsi == nil || sub.closed {
		sub.mu.Unlock()
		return
	}

	active := jsi.active
	// Re-arm the check timer and clear the activity flag for the next interval.
	jsi.hbc.Reset(jsi.hbi * hbcThresh)
	jsi.active = false
	nc := sub.conn
	sub.mu.Unlock()

	if !active {
		if !jsi.ordered || nc.Status() != CONNECTED {
			// Not an ordered consumer (or the connection is down): surface
			// the inactivity via the async error callback.
			nc.mu.Lock()
			if errCB := nc.Opts.AsyncErrorCB; errCB != nil {
				nc.ach.push(func() { errCB(nc, sub, ErrConsumerNotActive) })
			}
			nc.mu.Unlock()
			return
		}
		// Ordered consumer: transparently recreate it from the last known
		// stream sequence.
		sub.mu.Lock()
		sub.resetOrderedConsumer(jsi.sseq + 1)
		sub.mu.Unlock()
	}
}
-
-// scheduleHeartbeatCheck sets up the timer check to make sure we are active
-// or receiving idle heartbeats..
-func (sub *Subscription) scheduleHeartbeatCheck() {
- sub.mu.Lock()
- defer sub.mu.Unlock()
-
- jsi := sub.jsi
- if jsi == nil {
- return
- }
-
- if jsi.hbc == nil {
- jsi.hbc = time.AfterFunc(jsi.hbi*hbcThresh, sub.activityCheck)
- } else {
- jsi.hbc.Reset(jsi.hbi * hbcThresh)
- }
-}
-
-// handleConsumerSequenceMismatch will send an async error that can be used to restart a push based consumer.
-func (nc *Conn) handleConsumerSequenceMismatch(sub *Subscription, err error) {
- nc.mu.Lock()
- errCB := nc.Opts.AsyncErrorCB
- if errCB != nil {
- nc.ach.push(func() { errCB(nc, sub, err) })
- }
- nc.mu.Unlock()
-}
-
// checkForSequenceMismatch will make sure we have not missed any messages since last seen.
func (nc *Conn) checkForSequenceMismatch(msg *Msg, s *Subscription, jsi *jsSub) {
	// Process heartbeat received, get latest control metadata if present.
	s.mu.Lock()
	ctrl, ordered := jsi.cmeta, jsi.ordered
	jsi.active = true
	s.mu.Unlock()

	// No data message tracked yet, nothing to compare against.
	if ctrl == _EMPTY_ {
		return
	}

	tokens, err := parser.GetMetadataFields(ctrl)
	if err != nil {
		return
	}

	// Consumer sequence.
	var ldseq string
	dseq := tokens[parser.AckConsumerSeqTokenPos]
	hdr := msg.Header[lastConsumerSeqHdr]
	if len(hdr) == 1 {
		ldseq = hdr[0]
	}

	// Detect consumer sequence mismatch and whether
	// should restart the consumer.
	if ldseq != dseq {
		// Dispatch async error including details such as
		// from where the consumer could be restarted.
		sseq := parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
		if ordered {
			// Ordered consumers are recreated transparently.
			s.mu.Lock()
			s.resetOrderedConsumer(jsi.sseq + 1)
			s.mu.Unlock()
		} else {
			ecs := &ErrConsumerSequenceMismatch{
				StreamResumeSequence: uint64(sseq),
				ConsumerSequence:     parser.ParseNum(dseq),
				LastConsumerSequence: parser.ParseNum(ldseq),
			}
			nc.handleConsumerSequenceMismatch(s, ecs)
		}
	}
}
-
// streamRequest is the request payload used to look up stream names by subject.
type streamRequest struct {
	Subject string `json:"subject,omitempty"`
}
-
// streamNamesResponse is the (paged) API response carrying a list of stream names.
type streamNamesResponse struct {
	apiResponse
	apiPaged
	Streams []string `json:"streams"`
}
-
// subOpts accumulates the settings applied by SubOpt values during subscribe.
type subOpts struct {
	// For attaching.
	stream, consumer string
	// For creating or updating.
	cfg *ConsumerConfig
	// For binding a subscription to a consumer without creating it.
	bound bool
	// For manual ack
	mack bool
	// For an ordered consumer.
	ordered bool
	// Optional context used to cancel/unsubscribe the subscription.
	ctx context.Context

	// To disable calling ConsumerInfo
	skipCInfo bool
}
-
-// SkipConsumerLookup will omit looking up consumer when [Bind], [Durable]
-// or [ConsumerName] are provided.
-//
-// NOTE: This setting may cause an existing consumer to be overwritten. Also,
-// because consumer lookup is skipped, all consumer options like AckPolicy,
-// DeliverSubject etc. need to be provided even if consumer already exists.
-func SkipConsumerLookup() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.skipCInfo = true
- return nil
- })
-}
-
-// OrderedConsumer will create a FIFO direct/ephemeral consumer for in order delivery of messages.
-// There are no redeliveries and no acks, and flow control and heartbeats will be added but
-// will be taken care of without additional client code.
-func OrderedConsumer() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.ordered = true
- return nil
- })
-}
-
-// ManualAck disables auto ack functionality for async subscriptions.
-func ManualAck() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.mack = true
- return nil
- })
-}
-
-// Description will set the description for the created consumer.
-func Description(description string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.Description = description
- return nil
- })
-}
-
-// Durable defines the consumer name for JetStream durable subscribers.
-// This function will return ErrInvalidConsumerName if the name contains
-// any dot ".".
-func Durable(consumer string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- if opts.cfg.Durable != _EMPTY_ {
- return fmt.Errorf("nats: option Durable set more than once")
- }
- if opts.consumer != _EMPTY_ && opts.consumer != consumer {
- return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer)
- }
- if err := checkConsumerName(consumer); err != nil {
- return err
- }
-
- opts.cfg.Durable = consumer
- return nil
- })
-}
-
-// DeliverAll will configure a Consumer to receive all the
-// messages from a Stream.
-func DeliverAll() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverPolicy = DeliverAllPolicy
- return nil
- })
-}
-
-// DeliverLast configures a Consumer to receive messages
-// starting with the latest one.
-func DeliverLast() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverPolicy = DeliverLastPolicy
- return nil
- })
-}
-
-// DeliverLastPerSubject configures a Consumer to receive messages
-// starting with the latest one for each filtered subject.
-func DeliverLastPerSubject() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverPolicy = DeliverLastPerSubjectPolicy
- return nil
- })
-}
-
-// DeliverNew configures a Consumer to receive messages
-// published after the subscription.
-func DeliverNew() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverPolicy = DeliverNewPolicy
- return nil
- })
-}
-
-// StartSequence configures a Consumer to receive
-// messages from a start sequence.
-func StartSequence(seq uint64) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverPolicy = DeliverByStartSequencePolicy
- opts.cfg.OptStartSeq = seq
- return nil
- })
-}
-
-// StartTime configures a Consumer to receive
-// messages from a start time.
-func StartTime(startTime time.Time) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverPolicy = DeliverByStartTimePolicy
- opts.cfg.OptStartTime = &startTime
- return nil
- })
-}
-
-// AckNone requires no acks for delivered messages.
-func AckNone() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.AckPolicy = AckNonePolicy
- return nil
- })
-}
-
-// AckAll when acking a sequence number, this implicitly acks all sequences
-// below this one as well.
-func AckAll() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.AckPolicy = AckAllPolicy
- return nil
- })
-}
-
-// AckExplicit requires ack or nack for all messages.
-func AckExplicit() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.AckPolicy = AckExplicitPolicy
- return nil
- })
-}
-
-// MaxDeliver sets the number of redeliveries for a message.
-func MaxDeliver(n int) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MaxDeliver = n
- return nil
- })
-}
-
-// MaxAckPending sets the number of outstanding acks that are allowed before
-// message delivery is halted.
-func MaxAckPending(n int) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MaxAckPending = n
- return nil
- })
-}
-
-// ReplayOriginal replays the messages at the original speed.
-func ReplayOriginal() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.ReplayPolicy = ReplayOriginalPolicy
- return nil
- })
-}
-
-// ReplayInstant replays the messages as fast as possible.
-func ReplayInstant() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.ReplayPolicy = ReplayInstantPolicy
- return nil
- })
-}
-
-// RateLimit is the Bits per sec rate limit applied to a push consumer.
-func RateLimit(n uint64) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.RateLimit = n
- return nil
- })
-}
-
-// BackOff is an array of time durations that represent the time to delay based on delivery count.
-func BackOff(backOff []time.Duration) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.BackOff = backOff
- return nil
- })
-}
-
-// BindStream binds a consumer to a stream explicitly based on a name.
-// When a stream name is not specified, the library uses the subscribe
-// subject as a way to find the stream name. It is done by making a request
-// to the server to get list of stream names that have a filter for this
-// subject. If the returned list contains a single stream, then this
-// stream name will be used, otherwise the `ErrNoMatchingStream` is returned.
-// To avoid the stream lookup, provide the stream name with this function.
-// See also `Bind()`.
-func BindStream(stream string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- if opts.stream != _EMPTY_ && opts.stream != stream {
- return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream)
- }
-
- opts.stream = stream
- return nil
- })
-}
-
-// Bind binds a subscription to an existing consumer from a stream without attempting to create.
-// The first argument is the stream name and the second argument will be the consumer name.
-func Bind(stream, consumer string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- if stream == _EMPTY_ {
- return ErrStreamNameRequired
- }
- if consumer == _EMPTY_ {
- return ErrConsumerNameRequired
- }
-
- // In case of pull subscribers, the durable name is a required parameter
- // so check that they are not different.
- if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer {
- return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer)
- }
- if opts.stream != _EMPTY_ && opts.stream != stream {
- return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream)
- }
- opts.stream = stream
- opts.consumer = consumer
- opts.bound = true
- return nil
- })
-}
-
-// EnableFlowControl enables flow control for a push based consumer.
-func EnableFlowControl() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.FlowControl = true
- return nil
- })
-}
-
-// IdleHeartbeat enables push based consumers to have idle heartbeats delivered.
-// For pull consumers, idle heartbeat has to be set on each [Fetch] call.
-func IdleHeartbeat(duration time.Duration) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.Heartbeat = duration
- return nil
- })
-}
-
-// DeliverSubject specifies the JetStream consumer deliver subject.
-//
-// This option is used only in situations where the consumer does not exist
-// and a creation request is sent to the server. If not provided, an inbox
-// will be selected.
-// If a consumer exists, then the NATS subscription will be created on
-// the JetStream consumer's DeliverSubject, not necessarily this subject.
-func DeliverSubject(subject string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.DeliverSubject = subject
- return nil
- })
-}
-
-// HeadersOnly() will instruct the consumer to only deliver headers and no payloads.
-func HeadersOnly() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.HeadersOnly = true
- return nil
- })
-}
-
-// MaxRequestBatch sets the maximum pull consumer batch size that a Fetch()
-// can request.
-func MaxRequestBatch(max int) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MaxRequestBatch = max
- return nil
- })
-}
-
-// MaxRequestExpires sets the maximum pull consumer request expiration that a
-// Fetch() can request (using the Fetch's timeout value).
-func MaxRequestExpires(max time.Duration) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MaxRequestExpires = max
- return nil
- })
-}
-
-// MaxRequesMaxBytes sets the maximum pull consumer request bytes that a
-// Fetch() can receive.
-func MaxRequestMaxBytes(bytes int) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MaxRequestMaxBytes = bytes
- return nil
- })
-}
-
-// InactiveThreshold indicates how long the server should keep a consumer
-// after detecting a lack of activity. In NATS Server 2.8.4 and earlier, this
-// option only applies to ephemeral consumers. In NATS Server 2.9.0 and later,
-// this option applies to both ephemeral and durable consumers, allowing durable
-// consumers to also be deleted automatically after the inactivity threshold has
-// passed.
-func InactiveThreshold(threshold time.Duration) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- if threshold < 0 {
- return fmt.Errorf("invalid InactiveThreshold value (%v), needs to be greater or equal to 0", threshold)
- }
- opts.cfg.InactiveThreshold = threshold
- return nil
- })
-}
-
-// ConsumerReplicas sets the number of replica count for a consumer.
-func ConsumerReplicas(replicas int) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- if replicas < 1 {
- return fmt.Errorf("invalid ConsumerReplicas value (%v), needs to be greater than 0", replicas)
- }
- opts.cfg.Replicas = replicas
- return nil
- })
-}
-
-// ConsumerMemoryStorage sets the memory storage to true for a consumer.
-func ConsumerMemoryStorage() SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MemoryStorage = true
- return nil
- })
-}
-
-// ConsumerName sets the name for a consumer.
-func ConsumerName(name string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.Name = name
- return nil
- })
-}
-
-// ConsumerFilterSubjects can be used to set multiple subject filters on the consumer.
-// It has to be used in conjunction with [nats.BindStream] and
-// with empty 'subject' parameter.
-func ConsumerFilterSubjects(subjects ...string) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.FilterSubjects = subjects
- return nil
- })
-}
-
-func (sub *Subscription) ConsumerInfo() (*ConsumerInfo, error) {
- sub.mu.Lock()
- // TODO(dlc) - Better way to mark especially if we attach.
- if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ {
- sub.mu.Unlock()
- return nil, ErrTypeSubscription
- }
-
- // Consumer info lookup should fail if in direct mode.
- js := sub.jsi.js
- stream, consumer := sub.jsi.stream, sub.jsi.consumer
- sub.mu.Unlock()
-
- return js.getConsumerInfo(stream, consumer)
-}
-
-type pullOpts struct {
- maxBytes int
- ttl time.Duration
- ctx context.Context
- hb time.Duration
-}
-
-// PullOpt are the options that can be passed when pulling a batch of messages.
-type PullOpt interface {
- configurePull(opts *pullOpts) error
-}
-
-// PullMaxWaiting defines the max inflight pull requests.
-func PullMaxWaiting(n int) SubOpt {
- return subOptFn(func(opts *subOpts) error {
- opts.cfg.MaxWaiting = n
- return nil
- })
-}
-
-type PullHeartbeat time.Duration
-
-func (h PullHeartbeat) configurePull(opts *pullOpts) error {
- if h <= 0 {
- return fmt.Errorf("%w: idle heartbeat has to be greater than 0", ErrInvalidArg)
- }
- opts.hb = time.Duration(h)
- return nil
-}
-
-// PullMaxBytes defines the max bytes allowed for a fetch request.
-type PullMaxBytes int
-
-func (n PullMaxBytes) configurePull(opts *pullOpts) error {
- opts.maxBytes = int(n)
- return nil
-}
-
-var (
- // errNoMessages is an error that a Fetch request using no_wait can receive to signal
- // that there are no more messages available.
- errNoMessages = errors.New("nats: no messages")
-
- // errRequestsPending is an error that represents a sub.Fetch requests that was using
- // no_wait and expires time got discarded by the server.
- errRequestsPending = errors.New("nats: requests pending")
-)
-
-// Returns if the given message is a user message or not, and if
-// `checkSts` is true, returns appropriate error based on the
-// content of the status (404, etc..)
-func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) {
- // Assume user message
- usrMsg = true
-
- // If payload or no header, consider this a user message
- if len(msg.Data) > 0 || len(msg.Header) == 0 {
- return
- }
- // Look for status header
- val := msg.Header.Get(statusHdr)
- // If not present, then this is considered a user message
- if val == _EMPTY_ {
- return
- }
- // At this point, this is not a user message since there is
- // no payload and a "Status" header.
- usrMsg = false
-
- // If we don't care about status, we are done.
- if !checkSts {
- return
- }
-
- // if it's a heartbeat message, report as not user msg
- if isHb, _ := isJSControlMessage(msg); isHb {
- return
- }
- switch val {
- case noResponders:
- err = ErrNoResponders
- case noMessagesSts:
- // 404 indicates that there are no messages.
- err = errNoMessages
- case reqTimeoutSts:
- // In case of a fetch request with no wait request and expires time,
- // need to skip 408 errors and retry.
- if isNoWait {
- err = errRequestsPending
- } else {
- // Older servers may send a 408 when a request in the server was expired
- // and interest is still found, which will be the case for our
- // implementation. Regardless, ignore 408 errors until receiving at least
- // one message when making requests without no_wait.
- err = ErrTimeout
- }
- case jetStream409Sts:
- if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "consumer deleted") {
- err = ErrConsumerDeleted
- break
- }
-
- if strings.Contains(strings.ToLower(msg.Header.Get(descrHdr)), "leadership change") {
- err = ErrConsumerLeadershipChanged
- break
- }
- fallthrough
- default:
- err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr))
- }
- return
-}
-
-// Fetch pulls a batch of messages from a stream for a pull consumer.
-func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
- if sub == nil {
- return nil, ErrBadSubscription
- }
- if batch < 1 {
- return nil, ErrInvalidArg
- }
-
- var o pullOpts
- for _, opt := range opts {
- if err := opt.configurePull(&o); err != nil {
- return nil, err
- }
- }
- if o.ctx != nil && o.ttl != 0 {
- return nil, ErrContextAndTimeout
- }
-
- sub.mu.Lock()
- jsi := sub.jsi
- // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription,
- // so check for jsi.pull boolean instead.
- if jsi == nil || !jsi.pull {
- sub.mu.Unlock()
- return nil, ErrTypeSubscription
- }
-
- nc := sub.conn
- nms := sub.jsi.nms
- rply, _ := newFetchInbox(jsi.deliver)
- js := sub.jsi.js
- pmc := len(sub.mch) > 0
-
- // All fetch requests have an expiration, in case of no explicit expiration
- // then the default timeout of the JetStream context is used.
- ttl := o.ttl
- if ttl == 0 {
- ttl = js.opts.wait
- }
- sub.mu.Unlock()
-
- // Use the given context or setup a default one for the span
- // of the pull batch request.
- var (
- ctx = o.ctx
- err error
- cancel context.CancelFunc
- )
- if ctx == nil {
- ctx, cancel = context.WithTimeout(context.Background(), ttl)
- } else if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- // Prevent from passing the background context which will just block
- // and cannot be canceled either.
- if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() {
- return nil, ErrNoDeadlineContext
- }
-
- // If the context did not have a deadline, then create a new child context
- // that will use the default timeout from the JS context.
- ctx, cancel = context.WithTimeout(ctx, ttl)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer cancel()
-
- // if heartbeat is set, validate it against the context timeout
- if o.hb > 0 {
- deadline, _ := ctx.Deadline()
- if 2*o.hb >= time.Until(deadline) {
- return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg)
- }
- }
-
- // Check if context not done already before making the request.
- select {
- case <-ctx.Done():
- if o.ctx != nil { // Timeout or Cancel triggered by context object option
- err = ctx.Err()
- } else { // Timeout triggered by timeout option
- err = ErrTimeout
- }
- default:
- }
- if err != nil {
- return nil, err
- }
-
- var (
- msgs = make([]*Msg, 0, batch)
- msg *Msg
- )
- for pmc && len(msgs) < batch {
- // Check next msg with booleans that say that this is an internal call
- // for a pull subscribe (so don't reject it) and don't wait if there
- // are no messages.
- msg, err = sub.nextMsgWithContext(ctx, true, false)
- if err != nil {
- if errors.Is(err, errNoMessages) {
- err = nil
- }
- break
- }
- // Check msg but just to determine if this is a user message
- // or status message, however, we don't care about values of status
- // messages at this point in the Fetch() call, so checkMsg can't
- // return an error.
- if usrMsg, _ := checkMsg(msg, false, false); usrMsg {
- msgs = append(msgs, msg)
- }
- }
- var hbTimer *time.Timer
- var hbErr error
- sub.mu.Lock()
- subClosed := sub.closed || sub.draining
- sub.mu.Unlock()
- if subClosed {
- err = errors.Join(ErrBadSubscription, ErrSubscriptionClosed)
- }
- hbLock := sync.Mutex{}
- if err == nil && len(msgs) < batch && !subClosed {
- // For batch real size of 1, it does not make sense to set no_wait in
- // the request.
- noWait := batch-len(msgs) > 1
-
- var nr nextRequest
-
- sendReq := func() error {
- // The current deadline for the context will be used
- // to set the expires TTL for a fetch request.
- deadline, _ := ctx.Deadline()
- ttl = time.Until(deadline)
-
- // Check if context has already been canceled or expired.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- // Make our request expiration a bit shorter than the current timeout.
- expires := ttl
- if ttl >= 20*time.Millisecond {
- expires = ttl - 10*time.Millisecond
- }
-
- nr.Batch = batch - len(msgs)
- nr.Expires = expires
- nr.NoWait = noWait
- nr.MaxBytes = o.maxBytes
- if 2*o.hb < expires {
- nr.Heartbeat = o.hb
- } else {
- nr.Heartbeat = 0
- }
- req, _ := json.Marshal(nr)
- if err := nc.PublishRequest(nms, rply, req); err != nil {
- return err
- }
- if o.hb > 0 {
- if hbTimer == nil {
- hbTimer = time.AfterFunc(2*o.hb, func() {
- hbLock.Lock()
- hbErr = ErrNoHeartbeat
- hbLock.Unlock()
- cancel()
- })
- } else {
- hbTimer.Reset(2 * o.hb)
- }
- }
- return nil
- }
-
- err = sendReq()
- for err == nil && len(msgs) < batch {
- // Ask for next message and wait if there are no messages
- msg, err = sub.nextMsgWithContext(ctx, true, true)
- if err == nil {
- if hbTimer != nil {
- hbTimer.Reset(2 * o.hb)
- }
- var usrMsg bool
-
- usrMsg, err = checkMsg(msg, true, noWait)
- if err == nil && usrMsg {
- msgs = append(msgs, msg)
- } else if noWait && (errors.Is(err, errNoMessages) || errors.Is(err, errRequestsPending)) && len(msgs) == 0 {
- // If we have a 404/408 for our "no_wait" request and have
- // not collected any message, then resend request to
- // wait this time.
- noWait = false
- err = sendReq()
- } else if errors.Is(err, ErrTimeout) && len(msgs) == 0 {
- // If we get a 408, we will bail if we already collected some
- // messages, otherwise ignore and go back calling nextMsg.
- err = nil
- }
- }
- }
- if hbTimer != nil {
- hbTimer.Stop()
- }
- }
- // If there is at least a message added to msgs, then need to return OK and no error
- if err != nil && len(msgs) == 0 {
- hbLock.Lock()
- defer hbLock.Unlock()
- if hbErr != nil {
- return nil, hbErr
- }
- return nil, o.checkCtxErr(err)
- }
- return msgs, nil
-}
-
-// newFetchInbox returns subject used as reply subject when sending pull requests
-// as well as request ID. For non-wildcard subject, request ID is empty and
-// passed subject is not transformed
-func newFetchInbox(subj string) (string, string) {
- if !strings.HasSuffix(subj, ".*") {
- return subj, ""
- }
- reqID := nuid.Next()
- var sb strings.Builder
- sb.WriteString(subj[:len(subj)-1])
- sb.WriteString(reqID)
- return sb.String(), reqID
-}
-
-func subjectMatchesReqID(subject, reqID string) bool {
- subjectParts := strings.Split(subject, ".")
- if len(subjectParts) < 2 {
- return false
- }
- return subjectParts[len(subjectParts)-1] == reqID
-}
-
-// MessageBatch provides methods to retrieve messages consumed using [Subscribe.FetchBatch].
-type MessageBatch interface {
- // Messages returns a channel on which messages will be published.
- Messages() <-chan *Msg
-
- // Error returns an error encountered when fetching messages.
- Error() error
-
- // Done signals end of execution.
- Done() <-chan struct{}
-}
-
-type messageBatch struct {
- msgs chan *Msg
- err error
- done chan struct{}
-}
-
-func (mb *messageBatch) Messages() <-chan *Msg {
- return mb.msgs
-}
-
-func (mb *messageBatch) Error() error {
- return mb.err
-}
-
-func (mb *messageBatch) Done() <-chan struct{} {
- return mb.done
-}
-
-// FetchBatch pulls a batch of messages from a stream for a pull consumer.
-// Unlike [Subscription.Fetch], it is non blocking and returns [MessageBatch],
-// allowing to retrieve incoming messages from a channel.
-// The returned channel is always closed after all messages for a batch have been
-// delivered by the server - it is safe to iterate over it using range.
-//
-// To avoid using default JetStream timeout as fetch expiry time, use [nats.MaxWait]
-// or [nats.Context] (with deadline set).
-//
-// This method will not return error in case of pull request expiry (even if there are no messages).
-// Any other error encountered when receiving messages will cause FetchBatch to stop receiving new messages.
-func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, error) {
- if sub == nil {
- return nil, ErrBadSubscription
- }
- if batch < 1 {
- return nil, ErrInvalidArg
- }
-
- var o pullOpts
- for _, opt := range opts {
- if err := opt.configurePull(&o); err != nil {
- return nil, err
- }
- }
- if o.ctx != nil && o.ttl != 0 {
- return nil, ErrContextAndTimeout
- }
- sub.mu.Lock()
- jsi := sub.jsi
- // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription,
- // so check for jsi.pull boolean instead.
- if jsi == nil || !jsi.pull {
- sub.mu.Unlock()
- return nil, ErrTypeSubscription
- }
-
- nc := sub.conn
- nms := sub.jsi.nms
- rply, reqID := newFetchInbox(sub.jsi.deliver)
- js := sub.jsi.js
- pmc := len(sub.mch) > 0
-
- // All fetch requests have an expiration, in case of no explicit expiration
- // then the default timeout of the JetStream context is used.
- ttl := o.ttl
- if ttl == 0 {
- ttl = js.opts.wait
- }
- sub.mu.Unlock()
-
- // Use the given context or setup a default one for the span
- // of the pull batch request.
- var (
- ctx = o.ctx
- cancel context.CancelFunc
- cancelContext = true
- )
- if ctx == nil {
- ctx, cancel = context.WithTimeout(context.Background(), ttl)
- } else if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- // Prevent from passing the background context which will just block
- // and cannot be canceled either.
- if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() {
- return nil, ErrNoDeadlineContext
- }
-
- // If the context did not have a deadline, then create a new child context
- // that will use the default timeout from the JS context.
- ctx, cancel = context.WithTimeout(ctx, ttl)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer func() {
- // only cancel the context here if we are sure the fetching goroutine has not been started yet
- if cancelContext {
- cancel()
- }
- }()
-
- // if heartbeat is set, validate it against the context timeout
- if o.hb > 0 {
- deadline, _ := ctx.Deadline()
- if 2*o.hb >= time.Until(deadline) {
- return nil, fmt.Errorf("%w: idle heartbeat value too large", ErrInvalidArg)
- }
- }
-
- // Check if context not done already before making the request.
- select {
- case <-ctx.Done():
- if o.ctx != nil { // Timeout or Cancel triggered by context object option
- return nil, ctx.Err()
- } else { // Timeout triggered by timeout option
- return nil, ErrTimeout
- }
- default:
- }
-
- result := &messageBatch{
- msgs: make(chan *Msg, batch),
- done: make(chan struct{}, 1),
- }
- var msg *Msg
- for pmc && len(result.msgs) < batch {
- // Check next msg with booleans that say that this is an internal call
- // for a pull subscribe (so don't reject it) and don't wait if there
- // are no messages.
- msg, err := sub.nextMsgWithContext(ctx, true, false)
- if err != nil {
- if errors.Is(err, errNoMessages) {
- err = nil
- }
- result.err = err
- break
- }
- // Check msg but just to determine if this is a user message
- // or status message, however, we don't care about values of status
- // messages at this point in the Fetch() call, so checkMsg can't
- // return an error.
- if usrMsg, _ := checkMsg(msg, false, false); usrMsg {
- result.msgs <- msg
- }
- }
- sub.mu.Lock()
- subClosed := sub.closed || sub.draining
- sub.mu.Unlock()
- if len(result.msgs) == batch || result.err != nil || subClosed {
- close(result.msgs)
- if subClosed && len(result.msgs) == 0 {
- return nil, errors.Join(ErrBadSubscription, ErrSubscriptionClosed)
- }
- result.done <- struct{}{}
- return result, nil
- }
-
- deadline, _ := ctx.Deadline()
- ttl = time.Until(deadline)
-
- // Make our request expiration a bit shorter than the current timeout.
- expires := ttl
- if ttl >= 20*time.Millisecond {
- expires = ttl - 10*time.Millisecond
- }
-
- requestBatch := batch - len(result.msgs)
- req := nextRequest{
- Expires: expires,
- Batch: requestBatch,
- MaxBytes: o.maxBytes,
- Heartbeat: o.hb,
- }
- reqJSON, err := json.Marshal(req)
- if err != nil {
- close(result.msgs)
- result.done <- struct{}{}
- result.err = err
- return result, nil
- }
- if err := nc.PublishRequest(nms, rply, reqJSON); err != nil {
- if len(result.msgs) == 0 {
- return nil, err
- }
- close(result.msgs)
- result.done <- struct{}{}
- result.err = err
- return result, nil
- }
- var hbTimer *time.Timer
- var hbErr error
- hbLock := sync.Mutex{}
- if o.hb > 0 {
- hbTimer = time.AfterFunc(2*o.hb, func() {
- hbLock.Lock()
- hbErr = ErrNoHeartbeat
- hbLock.Unlock()
- cancel()
- })
- }
- cancelContext = false
- go func() {
- defer cancel()
- var requestMsgs int
- for requestMsgs < requestBatch {
- // Ask for next message and wait if there are no messages
- msg, err = sub.nextMsgWithContext(ctx, true, true)
- if err != nil {
- break
- }
- if hbTimer != nil {
- hbTimer.Reset(2 * o.hb)
- }
- var usrMsg bool
-
- usrMsg, err = checkMsg(msg, true, false)
- if err != nil {
- if errors.Is(err, ErrTimeout) {
- if reqID != "" && !subjectMatchesReqID(msg.Subject, reqID) {
- // ignore timeout message from server if it comes from a different pull request
- continue
- }
- err = nil
- }
- break
- }
- if usrMsg {
- result.msgs <- msg
- requestMsgs++
- }
- }
- if err != nil {
- hbLock.Lock()
- if hbErr != nil {
- result.err = hbErr
- } else {
- result.err = o.checkCtxErr(err)
- }
- hbLock.Unlock()
- }
- close(result.msgs)
- result.done <- struct{}{}
- }()
- return result, nil
-}
-
-// checkCtxErr is used to determine whether ErrTimeout should be returned in case of context timeout
-func (o *pullOpts) checkCtxErr(err error) error {
- if o.ctx == nil && errors.Is(err, context.DeadlineExceeded) {
- return ErrTimeout
- }
- return err
-}
-
-func (js *js) getConsumerInfo(stream, consumer string) (*ConsumerInfo, error) {
- ctx, cancel := context.WithTimeout(context.Background(), js.opts.wait)
- defer cancel()
- return js.getConsumerInfoContext(ctx, stream, consumer)
-}
-
-func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer string) (*ConsumerInfo, error) {
- ccInfoSubj := fmt.Sprintf(apiConsumerInfoT, stream, consumer)
- resp, err := js.apiRequestWithContext(ctx, js.apiSubj(ccInfoSubj), nil)
- if err != nil {
- if errors.Is(err, ErrNoResponders) {
- err = ErrJetStreamNotEnabled
- }
- return nil, err
- }
-
- var info consumerResponse
- if err := json.Unmarshal(resp.Data, &info); err != nil {
- return nil, err
- }
- if info.Error != nil {
- if errors.Is(info.Error, ErrConsumerNotFound) {
- return nil, ErrConsumerNotFound
- }
- if errors.Is(info.Error, ErrStreamNotFound) {
- return nil, ErrStreamNotFound
- }
- return nil, info.Error
- }
- if info.Error == nil && info.ConsumerInfo == nil {
- return nil, ErrConsumerNotFound
- }
- return info.ConsumerInfo, nil
-}
-
-// a RequestWithContext with tracing via TraceCB
-func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
- if js.opts.shouldTrace {
- ctrace := js.opts.ctrace
- if ctrace.RequestSent != nil {
- ctrace.RequestSent(subj, data)
- }
- }
- resp, err := js.nc.RequestWithContext(ctx, subj, data)
- if err != nil {
- return nil, err
- }
- if js.opts.shouldTrace {
- ctrace := js.opts.ctrace
- if ctrace.RequestSent != nil {
- ctrace.ResponseReceived(subj, resp.Data, resp.Header)
- }
- }
-
- return resp, nil
-}
-
-func (m *Msg) checkReply() error {
- if m == nil || m.Sub == nil {
- return ErrMsgNotBound
- }
- if m.Reply == _EMPTY_ {
- return ErrMsgNoReply
- }
- return nil
-}
-
-// ackReply handles all acks. Will do the right thing for pull and sync mode.
-// It ensures that an ack is only sent a single time, regardless of
-// how many times it is being called to avoid duplicated acks.
-func (m *Msg) ackReply(ackType []byte, sync bool, opts ...AckOpt) error {
- var o ackOpts
- for _, opt := range opts {
- if err := opt.configureAck(&o); err != nil {
- return err
- }
- }
-
- if err := m.checkReply(); err != nil {
- return err
- }
-
- var ackNone bool
- var js *js
-
- sub := m.Sub
- sub.mu.Lock()
- nc := sub.conn
- if jsi := sub.jsi; jsi != nil {
- js = jsi.js
- ackNone = jsi.ackNone
- }
- sub.mu.Unlock()
-
- // Skip if already acked.
- if atomic.LoadUint32(&m.ackd) == 1 {
- return ErrMsgAlreadyAckd
- }
- if ackNone {
- return ErrCantAckIfConsumerAckNone
- }
-
- usesCtx := o.ctx != nil
- usesWait := o.ttl > 0
-
- // Only allow either AckWait or Context option to set the timeout.
- if usesWait && usesCtx {
- return ErrContextAndTimeout
- }
-
- sync = sync || usesCtx || usesWait
- ctx := o.ctx
- wait := defaultRequestWait
- if usesWait {
- wait = o.ttl
- } else if js != nil {
- wait = js.opts.wait
- }
-
- var body []byte
- var err error
- // This will be > 0 only when called from NakWithDelay()
- if o.nakDelay > 0 {
- body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, o.nakDelay.Nanoseconds()))
- } else {
- body = ackType
- }
-
- if sync {
- if usesCtx {
- _, err = nc.RequestWithContext(ctx, m.Reply, body)
- } else {
- _, err = nc.Request(m.Reply, body, wait)
- }
- } else {
- err = nc.Publish(m.Reply, body)
- }
-
- // Mark that the message has been acked unless it is ackProgress
- // which can be sent many times.
- if err == nil && !bytes.Equal(ackType, ackProgress) {
- atomic.StoreUint32(&m.ackd, 1)
- }
-
- return err
-}
-
-// Ack acknowledges a message. This tells the server that the message was
-// successfully processed and it can move on to the next message.
-func (m *Msg) Ack(opts ...AckOpt) error {
- return m.ackReply(ackAck, false, opts...)
-}
-
-// AckSync is the synchronous version of Ack. This indicates successful message
-// processing.
-func (m *Msg) AckSync(opts ...AckOpt) error {
- return m.ackReply(ackAck, true, opts...)
-}
-
-// Nak negatively acknowledges a message. This tells the server to redeliver
-// the message. You can configure the number of redeliveries by passing
-// nats.MaxDeliver when you Subscribe. The default is infinite redeliveries.
-func (m *Msg) Nak(opts ...AckOpt) error {
- return m.ackReply(ackNak, false, opts...)
-}
-
-// Nak negatively acknowledges a message. This tells the server to redeliver
-// the message after the give `delay` duration. You can configure the number
-// of redeliveries by passing nats.MaxDeliver when you Subscribe.
-// The default is infinite redeliveries.
-func (m *Msg) NakWithDelay(delay time.Duration, opts ...AckOpt) error {
- if delay > 0 {
- opts = append(opts, nakDelay(delay))
- }
- return m.ackReply(ackNak, false, opts...)
-}
-
-// Term tells the server to not redeliver this message, regardless of the value
-// of nats.MaxDeliver.
-func (m *Msg) Term(opts ...AckOpt) error {
- return m.ackReply(ackTerm, false, opts...)
-}
-
-// InProgress tells the server that this message is being worked on. It resets
-// the redelivery timer on the server.
-func (m *Msg) InProgress(opts ...AckOpt) error {
- return m.ackReply(ackProgress, false, opts...)
-}
-
-// MsgMetadata is the JetStream metadata associated with received messages.
-type MsgMetadata struct {
- Sequence SequencePair
- NumDelivered uint64
- NumPending uint64
- Timestamp time.Time
- Stream string
- Consumer string
- Domain string
-}
-
-// Metadata retrieves the metadata from a JetStream message. This method will
-// return an error for non-JetStream Msgs.
-func (m *Msg) Metadata() (*MsgMetadata, error) {
- if err := m.checkReply(); err != nil {
- return nil, err
- }
-
- tokens, err := parser.GetMetadataFields(m.Reply)
- if err != nil {
- return nil, err
- }
-
- meta := &MsgMetadata{
- Domain: tokens[parser.AckDomainTokenPos],
- NumDelivered: parser.ParseNum(tokens[parser.AckNumDeliveredTokenPos]),
- NumPending: parser.ParseNum(tokens[parser.AckNumPendingTokenPos]),
- Timestamp: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
- Stream: tokens[parser.AckStreamTokenPos],
- Consumer: tokens[parser.AckConsumerTokenPos],
- }
- meta.Sequence.Stream = parser.ParseNum(tokens[parser.AckStreamSeqTokenPos])
- meta.Sequence.Consumer = parser.ParseNum(tokens[parser.AckConsumerSeqTokenPos])
- return meta, nil
-}
-
-// AckPolicy determines how the consumer should acknowledge delivered messages.
-type AckPolicy int
-
-const (
- // AckNonePolicy requires no acks for delivered messages.
- AckNonePolicy AckPolicy = iota
-
- // AckAllPolicy when acking a sequence number, this implicitly acks all
- // sequences below this one as well.
- AckAllPolicy
-
- // AckExplicitPolicy requires ack or nack for all messages.
- AckExplicitPolicy
-
- // For configuration mismatch check
- ackPolicyNotSet = 99
-)
-
-func jsonString(s string) string {
- return "\"" + s + "\""
-}
-
-func (p *AckPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString("none"):
- *p = AckNonePolicy
- case jsonString("all"):
- *p = AckAllPolicy
- case jsonString("explicit"):
- *p = AckExplicitPolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
-
- return nil
-}
-
-func (p AckPolicy) MarshalJSON() ([]byte, error) {
- switch p {
- case AckNonePolicy:
- return json.Marshal("none")
- case AckAllPolicy:
- return json.Marshal("all")
- case AckExplicitPolicy:
- return json.Marshal("explicit")
- default:
- return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p)
- }
-}
-
-func (p AckPolicy) String() string {
- switch p {
- case AckNonePolicy:
- return "AckNone"
- case AckAllPolicy:
- return "AckAll"
- case AckExplicitPolicy:
- return "AckExplicit"
- case ackPolicyNotSet:
- return "Not Initialized"
- default:
- return "Unknown AckPolicy"
- }
-}
-
-// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
-type ReplayPolicy int
-
-const (
- // ReplayInstantPolicy will replay messages as fast as possible.
- ReplayInstantPolicy ReplayPolicy = iota
-
- // ReplayOriginalPolicy will maintain the same timing as the messages were received.
- ReplayOriginalPolicy
-
- // For configuration mismatch check
- replayPolicyNotSet = 99
-)
-
-func (p *ReplayPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString("instant"):
- *p = ReplayInstantPolicy
- case jsonString("original"):
- *p = ReplayOriginalPolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
-
- return nil
-}
-
-func (p ReplayPolicy) MarshalJSON() ([]byte, error) {
- switch p {
- case ReplayOriginalPolicy:
- return json.Marshal("original")
- case ReplayInstantPolicy:
- return json.Marshal("instant")
- default:
- return nil, fmt.Errorf("nats: unknown replay policy %v", p)
- }
-}
-
-var (
- ackAck = []byte("+ACK")
- ackNak = []byte("-NAK")
- ackProgress = []byte("+WPI")
- ackTerm = []byte("+TERM")
-)
-
-// DeliverPolicy determines how the consumer should select the first message to deliver.
-type DeliverPolicy int
-
-const (
- // DeliverAllPolicy starts delivering messages from the very beginning of a
- // stream. This is the default.
- DeliverAllPolicy DeliverPolicy = iota
-
- // DeliverLastPolicy will start the consumer with the last sequence
- // received.
- DeliverLastPolicy
-
- // DeliverNewPolicy will only deliver new messages that are sent after the
- // consumer is created.
- DeliverNewPolicy
-
- // DeliverByStartSequencePolicy will deliver messages starting from a given
- // sequence.
- DeliverByStartSequencePolicy
-
- // DeliverByStartTimePolicy will deliver messages starting from a given
- // time.
- DeliverByStartTimePolicy
-
- // DeliverLastPerSubjectPolicy will start the consumer with the last message
- // for all subjects received.
- DeliverLastPerSubjectPolicy
-
- // For configuration mismatch check
- deliverPolicyNotSet = 99
-)
-
-func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString("all"), jsonString("undefined"):
- *p = DeliverAllPolicy
- case jsonString("last"):
- *p = DeliverLastPolicy
- case jsonString("new"):
- *p = DeliverNewPolicy
- case jsonString("by_start_sequence"):
- *p = DeliverByStartSequencePolicy
- case jsonString("by_start_time"):
- *p = DeliverByStartTimePolicy
- case jsonString("last_per_subject"):
- *p = DeliverLastPerSubjectPolicy
- }
-
- return nil
-}
-
-func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
- switch p {
- case DeliverAllPolicy:
- return json.Marshal("all")
- case DeliverLastPolicy:
- return json.Marshal("last")
- case DeliverNewPolicy:
- return json.Marshal("new")
- case DeliverByStartSequencePolicy:
- return json.Marshal("by_start_sequence")
- case DeliverByStartTimePolicy:
- return json.Marshal("by_start_time")
- case DeliverLastPerSubjectPolicy:
- return json.Marshal("last_per_subject")
- default:
- return nil, fmt.Errorf("nats: unknown deliver policy %v", p)
- }
-}
-
-// RetentionPolicy determines how messages in a set are retained.
-type RetentionPolicy int
-
-const (
- // LimitsPolicy (default) means that messages are retained until any given limit is reached.
- // This could be one of MaxMsgs, MaxBytes, or MaxAge.
- LimitsPolicy RetentionPolicy = iota
- // InterestPolicy specifies that when all known observables have acknowledged a message it can be removed.
- InterestPolicy
- // WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed.
- WorkQueuePolicy
-)
-
-// DiscardPolicy determines how to proceed when limits of messages or bytes are
-// reached.
-type DiscardPolicy int
-
-const (
- // DiscardOld will remove older messages to return to the limits. This is
- // the default.
- DiscardOld DiscardPolicy = iota
- //DiscardNew will fail to store new messages.
- DiscardNew
-)
-
-const (
- limitsPolicyString = "limits"
- interestPolicyString = "interest"
- workQueuePolicyString = "workqueue"
-)
-
-func (rp RetentionPolicy) String() string {
- switch rp {
- case LimitsPolicy:
- return "Limits"
- case InterestPolicy:
- return "Interest"
- case WorkQueuePolicy:
- return "WorkQueue"
- default:
- return "Unknown Retention Policy"
- }
-}
-
-func (rp RetentionPolicy) MarshalJSON() ([]byte, error) {
- switch rp {
- case LimitsPolicy:
- return json.Marshal(limitsPolicyString)
- case InterestPolicy:
- return json.Marshal(interestPolicyString)
- case WorkQueuePolicy:
- return json.Marshal(workQueuePolicyString)
- default:
- return nil, fmt.Errorf("nats: can not marshal %v", rp)
- }
-}
-
-func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString(limitsPolicyString):
- *rp = LimitsPolicy
- case jsonString(interestPolicyString):
- *rp = InterestPolicy
- case jsonString(workQueuePolicyString):
- *rp = WorkQueuePolicy
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-func (dp DiscardPolicy) String() string {
- switch dp {
- case DiscardOld:
- return "DiscardOld"
- case DiscardNew:
- return "DiscardNew"
- default:
- return "Unknown Discard Policy"
- }
-}
-
-func (dp DiscardPolicy) MarshalJSON() ([]byte, error) {
- switch dp {
- case DiscardOld:
- return json.Marshal("old")
- case DiscardNew:
- return json.Marshal("new")
- default:
- return nil, fmt.Errorf("nats: can not marshal %v", dp)
- }
-}
-
-func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error {
- switch strings.ToLower(string(data)) {
- case jsonString("old"):
- *dp = DiscardOld
- case jsonString("new"):
- *dp = DiscardNew
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-// StorageType determines how messages are stored for retention.
-type StorageType int
-
-const (
- // FileStorage specifies on disk storage. It's the default.
- FileStorage StorageType = iota
- // MemoryStorage specifies in memory only.
- MemoryStorage
-)
-
-const (
- memoryStorageString = "memory"
- fileStorageString = "file"
-)
-
-func (st StorageType) String() string {
- switch st {
- case MemoryStorage:
- return "Memory"
- case FileStorage:
- return "File"
- default:
- return "Unknown Storage Type"
- }
-}
-
-func (st StorageType) MarshalJSON() ([]byte, error) {
- switch st {
- case MemoryStorage:
- return json.Marshal(memoryStorageString)
- case FileStorage:
- return json.Marshal(fileStorageString)
- default:
- return nil, fmt.Errorf("nats: can not marshal %v", st)
- }
-}
-
-func (st *StorageType) UnmarshalJSON(data []byte) error {
- switch string(data) {
- case jsonString(memoryStorageString):
- *st = MemoryStorage
- case jsonString(fileStorageString):
- *st = FileStorage
- default:
- return fmt.Errorf("nats: can not unmarshal %q", data)
- }
- return nil
-}
-
-type StoreCompression uint8
-
-const (
- NoCompression StoreCompression = iota
- S2Compression
-)
-
-func (alg StoreCompression) String() string {
- switch alg {
- case NoCompression:
- return "None"
- case S2Compression:
- return "S2"
- default:
- return "Unknown StoreCompression"
- }
-}
-
-func (alg StoreCompression) MarshalJSON() ([]byte, error) {
- var str string
- switch alg {
- case S2Compression:
- str = "s2"
- case NoCompression:
- str = "none"
- default:
- return nil, fmt.Errorf("unknown compression algorithm")
- }
- return json.Marshal(str)
-}
-
-func (alg *StoreCompression) UnmarshalJSON(b []byte) error {
- var str string
- if err := json.Unmarshal(b, &str); err != nil {
- return err
- }
- switch str {
- case "s2":
- *alg = S2Compression
- case "none":
- *alg = NoCompression
- default:
- return fmt.Errorf("unknown compression algorithm")
- }
- return nil
-}
-
-// Length of our hash used for named consumers.
-const nameHashLen = 8
-
-// Computes a hash for the given `name`.
-func getHash(name string) string {
- sha := sha256.New()
- sha.Write([]byte(name))
- b := sha.Sum(nil)
- for i := 0; i < nameHashLen; i++ {
- b[i] = rdigits[int(b[i]%base)]
- }
- return string(b[:nameHashLen])
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jserrors.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jserrors.go
deleted file mode 100644
index f028594..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jserrors.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2020-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "errors"
- "fmt"
-)
-
-var (
- // API errors
-
- // ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account.
- //
- // Note: This error will not be returned in clustered mode, even if each
- // server in the cluster does not have JetStream enabled. In clustered mode,
- // requests will time out instead.
- ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}}
-
- // ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account.
- ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
-
- // ErrStreamNotFound is an error returned when stream with given name does not exist.
- ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
-
- // ErrStreamNameAlreadyInUse is returned when a stream with given name already exists and has a different configuration.
- ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
-
- // ErrStreamSubjectTransformNotSupported is returned when the connected nats-server version does not support setting
- // the stream subject transform. If this error is returned when executing AddStream(), the stream with invalid
- // configuration was already created in the server.
- ErrStreamSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
-
- // ErrStreamSourceSubjectTransformNotSupported is returned when the connected nats-server version does not support setting
- // the stream source subject transform. If this error is returned when executing AddStream(), the stream with invalid
- // configuration was already created in the server.
- ErrStreamSourceSubjectTransformNotSupported JetStreamError = &jsError{message: "stream subject transformation not supported by nats-server"}
-
- // ErrStreamSourceNotSupported is returned when the connected nats-server version does not support setting
- // the stream sources. If this error is returned when executing AddStream(), the stream with invalid
- // configuration was already created in the server.
- ErrStreamSourceNotSupported JetStreamError = &jsError{message: "stream sourcing is not supported by nats-server"}
-
- // ErrStreamSourceMultipleSubjectTransformsNotSupported is returned when the connected nats-server version does not support setting
- // the stream sources. If this error is returned when executing AddStream(), the stream with invalid
- // configuration was already created in the server.
- ErrStreamSourceMultipleSubjectTransformsNotSupported JetStreamError = &jsError{message: "stream sourcing with multiple subject transforms not supported by nats-server"}
-
- // ErrConsumerNotFound is an error returned when consumer with given name does not exist.
- ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
-
- // ErrMsgNotFound is returned when message with provided sequence number does npt exist.
- ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
-
- // ErrBadRequest is returned when invalid request is sent to JetStream API.
- ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
-
- // ErrDuplicateFilterSubjects is returned when both FilterSubject and FilterSubjects are specified when creating consumer.
- ErrDuplicateFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeDuplicateFilterSubjects, Description: "consumer cannot have both FilterSubject and FilterSubjects specified", Code: 500}}
-
- // ErrDuplicateFilterSubjects is returned when filter subjects overlap when creating consumer.
- ErrOverlappingFilterSubjects JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeOverlappingFilterSubjects, Description: "consumer subject filters cannot overlap", Code: 500}}
-
- // ErrEmptyFilter is returned when a filter in FilterSubjects is empty.
- ErrEmptyFilter JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerEmptyFilter, Description: "consumer filter in FilterSubjects cannot be empty", Code: 500}}
-
- // Client errors
-
- // ErrConsumerNameAlreadyInUse is an error returned when consumer with given name already exists.
- ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
-
- // ErrConsumerNotActive is an error returned when consumer is not active.
- ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"}
-
- // ErrInvalidJSAck is returned when JetStream ack from message publish is invalid.
- ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
-
- // ErrStreamConfigRequired is returned when empty stream configuration is supplied to add/update stream.
- ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"}
-
- // ErrStreamNameRequired is returned when the provided stream name is empty.
- ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
-
- // ErrConsumerNameRequired is returned when the provided consumer durable name is empty.
- ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"}
-
- // ErrConsumerMultipleFilterSubjectsNotSupported is returned when the connected nats-server version does not support setting
- // multiple filter subjects with filter_subjects field. If this error is returned when executing AddConsumer(), the consumer with invalid
- // configuration was already created in the server.
- ErrConsumerMultipleFilterSubjectsNotSupported JetStreamError = &jsError{message: "multiple consumer filter subjects not supported by nats-server"}
-
- // ErrConsumerConfigRequired is returned when empty consumer consuguration is supplied to add/update consumer.
- ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"}
-
- // ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on push consumer.
- ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"}
-
- // ErrPullSubscribeRequired is returned when attempting to use subscribe methods not suitable for pull consumers for pull consumers.
- ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"}
-
- // ErrMsgAlreadyAckd is returned when attempting to acknowledge message more than once.
- ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
-
- // ErrNoStreamResponse is returned when there is no response from stream (e.g. no responders error).
- ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
-
- // ErrNotJSMessage is returned when attempting to get metadata from non JetStream message .
- ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
-
- // ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' ').
- ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
-
- // ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' ').
- ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
-
- // ErrInvalidFilterSubject is returned when the provided filter subject is invalid.
- ErrInvalidFilterSubject JetStreamError = &jsError{message: "invalid filter subject"}
-
- // ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful.
- ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"}
-
- // ErrSubjectMismatch is returned when the provided subject does not match consumer's filter subject.
- ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"}
-
- // ErrContextAndTimeout is returned when attempting to use both context and timeout.
- ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"}
-
- // ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for consumer with AckNone policy set.
- ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"}
-
- // ErrConsumerDeleted is returned when attempting to send pull request to a consumer which does not exist
- ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
-
- // ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed
- ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"}
-
- // ErrNoHeartbeat is returned when no heartbeat is received from server when sending requests with pull consumer.
- ErrNoHeartbeat JetStreamError = &jsError{message: "no heartbeat received"}
-
- // ErrSubscriptionClosed is returned when attempting to send pull request to a closed subscription
- ErrSubscriptionClosed JetStreamError = &jsError{message: "subscription closed"}
-
- // DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases.
- // Use ErrInvalidConsumerName instead.
- ErrInvalidDurableName = errors.New("nats: invalid durable name")
-)
-
-// Error code represents JetStream error codes returned by the API
-type ErrorCode uint16
-
-const (
- JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
- JSErrCodeJetStreamNotEnabled ErrorCode = 10076
- JSErrCodeInsufficientResourcesErr ErrorCode = 10023
-
- JSErrCodeStreamNotFound ErrorCode = 10059
- JSErrCodeStreamNameInUse ErrorCode = 10058
-
- JSErrCodeConsumerNotFound ErrorCode = 10014
- JSErrCodeConsumerNameExists ErrorCode = 10013
- JSErrCodeConsumerAlreadyExists ErrorCode = 10105
- JSErrCodeDuplicateFilterSubjects ErrorCode = 10136
- JSErrCodeOverlappingFilterSubjects ErrorCode = 10138
- JSErrCodeConsumerEmptyFilter ErrorCode = 10139
-
- JSErrCodeMessageNotFound ErrorCode = 10037
-
- JSErrCodeBadRequest ErrorCode = 10003
- JSStreamInvalidConfig ErrorCode = 10052
-
- JSErrCodeStreamWrongLastSequence ErrorCode = 10071
-)
-
-// APIError is included in all API responses if there was an error.
-type APIError struct {
- Code int `json:"code"`
- ErrorCode ErrorCode `json:"err_code"`
- Description string `json:"description,omitempty"`
-}
-
-// Error prints the JetStream API error code and description
-func (e *APIError) Error() string {
- return fmt.Sprintf("nats: %s", e.Description)
-}
-
-// APIError implements the JetStreamError interface.
-func (e *APIError) APIError() *APIError {
- return e
-}
-
-// Is matches against an APIError.
-func (e *APIError) Is(err error) bool {
- if e == nil {
- return false
- }
- // Extract internal APIError to match against.
- var aerr *APIError
- ok := errors.As(err, &aerr)
- if !ok {
- return ok
- }
- return e.ErrorCode == aerr.ErrorCode
-}
-
-// JetStreamError is an error result that happens when using JetStream.
-// In case of client-side error, `APIError()` returns nil
-type JetStreamError interface {
- APIError() *APIError
- error
-}
-
-type jsError struct {
- apiErr *APIError
- message string
-}
-
-func (err *jsError) APIError() *APIError {
- return err.apiErr
-}
-
-func (err *jsError) Error() string {
- if err.apiErr != nil && err.apiErr.Description != "" {
- return err.apiErr.Error()
- }
- return fmt.Sprintf("nats: %s", err.message)
-}
-
-func (err *jsError) Unwrap() error {
- // Allow matching to embedded APIError in case there is one.
- if err.apiErr == nil {
- return nil
- }
- return err.apiErr
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jsm.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jsm.go
deleted file mode 100644
index 9eb5d4b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/jsm.go
+++ /dev/null
@@ -1,1775 +0,0 @@
-// Copyright 2021-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// JetStreamManager manages JetStream Streams and Consumers.
-type JetStreamManager interface {
- // AddStream creates a stream.
- AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error)
-
- // UpdateStream updates a stream.
- UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error)
-
- // DeleteStream deletes a stream.
- DeleteStream(name string, opts ...JSOpt) error
-
- // StreamInfo retrieves information from a stream.
- StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error)
-
- // PurgeStream purges a stream messages.
- PurgeStream(name string, opts ...JSOpt) error
-
- // StreamsInfo can be used to retrieve a list of StreamInfo objects.
- // DEPRECATED: Use Streams() instead.
- StreamsInfo(opts ...JSOpt) <-chan *StreamInfo
-
- // Streams can be used to retrieve a list of StreamInfo objects.
- Streams(opts ...JSOpt) <-chan *StreamInfo
-
- // StreamNames is used to retrieve a list of Stream names.
- StreamNames(opts ...JSOpt) <-chan string
-
- // GetMsg retrieves a raw stream message stored in JetStream by sequence number.
- // Use options nats.DirectGet() or nats.DirectGetNext() to trigger retrieval
- // directly from a distributed group of servers (leader and replicas).
- // The stream must have been created/updated with the AllowDirect boolean.
- GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error)
-
- // GetLastMsg retrieves the last raw stream message stored in JetStream by subject.
- // Use option nats.DirectGet() to trigger retrieval
- // directly from a distributed group of servers (leader and replicas).
- // The stream must have been created/updated with the AllowDirect boolean.
- GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error)
-
- // DeleteMsg deletes a message from a stream. The message is marked as erased, but its value is not overwritten.
- DeleteMsg(name string, seq uint64, opts ...JSOpt) error
-
- // SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data
- // As a result, this operation is slower than DeleteMsg()
- SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error
-
- // AddConsumer adds a consumer to a stream.
- // If the consumer already exists, and the configuration is the same, it
- // will return the existing consumer.
- // If the consumer already exists, and the configuration is different, it
- // will return ErrConsumerNameAlreadyInUse.
- AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error)
-
- // UpdateConsumer updates an existing consumer.
- UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error)
-
- // DeleteConsumer deletes a consumer.
- DeleteConsumer(stream, consumer string, opts ...JSOpt) error
-
- // ConsumerInfo retrieves information of a consumer from a stream.
- ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error)
-
- // ConsumersInfo is used to retrieve a list of ConsumerInfo objects.
- // DEPRECATED: Use Consumers() instead.
- ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo
-
- // Consumers is used to retrieve a list of ConsumerInfo objects.
- Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo
-
- // ConsumerNames is used to retrieve a list of Consumer names.
- ConsumerNames(stream string, opts ...JSOpt) <-chan string
-
- // AccountInfo retrieves info about the JetStream usage from an account.
- AccountInfo(opts ...JSOpt) (*AccountInfo, error)
-
- // StreamNameBySubject returns a stream matching given subject.
- StreamNameBySubject(string, ...JSOpt) (string, error)
-}
-
-// StreamConfig will determine the properties for a stream.
-// There are sensible defaults for most. If no subjects are
-// given the name will be used as the only subject.
-type StreamConfig struct {
- // Name is the name of the stream. It is required and must be unique
- // across the JetStream account.
- //
- // Name Names cannot contain whitespace, ., *, >, path separators
- // (forward or backwards slash), and non-printable characters.
- Name string `json:"name"`
-
- // Description is an optional description of the stream.
- Description string `json:"description,omitempty"`
-
- // Subjects is a list of subjects that the stream is listening on.
- // Wildcards are supported. Subjects cannot be set if the stream is
- // created as a mirror.
- Subjects []string `json:"subjects,omitempty"`
-
- // Retention defines the message retention policy for the stream.
- // Defaults to LimitsPolicy.
- Retention RetentionPolicy `json:"retention"`
-
- // MaxConsumers specifies the maximum number of consumers allowed for
- // the stream.
- MaxConsumers int `json:"max_consumers"`
-
- // MaxMsgs is the maximum number of messages the stream will store.
- // After reaching the limit, stream adheres to the discard policy.
- // If not set, server default is -1 (unlimited).
- MaxMsgs int64 `json:"max_msgs"`
-
- // MaxBytes is the maximum total size of messages the stream will store.
- // After reaching the limit, stream adheres to the discard policy.
- // If not set, server default is -1 (unlimited).
- MaxBytes int64 `json:"max_bytes"`
-
- // Discard defines the policy for handling messages when the stream
- // reaches its limits in terms of number of messages or total bytes.
- Discard DiscardPolicy `json:"discard"`
-
- // DiscardNewPerSubject is a flag to enable discarding new messages per
- // subject when limits are reached. Requires DiscardPolicy to be
- // DiscardNew and the MaxMsgsPerSubject to be set.
- DiscardNewPerSubject bool `json:"discard_new_per_subject,omitempty"`
-
- // MaxAge is the maximum age of messages that the stream will retain.
- MaxAge time.Duration `json:"max_age"`
-
- // MaxMsgsPerSubject is the maximum number of messages per subject that
- // the stream will retain.
- MaxMsgsPerSubject int64 `json:"max_msgs_per_subject"`
-
- // MaxMsgSize is the maximum size of any single message in the stream.
- MaxMsgSize int32 `json:"max_msg_size,omitempty"`
-
- // Storage specifies the type of storage backend used for the stream
- // (file or memory).
- Storage StorageType `json:"storage"`
-
- // Replicas is the number of stream replicas in clustered JetStream.
- // Defaults to 1, maximum is 5.
- Replicas int `json:"num_replicas"`
-
- // NoAck is a flag to disable acknowledging messages received by this
- // stream.
- //
- // If set to true, publish methods from the JetStream client will not
- // work as expected, since they rely on acknowledgements. Core NATS
- // publish methods should be used instead. Note that this will make
- // message delivery less reliable.
- NoAck bool `json:"no_ack,omitempty"`
-
- // Duplicates is the window within which to track duplicate messages.
- // If not set, server default is 2 minutes.
- Duplicates time.Duration `json:"duplicate_window,omitempty"`
-
- // Placement is used to declare where the stream should be placed via
- // tags and/or an explicit cluster name.
- Placement *Placement `json:"placement,omitempty"`
-
- // Mirror defines the configuration for mirroring another stream.
- Mirror *StreamSource `json:"mirror,omitempty"`
-
- // Sources is a list of other streams this stream sources messages from.
- Sources []*StreamSource `json:"sources,omitempty"`
-
- // Sealed streams do not allow messages to be published or deleted via limits or API,
- // sealed streams can not be unsealed via configuration update. Can only
- // be set on already created streams via the Update API.
- Sealed bool `json:"sealed,omitempty"`
-
- // DenyDelete restricts the ability to delete messages from a stream via
- // the API. Defaults to false.
- DenyDelete bool `json:"deny_delete,omitempty"`
-
- // DenyPurge restricts the ability to purge messages from a stream via
- // the API. Defaults to false.
- DenyPurge bool `json:"deny_purge,omitempty"`
-
- // AllowRollup allows the use of the Nats-Rollup header to replace all
- // contents of a stream, or subject in a stream, with a single new
- // message.
- AllowRollup bool `json:"allow_rollup_hdrs,omitempty"`
-
- // Compression specifies the message storage compression algorithm.
- // Defaults to NoCompression.
- Compression StoreCompression `json:"compression"`
-
- // FirstSeq is the initial sequence number of the first message in the
- // stream.
- FirstSeq uint64 `json:"first_seq,omitempty"`
-
- // SubjectTransform allows applying a transformation to matching
- // messages' subjects.
- SubjectTransform *SubjectTransformConfig `json:"subject_transform,omitempty"`
-
- // RePublish allows immediate republishing a message to the configured
- // subject after it's stored.
- RePublish *RePublish `json:"republish,omitempty"`
-
- // AllowDirect enables direct access to individual messages using direct
- // get API. Defaults to false.
- AllowDirect bool `json:"allow_direct"`
-
- // MirrorDirect enables direct access to individual messages from the
- // origin stream using direct get API. Defaults to false.
- MirrorDirect bool `json:"mirror_direct"`
-
- // ConsumerLimits defines limits of certain values that consumers can
- // set, defaults for those who don't set these settings
- ConsumerLimits StreamConsumerLimits `json:"consumer_limits,omitempty"`
-
- // Metadata is a set of application-defined key-value pairs for
- // associating metadata on the stream. This feature requires nats-server
- // v2.10.0 or later.
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Template identifies the template that manages the Stream. DEPRECATED:
- // This feature is no longer supported.
- Template string `json:"template_owner,omitempty"`
-}
-
-// SubjectTransformConfig is for applying a subject transform (to matching messages) before doing anything else when a new message is received.
-type SubjectTransformConfig struct {
- Source string `json:"src,omitempty"`
- Destination string `json:"dest"`
-}
-
-// RePublish is for republishing messages once committed to a stream. The original
-// subject cis remapped from the subject pattern to the destination pattern.
-type RePublish struct {
- Source string `json:"src,omitempty"`
- Destination string `json:"dest"`
- HeadersOnly bool `json:"headers_only,omitempty"`
-}
-
-// Placement is used to guide placement of streams in clustered JetStream.
-type Placement struct {
- Cluster string `json:"cluster"`
- Tags []string `json:"tags,omitempty"`
-}
-
-// StreamSource dictates how streams can source from other streams.
-type StreamSource struct {
- Name string `json:"name"`
- OptStartSeq uint64 `json:"opt_start_seq,omitempty"`
- OptStartTime *time.Time `json:"opt_start_time,omitempty"`
- FilterSubject string `json:"filter_subject,omitempty"`
- SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
- External *ExternalStream `json:"external,omitempty"`
- Domain string `json:"-"`
-}
-
-// ExternalStream allows you to qualify access to a stream source in another
-// account.
-type ExternalStream struct {
- APIPrefix string `json:"api"`
- DeliverPrefix string `json:"deliver,omitempty"`
-}
-
-// StreamConsumerLimits are the limits for a consumer on a stream.
-// These can be overridden on a per consumer basis.
-type StreamConsumerLimits struct {
- InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"`
- MaxAckPending int `json:"max_ack_pending,omitempty"`
-}
-
-// Helper for copying when we do not want to change user's version.
-func (ss *StreamSource) copy() *StreamSource {
- nss := *ss
- // Check pointers
- if ss.OptStartTime != nil {
- t := *ss.OptStartTime
- nss.OptStartTime = &t
- }
- if ss.External != nil {
- ext := *ss.External
- nss.External = &ext
- }
- return &nss
-}
-
-// If we have a Domain, convert to the appropriate ext.APIPrefix.
-// This will change the stream source, so should be a copy passed in.
-func (ss *StreamSource) convertDomain() error {
- if ss.Domain == _EMPTY_ {
- return nil
- }
- if ss.External != nil {
- // These should be mutually exclusive.
- // TODO(dlc) - Make generic?
- return errors.New("nats: domain and external are both set")
- }
- ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)}
- return nil
-}
-
// apiResponse is a standard response from the JetStream JSON API
type apiResponse struct {
	Type  string    `json:"type"`
	Error *APIError `json:"error,omitempty"`
}

// apiPaged includes variables used to create paged responses from the JSON API
type apiPaged struct {
	Total  int `json:"total"`
	Offset int `json:"offset"`
	Limit  int `json:"limit"`
}

// apiPagedRequest includes parameters allowing specific pages to be requested
// from APIs responding with apiPaged.
type apiPagedRequest struct {
	Offset int `json:"offset,omitempty"`
}

// AccountInfo contains info about the JetStream usage from the current account.
type AccountInfo struct {
	// Tier is embedded: account-level usage and limits.
	Tier
	Domain string          `json:"domain"`
	API    APIStats        `json:"api"`
	// Tiers holds per-tier usage when the account uses tiered limits.
	Tiers map[string]Tier `json:"tiers"`
}

// Tier is JetStream usage (memory/storage/streams/consumers) together with
// the limits that apply to it.
type Tier struct {
	Memory         uint64        `json:"memory"`
	Store          uint64        `json:"storage"`
	ReservedMemory uint64        `json:"reserved_memory"`
	ReservedStore  uint64        `json:"reserved_storage"`
	Streams        int           `json:"streams"`
	Consumers      int           `json:"consumers"`
	Limits         AccountLimits `json:"limits"`
}

// APIStats reports on API calls to JetStream for this account.
type APIStats struct {
	Total  uint64 `json:"total"`
	Errors uint64 `json:"errors"`
}

// AccountLimits includes the JetStream limits of the current account.
type AccountLimits struct {
	MaxMemory            int64 `json:"max_memory"`
	MaxStore             int64 `json:"max_storage"`
	MaxStreams           int   `json:"max_streams"`
	MaxConsumers         int   `json:"max_consumers"`
	MaxAckPending        int   `json:"max_ack_pending"`
	MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"`
	StoreMaxStreamBytes  int64 `json:"storage_max_stream_bytes"`
	MaxBytesRequired     bool  `json:"max_bytes_required"`
}

// accountInfoResponse is the API response for an account info request:
// the standard API envelope plus the account details.
type accountInfoResponse struct {
	apiResponse
	AccountInfo
}
-
// AccountInfo fetches account information from the server, containing details
// about the account associated with this JetStream connection. If account is
// not enabled for JetStream, ErrJetStreamNotEnabledForAccount is returned.
//
// If the server does not have JetStream enabled, ErrJetStreamNotEnabled is
// returned (for a single server setup). For clustered topologies, AccountInfo
// will time out.
func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) {
	o, cancel, err := getJSContextOpts(js.opts, opts...)
	if err != nil {
		return nil, err
	}
	if cancel != nil {
		defer cancel()
	}

	resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil)
	if err != nil {
		// todo maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had
		// No responders on the API subject means JetStream is not enabled
		// on the server at all.
		if errors.Is(err, ErrNoResponders) {
			err = ErrJetStreamNotEnabled
		}
		return nil, err
	}
	var info accountInfoResponse
	if err := json.Unmarshal(resp.Data, &info); err != nil {
		return nil, err
	}
	if info.Error != nil {
		// Internally checks based on error code instead of description match.
		if errors.Is(info.Error, ErrJetStreamNotEnabledForAccount) {
			return nil, ErrJetStreamNotEnabledForAccount
		}
		return nil, info.Error
	}

	return &info.AccountInfo, nil
}
-
// createConsumerRequest is the request body sent when creating or
// updating a consumer.
type createConsumerRequest struct {
	Stream string          `json:"stream_name"`
	Config *ConsumerConfig `json:"config"`
}

// consumerResponse is the API response envelope carrying a ConsumerInfo.
type consumerResponse struct {
	apiResponse
	*ConsumerInfo
}
-
-// AddConsumer adds a consumer to a stream.
-// If the consumer already exists, and the configuration is the same, it
-// will return the existing consumer.
-// If the consumer already exists, and the configuration is different, it
-// will return ErrConsumerNameAlreadyInUse.
-func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) {
- if cfg == nil {
- cfg = &ConsumerConfig{}
- }
- consumerName := cfg.Name
- if consumerName == _EMPTY_ {
- consumerName = cfg.Durable
- }
- if consumerName != _EMPTY_ {
- consInfo, err := js.ConsumerInfo(stream, consumerName, opts...)
- if err != nil && !errors.Is(err, ErrConsumerNotFound) && !errors.Is(err, ErrStreamNotFound) {
- return nil, err
- }
-
- if consInfo != nil {
- sameConfig := checkConfig(&consInfo.Config, cfg)
- if sameConfig != nil {
- return nil, fmt.Errorf("%w: creating consumer %q on stream %q", ErrConsumerNameAlreadyInUse, consumerName, stream)
- } else {
- return consInfo, nil
- }
- }
- }
-
- return js.upsertConsumer(stream, consumerName, cfg, opts...)
-}
-
-func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) {
- if cfg == nil {
- return nil, ErrConsumerConfigRequired
- }
- consumerName := cfg.Name
- if consumerName == _EMPTY_ {
- consumerName = cfg.Durable
- }
- if consumerName == _EMPTY_ {
- return nil, ErrConsumerNameRequired
- }
- return js.upsertConsumer(stream, consumerName, cfg, opts...)
-}
-
-func (js *js) upsertConsumer(stream, consumerName string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) {
- if err := checkStreamName(stream); err != nil {
- return nil, err
- }
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return nil, err
- }
- if cancel != nil {
- defer cancel()
- }
-
- req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg})
- if err != nil {
- return nil, err
- }
-
- var ccSubj string
- if consumerName == _EMPTY_ {
- // if consumer name is empty (neither Durable nor Name is set), use the legacy ephemeral endpoint
- ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream)
- } else if err := checkConsumerName(consumerName); err != nil {
- return nil, err
- } else if js.nc.serverMinVersion(2, 9, 0) {
- if cfg.Durable != "" && js.opts.featureFlags.useDurableConsumerCreate {
- // if user set the useDurableConsumerCreate flag, use the legacy DURABLE.CREATE endpoint
- ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName)
- } else if cfg.FilterSubject == _EMPTY_ || cfg.FilterSubject == ">" {
- // if filter subject is empty or ">", use the endpoint without filter subject
- ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName)
- } else {
- // safeguard against passing invalid filter subject in request subject
- if cfg.FilterSubject[0] == '.' || cfg.FilterSubject[len(cfg.FilterSubject)-1] == '.' {
- return nil, fmt.Errorf("%w: %q", ErrInvalidFilterSubject, cfg.FilterSubject)
- }
- // if filter subject is not empty, use the endpoint with filter subject
- ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject)
- }
- } else {
- if cfg.Durable != "" {
- // if Durable is set, use the DURABLE.CREATE endpoint
- ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName)
- } else {
- // if Durable is not set, use the legacy ephemeral endpoint
- ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream)
- }
- }
-
- resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(ccSubj), req)
- if err != nil {
- if errors.Is(err, ErrNoResponders) {
- err = ErrJetStreamNotEnabled
- }
- return nil, err
- }
- var info consumerResponse
- err = json.Unmarshal(resp.Data, &info)
- if err != nil {
- return nil, err
- }
- if info.Error != nil {
- if errors.Is(info.Error, ErrStreamNotFound) {
- return nil, ErrStreamNotFound
- }
- if errors.Is(info.Error, ErrConsumerNotFound) {
- return nil, ErrConsumerNotFound
- }
- return nil, info.Error
- }
-
- // check whether multiple filter subjects (if used) are reflected in the returned ConsumerInfo
- if len(cfg.FilterSubjects) != 0 && len(info.Config.FilterSubjects) == 0 {
- return nil, ErrConsumerMultipleFilterSubjectsNotSupported
- }
- return info.ConsumerInfo, nil
-}
-
// consumerDeleteResponse is the response for a Consumer delete request.
type consumerDeleteResponse struct {
	apiResponse
	// Success is true when the server deleted the consumer.
	Success bool `json:"success,omitempty"`
}
-
-func checkStreamName(stream string) error {
- if stream == _EMPTY_ {
- return ErrStreamNameRequired
- }
- if strings.ContainsAny(stream, ". ") {
- return ErrInvalidStreamName
- }
- return nil
-}
-
-// Check that the consumer name is not empty and is valid (does not contain "." and " ").
-// Additional consumer name validation is done in nats-server.
-// Returns ErrConsumerNameRequired if consumer name is empty, ErrInvalidConsumerName is invalid, otherwise nil
-func checkConsumerName(consumer string) error {
- if consumer == _EMPTY_ {
- return ErrConsumerNameRequired
- }
- if strings.ContainsAny(consumer, ". ") {
- return ErrInvalidConsumerName
- }
- return nil
-}
-
-// DeleteConsumer deletes a Consumer.
-func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error {
- if err := checkStreamName(stream); err != nil {
- return err
- }
- if err := checkConsumerName(consumer); err != nil {
- return err
- }
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return err
- }
- if cancel != nil {
- defer cancel()
- }
-
- dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer))
- r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil)
- if err != nil {
- return err
- }
- var resp consumerDeleteResponse
- if err := json.Unmarshal(r.Data, &resp); err != nil {
- return err
- }
-
- if resp.Error != nil {
- if errors.Is(resp.Error, ErrConsumerNotFound) {
- return ErrConsumerNotFound
- }
- return resp.Error
- }
- return nil
-}
-
-// ConsumerInfo returns information about a Consumer.
-func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) {
- if err := checkStreamName(stream); err != nil {
- return nil, err
- }
- if err := checkConsumerName(consumer); err != nil {
- return nil, err
- }
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return nil, err
- }
- if cancel != nil {
- defer cancel()
- }
- return js.getConsumerInfoContext(o.ctx, stream, consumer)
-}
-
// consumerLister fetches pages of ConsumerInfo objects. This object is not
// safe to use for multiple threads.
type consumerLister struct {
	stream string // stream whose consumers are listed
	js     *js

	err      error           // first error encountered; sticky (stops iteration)
	offset   int             // next page offset to request
	page     []*ConsumerInfo // most recently fetched page
	pageInfo *apiPaged       // paging info from the last response (nil before first fetch)
}

// consumersRequest is the type used for Consumers requests.
type consumersRequest struct {
	apiPagedRequest
}

// consumerListResponse is the response for a Consumers List request.
type consumerListResponse struct {
	apiResponse
	apiPaged
	Consumers []*ConsumerInfo `json:"consumers"`
}
-
// Next fetches the next ConsumerInfo page.
// It reports true when a page was fetched, false once all pages (per the
// server-reported total) are consumed or on error; errors are retained
// and exposed via Err().
func (c *consumerLister) Next() bool {
	if c.err != nil {
		return false
	}
	if err := checkStreamName(c.stream); err != nil {
		c.err = err
		return false
	}
	// Stop once we have paged past the server-reported total.
	if c.pageInfo != nil && c.offset >= c.pageInfo.Total {
		return false
	}

	req, err := json.Marshal(consumersRequest{
		apiPagedRequest: apiPagedRequest{Offset: c.offset},
	})
	if err != nil {
		c.err = err
		return false
	}

	// Use the caller-supplied context if one was configured, otherwise
	// bound this request by the configured wait timeout.
	var cancel context.CancelFunc
	ctx := c.js.opts.ctx
	if ctx == nil {
		ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait)
		defer cancel()
	}

	clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerListT, c.stream))
	r, err := c.js.apiRequestWithContext(ctx, clSubj, req)
	if err != nil {
		c.err = err
		return false
	}
	var resp consumerListResponse
	if err := json.Unmarshal(r.Data, &resp); err != nil {
		c.err = err
		return false
	}
	if resp.Error != nil {
		c.err = resp.Error
		return false
	}

	// Record paging state and advance the offset by the page size.
	c.pageInfo = &resp.apiPaged
	c.page = resp.Consumers
	c.offset += len(c.page)
	return true
}
-
// Page returns the current ConsumerInfo page.
func (c *consumerLister) Page() []*ConsumerInfo {
	return c.page
}

// Err returns any errors found while fetching pages.
func (c *consumerLister) Err() error {
	return c.err
}
-
// Consumers is used to retrieve a list of ConsumerInfo objects.
// Results are delivered on the returned channel, which is closed once all
// pages have been consumed or the context is done. A nil channel is
// returned when the JS options cannot be processed.
func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo {
	o, cancel, err := getJSContextOpts(jsc.opts, opts...)
	if err != nil {
		return nil
	}

	ch := make(chan *ConsumerInfo)
	l := &consumerLister{js: &js{nc: jsc.nc, opts: o}, stream: stream}
	go func() {
		// The cancel func (if any) must live as long as the paging loop.
		if cancel != nil {
			defer cancel()
		}
		defer close(ch)
		for l.Next() {
			for _, info := range l.Page() {
				select {
				case ch <- info:
				case <-o.ctx.Done():
					// Receiver gave up; stop paging.
					return
				}
			}
		}
	}()

	return ch
}

// ConsumersInfo is used to retrieve a list of ConsumerInfo objects.
// DEPRECATED: Use Consumers() instead.
func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo {
	return jsc.Consumers(stream, opts...)
}
-
// consumerNamesLister fetches pages of consumer names for one stream.
// Like consumerLister, it is not safe for concurrent use.
type consumerNamesLister struct {
	stream string // stream whose consumer names are listed
	js     *js

	err      error     // first error encountered; sticky
	offset   int       // next page offset to request
	page     []string  // most recently fetched page of names
	pageInfo *apiPaged // paging info from the last response
}

// consumerNamesListResponse is the response for a Consumers Names List request.
type consumerNamesListResponse struct {
	apiResponse
	apiPaged
	Consumers []string `json:"consumers"`
}
-
// Next fetches the next consumer names page.
// Returns true when a page was fetched; false on error (retained for
// Err()) or once the server-reported total has been reached.
func (c *consumerNamesLister) Next() bool {
	if c.err != nil {
		return false
	}
	if err := checkStreamName(c.stream); err != nil {
		c.err = err
		return false
	}
	// Stop once we have paged past the server-reported total.
	if c.pageInfo != nil && c.offset >= c.pageInfo.Total {
		return false
	}

	// Use the caller-supplied context if one was configured, otherwise
	// bound this request by the configured wait timeout.
	var cancel context.CancelFunc
	ctx := c.js.opts.ctx
	if ctx == nil {
		ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait)
		defer cancel()
	}

	req, err := json.Marshal(consumersRequest{
		apiPagedRequest: apiPagedRequest{Offset: c.offset},
	})
	if err != nil {
		c.err = err
		return false
	}
	clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerNamesT, c.stream))
	r, err := c.js.apiRequestWithContext(ctx, clSubj, req)
	if err != nil {
		c.err = err
		return false
	}
	var resp consumerNamesListResponse
	if err := json.Unmarshal(r.Data, &resp); err != nil {
		c.err = err
		return false
	}
	if resp.Error != nil {
		c.err = resp.Error
		return false
	}

	// Record paging state and advance the offset by the page size.
	c.pageInfo = &resp.apiPaged
	c.page = resp.Consumers
	c.offset += len(c.page)
	return true
}
-
// Page returns the current page of consumer names.
func (c *consumerNamesLister) Page() []string {
	return c.page
}

// Err returns any errors found while fetching pages.
func (c *consumerNamesLister) Err() error {
	return c.err
}
-
// ConsumerNames is used to retrieve a list of Consumer names.
// Names are delivered on the returned channel, which is closed once all
// pages are consumed or the context is done. A nil channel is returned
// when the JS options cannot be processed.
func (jsc *js) ConsumerNames(stream string, opts ...JSOpt) <-chan string {
	o, cancel, err := getJSContextOpts(jsc.opts, opts...)
	if err != nil {
		return nil
	}

	ch := make(chan string)
	l := &consumerNamesLister{stream: stream, js: &js{nc: jsc.nc, opts: o}}
	go func() {
		// The cancel func (if any) must live as long as the paging loop.
		if cancel != nil {
			defer cancel()
		}
		defer close(ch)
		for l.Next() {
			for _, info := range l.Page() {
				select {
				case ch <- info:
				case <-o.ctx.Done():
					// Receiver gave up; stop paging.
					return
				}
			}
		}
	}()

	return ch
}
-
// streamCreateResponse stream creation.
// API envelope carrying the resulting StreamInfo.
type streamCreateResponse struct {
	apiResponse
	*StreamInfo
}
-
// AddStream creates a stream with the given configuration.
// cfg must be non-nil and cfg.Name must be a valid stream name.
// Mirror/Sources Domain settings are converted to External API prefixes
// on a copy of the config, so the caller's cfg is never mutated.
// Returns ErrStreamNameAlreadyInUse when a different stream with the same
// name exists.
func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) {
	if cfg == nil {
		return nil, ErrStreamConfigRequired
	}
	if err := checkStreamName(cfg.Name); err != nil {
		return nil, err
	}
	o, cancel, err := getJSContextOpts(js.opts, opts...)
	if err != nil {
		return nil, err
	}
	if cancel != nil {
		defer cancel()
	}

	// In case we need to change anything, copy so we do not change the caller's version.
	ncfg := *cfg

	// If we have a mirror and an external domain, convert to ext.APIPrefix.
	if cfg.Mirror != nil && cfg.Mirror.Domain != _EMPTY_ {
		// Copy so we do not change the caller's version.
		ncfg.Mirror = ncfg.Mirror.copy()
		if err := ncfg.Mirror.convertDomain(); err != nil {
			return nil, err
		}
	}
	// Check sources for the same.
	if len(ncfg.Sources) > 0 {
		// Clone the slice itself before replacing elements in place.
		ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...)
		for i, ss := range ncfg.Sources {
			if ss.Domain != _EMPTY_ {
				ncfg.Sources[i] = ss.copy()
				if err := ncfg.Sources[i].convertDomain(); err != nil {
					return nil, err
				}
			}
		}
	}

	req, err := json.Marshal(&ncfg)
	if err != nil {
		return nil, err
	}

	csSubj := js.apiSubj(fmt.Sprintf(apiStreamCreateT, cfg.Name))
	r, err := js.apiRequestWithContext(o.ctx, csSubj, req)
	if err != nil {
		return nil, err
	}
	var resp streamCreateResponse
	if err := json.Unmarshal(r.Data, &resp); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if errors.Is(resp.Error, ErrStreamNameAlreadyInUse) {
			return nil, ErrStreamNameAlreadyInUse
		}
		return nil, resp.Error
	}

	// check that input subject transform (if used) is reflected in the returned ConsumerInfo
	if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil {
		return nil, ErrStreamSubjectTransformNotSupported
	}
	// Likewise, sources dropped or stripped of their subject transforms
	// indicate a server that does not support those features.
	if len(cfg.Sources) != 0 {
		if len(cfg.Sources) != len(resp.Config.Sources) {
			return nil, ErrStreamSourceNotSupported
		}
		for i := range cfg.Sources {
			if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 {
				return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported
			}
		}
	}

	return resp.StreamInfo, nil
}
-
type (
	// StreamInfoRequest contains additional option to return
	StreamInfoRequest struct {
		apiPagedRequest
		// DeletedDetails when true includes information about deleted messages
		DeletedDetails bool `json:"deleted_details,omitempty"`
		// SubjectsFilter when set, returns information on the matched subjects
		SubjectsFilter string `json:"subjects_filter,omitempty"`
	}
	// streamInfoResponse is the API envelope for stream info/update
	// responses, including paging info for subject detail pages.
	streamInfoResponse = struct {
		apiResponse
		apiPaged
		*StreamInfo
	}
)
-
// StreamInfo returns information about the given stream.
// When subject details are requested via stream info options, the
// per-subject message counts may span multiple pages; this method pages
// through all of them and merges the results into one StreamInfo.
func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) {
	if err := checkStreamName(stream); err != nil {
		return nil, err
	}
	o, cancel, err := getJSContextOpts(js.opts, opts...)
	if err != nil {
		return nil, err
	}
	if cancel != nil {
		defer cancel()
	}

	var i int                                // subject entries collected so far; doubles as the next page offset
	var subjectMessagesMap map[string]uint64 // accumulates subject -> message count across pages
	var req []byte
	var requestPayload bool // true when stream info options must be sent as the request body

	var siOpts StreamInfoRequest
	if o.streamInfoOpts != nil {
		requestPayload = true
		siOpts = *o.streamInfoOpts
	}

	for {
		if requestPayload {
			// Advance the paging offset for each round trip.
			siOpts.Offset = i
			if req, err = json.Marshal(&siOpts); err != nil {
				return nil, err
			}
		}

		siSubj := js.apiSubj(fmt.Sprintf(apiStreamInfoT, stream))

		r, err := js.apiRequestWithContext(o.ctx, siSubj, req)
		if err != nil {
			return nil, err
		}

		var resp streamInfoResponse
		if err := json.Unmarshal(r.Data, &resp); err != nil {
			return nil, err
		}

		if resp.Error != nil {
			if errors.Is(resp.Error, ErrStreamNotFound) {
				return nil, ErrStreamNotFound
			}
			return nil, resp.Error
		}

		var total int
		// for backwards compatibility
		if resp.Total != 0 {
			total = resp.Total
		} else {
			total = len(resp.State.Subjects)
		}

		// Merge this page's subject counts into the accumulator.
		if requestPayload && len(resp.StreamInfo.State.Subjects) > 0 {
			if subjectMessagesMap == nil {
				subjectMessagesMap = make(map[string]uint64, total)
			}

			for k, j := range resp.State.Subjects {
				subjectMessagesMap[k] = j
				i++
			}
		}

		// Done once every subject entry has been collected (or none were
		// requested); attach the merged map before returning.
		if i >= total {
			if requestPayload {
				resp.StreamInfo.State.Subjects = subjectMessagesMap
			}
			return resp.StreamInfo, nil
		}
	}
}
-
// StreamInfo shows config and current state for this stream.
type StreamInfo struct {
	Config     StreamConfig        `json:"config"`
	Created    time.Time           `json:"created"`
	State      StreamState         `json:"state"`
	Cluster    *ClusterInfo        `json:"cluster,omitempty"`
	Mirror     *StreamSourceInfo   `json:"mirror,omitempty"`
	Sources    []*StreamSourceInfo `json:"sources,omitempty"`
	Alternates []*StreamAlternate  `json:"alternates,omitempty"`
}

// StreamAlternate is an alternate stream represented by a mirror.
type StreamAlternate struct {
	Name    string `json:"name"`
	Domain  string `json:"domain,omitempty"`
	Cluster string `json:"cluster"`
}

// StreamSourceInfo shows information about an upstream stream source.
type StreamSourceInfo struct {
	Name string `json:"name"`
	// Lag is how many messages behind the source this stream is.
	Lag uint64 `json:"lag"`
	// Active is the time since the source was last seen active.
	Active            time.Duration            `json:"active"`
	External          *ExternalStream          `json:"external"`
	Error             *APIError                `json:"error"`
	FilterSubject     string                   `json:"filter_subject,omitempty"`
	SubjectTransforms []SubjectTransformConfig `json:"subject_transforms,omitempty"`
}

// StreamState is information about the given stream.
type StreamState struct {
	Msgs      uint64    `json:"messages"`
	Bytes     uint64    `json:"bytes"`
	FirstSeq  uint64    `json:"first_seq"`
	FirstTime time.Time `json:"first_ts"`
	LastSeq   uint64    `json:"last_seq"`
	LastTime  time.Time `json:"last_ts"`
	Consumers int       `json:"consumer_count"`
	// Deleted lists individually deleted sequences within the stream.
	Deleted    []uint64 `json:"deleted"`
	NumDeleted int      `json:"num_deleted"`
	NumSubjects uint64  `json:"num_subjects"`
	// Subjects maps subject -> message count (populated on request).
	Subjects map[string]uint64 `json:"subjects"`
}

// ClusterInfo shows information about the underlying set of servers
// that make up the stream or consumer.
type ClusterInfo struct {
	Name     string      `json:"name,omitempty"`
	Leader   string      `json:"leader,omitempty"`
	Replicas []*PeerInfo `json:"replicas,omitempty"`
}

// PeerInfo shows information about all the peers in the cluster that
// are supporting the stream or consumer.
type PeerInfo struct {
	Name    string        `json:"name"`
	Current bool          `json:"current"`
	Offline bool          `json:"offline,omitempty"`
	Active  time.Duration `json:"active"`
	Lag     uint64        `json:"lag,omitempty"`
}
-
// UpdateStream updates a Stream.
// cfg must be non-nil and cfg.Name must be a valid stream name; the
// stream must already exist (ErrStreamNotFound otherwise).
func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) {
	if cfg == nil {
		return nil, ErrStreamConfigRequired
	}
	if err := checkStreamName(cfg.Name); err != nil {
		return nil, err
	}
	o, cancel, err := getJSContextOpts(js.opts, opts...)
	if err != nil {
		return nil, err
	}
	if cancel != nil {
		defer cancel()
	}

	req, err := json.Marshal(cfg)
	if err != nil {
		return nil, err
	}

	usSubj := js.apiSubj(fmt.Sprintf(apiStreamUpdateT, cfg.Name))
	r, err := js.apiRequestWithContext(o.ctx, usSubj, req)
	if err != nil {
		return nil, err
	}
	var resp streamInfoResponse
	if err := json.Unmarshal(r.Data, &resp); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if errors.Is(resp.Error, ErrStreamNotFound) {
			return nil, ErrStreamNotFound
		}
		return nil, resp.Error
	}

	// check that input subject transform (if used) is reflected in the returned StreamInfo
	if cfg.SubjectTransform != nil && resp.StreamInfo.Config.SubjectTransform == nil {
		return nil, ErrStreamSubjectTransformNotSupported
	}

	// Sources dropped or stripped of their subject transforms indicate a
	// server that does not support those features.
	if len(cfg.Sources) != 0 {
		if len(cfg.Sources) != len(resp.Config.Sources) {
			return nil, ErrStreamSourceNotSupported
		}
		for i := range cfg.Sources {
			if len(cfg.Sources[i].SubjectTransforms) != 0 && len(resp.Sources[i].SubjectTransforms) == 0 {
				return nil, ErrStreamSourceMultipleSubjectTransformsNotSupported
			}
		}
	}

	return resp.StreamInfo, nil
}
-
// streamDeleteResponse is the response for a Stream delete request.
type streamDeleteResponse struct {
	apiResponse
	// Success is true when the server deleted the stream.
	Success bool `json:"success,omitempty"`
}
-
-// DeleteStream deletes a Stream.
-func (js *js) DeleteStream(name string, opts ...JSOpt) error {
- if err := checkStreamName(name); err != nil {
- return err
- }
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return err
- }
- if cancel != nil {
- defer cancel()
- }
-
- dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name))
- r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil)
- if err != nil {
- return err
- }
- var resp streamDeleteResponse
- if err := json.Unmarshal(r.Data, &resp); err != nil {
- return err
- }
-
- if resp.Error != nil {
- if errors.Is(resp.Error, ErrStreamNotFound) {
- return ErrStreamNotFound
- }
- return resp.Error
- }
- return nil
-}
-
// apiMsgGetRequest is the request body for fetching a single stream
// message, either by sequence or by subject.
type apiMsgGetRequest struct {
	Seq     uint64 `json:"seq,omitempty"`
	LastFor string `json:"last_by_subj,omitempty"`
	NextFor string `json:"next_by_subj,omitempty"`
}

// RawStreamMsg is a raw message stored in JetStream.
type RawStreamMsg struct {
	Subject  string
	Sequence uint64
	Header   Header
	Data     []byte
	Time     time.Time
}

// storedMsg is a raw message stored in JetStream.
// Wire form of a stored message; Header holds the encoded header bytes.
type storedMsg struct {
	Subject  string    `json:"subject"`
	Sequence uint64    `json:"seq"`
	Header   []byte    `json:"hdrs,omitempty"`
	Data     []byte    `json:"data,omitempty"`
	Time     time.Time `json:"time"`
}

// apiMsgGetResponse is the response for a Stream get request.
type apiMsgGetResponse struct {
	apiResponse
	Message *storedMsg `json:"message,omitempty"`
}
-
// GetLastMsg retrieves the last raw stream message stored in JetStream by subject.
// Thin wrapper over getMsg using a last-by-subject request.
func (js *js) GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) {
	return js.getMsg(name, &apiMsgGetRequest{LastFor: subject}, opts...)
}

// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
// Thin wrapper over getMsg using a by-sequence request.
func (js *js) GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) {
	return js.getMsg(name, &apiMsgGetRequest{Seq: seq}, opts...)
}
-
// Low level getMsg
// getMsg fetches a single message from the named stream, using either the
// DIRECT.GET endpoints (when the direct-get option is set; the reply is
// then header-encoded) or the JSON message-get API.
func (js *js) getMsg(name string, mreq *apiMsgGetRequest, opts ...JSOpt) (*RawStreamMsg, error) {
	o, cancel, err := getJSContextOpts(js.opts, opts...)
	if err != nil {
		return nil, err
	}
	if cancel != nil {
		defer cancel()
	}

	if err := checkStreamName(name); err != nil {
		return nil, err
	}

	var apiSubj string
	// Direct get by last-subject has a dedicated subject form with the
	// filter embedded in the subject and no request body.
	if o.directGet && mreq.LastFor != _EMPTY_ {
		apiSubj = apiDirectMsgGetLastBySubjectT
		dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name, mreq.LastFor))
		r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil)
		if err != nil {
			return nil, err
		}
		return convertDirectGetMsgResponseToMsg(name, r)
	}

	if o.directGet {
		apiSubj = apiDirectMsgGetT
		mreq.NextFor = o.directNextFor
	} else {
		apiSubj = apiMsgGetT
	}

	req, err := json.Marshal(mreq)
	if err != nil {
		return nil, err
	}

	dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name))
	r, err := js.apiRequestWithContext(o.ctx, dsSubj, req)
	if err != nil {
		return nil, err
	}

	// Direct get replies carry the message in headers, not JSON.
	if o.directGet {
		return convertDirectGetMsgResponseToMsg(name, r)
	}

	var resp apiMsgGetResponse
	if err := json.Unmarshal(r.Data, &resp); err != nil {
		return nil, err
	}
	if resp.Error != nil {
		if errors.Is(resp.Error, ErrMsgNotFound) {
			return nil, ErrMsgNotFound
		}
		if errors.Is(resp.Error, ErrStreamNotFound) {
			return nil, ErrStreamNotFound
		}
		return nil, resp.Error
	}

	msg := resp.Message

	// Stored headers are wire-encoded; decode them back to a Header map.
	var hdr Header
	if len(msg.Header) > 0 {
		hdr, err = DecodeHeadersMsg(msg.Header)
		if err != nil {
			return nil, err
		}
	}

	return &RawStreamMsg{
		Subject:  msg.Subject,
		Sequence: msg.Sequence,
		Header:   hdr,
		Data:     msg.Data,
		Time:     msg.Time,
	}, nil
}
-
// convertDirectGetMsgResponseToMsg reconstructs a RawStreamMsg from a
// DIRECT.GET reply, where the message metadata (stream, sequence,
// timestamp, subject) is carried in headers rather than a JSON body.
func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error) {
	// Check for 404/408. We would get a no-payload message and a "Status" header
	if len(r.Data) == 0 {
		val := r.Header.Get(statusHdr)
		if val != _EMPTY_ {
			switch val {
			case noMessagesSts:
				return nil, ErrMsgNotFound
			default:
				desc := r.Header.Get(descrHdr)
				if desc == _EMPTY_ {
					desc = "unable to get message"
				}
				return nil, fmt.Errorf("nats: %s", desc)
			}
		}
	}
	// Check for headers that give us the required information to
	// reconstruct the message.
	if len(r.Header) == 0 {
		return nil, fmt.Errorf("nats: response should have headers")
	}
	stream := r.Header.Get(JSStream)
	if stream == _EMPTY_ {
		return nil, fmt.Errorf("nats: missing stream header")
	}

	// Mirrors can now answer direct gets, so removing check for name equality.
	// TODO(dlc) - We could have server also have a header with origin and check that?

	seqStr := r.Header.Get(JSSequence)
	if seqStr == _EMPTY_ {
		return nil, fmt.Errorf("nats: missing sequence header")
	}
	seq, err := strconv.ParseUint(seqStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err)
	}
	timeStr := r.Header.Get(JSTimeStamp)
	if timeStr == _EMPTY_ {
		return nil, fmt.Errorf("nats: missing timestamp header")
	}
	// Temporary code: the server in main branch is sending with format
	// "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed
	// to use format RFC3339Nano. Because of server test deps/cycle,
	// support both until the server PR lands.
	tm, err := time.Parse(time.RFC3339Nano, timeStr)
	if err != nil {
		tm, err = time.Parse("2006-01-02 15:04:05.999999999 +0000 UTC", timeStr)
		if err != nil {
			return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err)
		}
	}
	subj := r.Header.Get(JSSubject)
	if subj == _EMPTY_ {
		return nil, fmt.Errorf("nats: missing subject header")
	}
	return &RawStreamMsg{
		Subject:  subj,
		Sequence: seq,
		Header:   r.Header,
		Data:     r.Data,
		Time:     tm,
	}, nil
}
-
// msgDeleteRequest is the request body for deleting a single message.
type msgDeleteRequest struct {
	Seq uint64 `json:"seq"`
	// NoErase, when true, marks the message deleted without overwriting
	// its payload (see DeleteMsg vs SecureDeleteMsg).
	NoErase bool `json:"no_erase,omitempty"`
}

// msgDeleteResponse is the response for a Stream delete request.
type msgDeleteResponse struct {
	apiResponse
	// Success is true when the server deleted the message.
	Success bool `json:"success,omitempty"`
}
-
-// DeleteMsg deletes a message from a stream.
-// The message is marked as erased, but not overwritten
-func (js *js) DeleteMsg(name string, seq uint64, opts ...JSOpt) error {
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return err
- }
- if cancel != nil {
- defer cancel()
- }
-
- return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq, NoErase: true})
-}
-
-// SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data
-// As a result, this operation is slower than DeleteMsg()
-func (js *js) SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error {
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return err
- }
- if cancel != nil {
- defer cancel()
- }
-
- return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq})
-}
-
-func (js *js) deleteMsg(ctx context.Context, stream string, req *msgDeleteRequest) error {
- if err := checkStreamName(stream); err != nil {
- return err
- }
- reqJSON, err := json.Marshal(req)
- if err != nil {
- return err
- }
-
- dsSubj := js.apiSubj(fmt.Sprintf(apiMsgDeleteT, stream))
- r, err := js.apiRequestWithContext(ctx, dsSubj, reqJSON)
- if err != nil {
- return err
- }
- var resp msgDeleteResponse
- if err := json.Unmarshal(r.Data, &resp); err != nil {
- return err
- }
- if resp.Error != nil {
- return resp.Error
- }
- return nil
-}
-
// StreamPurgeRequest is optional request information to the purge API.
type StreamPurgeRequest struct {
	// Purge up to but not including sequence.
	Sequence uint64 `json:"seq,omitempty"`
	// Subject to match against messages for the purge command.
	Subject string `json:"filter,omitempty"`
	// Number of messages to keep.
	Keep uint64 `json:"keep,omitempty"`
}

// streamPurgeResponse is the response for a stream purge request.
type streamPurgeResponse struct {
	apiResponse
	Success bool `json:"success,omitempty"`
	// Purged is the number of messages removed.
	Purged uint64 `json:"purged"`
}
-
-// PurgeStream purges messages on a Stream.
-func (js *js) PurgeStream(stream string, opts ...JSOpt) error {
- if err := checkStreamName(stream); err != nil {
- return err
- }
- var req *StreamPurgeRequest
- var ok bool
- for _, opt := range opts {
- // For PurgeStream, only request body opt is relevant
- if req, ok = opt.(*StreamPurgeRequest); ok {
- break
- }
- }
- return js.purgeStream(stream, req)
-}
-
-func (js *js) purgeStream(stream string, req *StreamPurgeRequest, opts ...JSOpt) error {
- o, cancel, err := getJSContextOpts(js.opts, opts...)
- if err != nil {
- return err
- }
- if cancel != nil {
- defer cancel()
- }
-
- var b []byte
- if req != nil {
- if b, err = json.Marshal(req); err != nil {
- return err
- }
- }
-
- psSubj := js.apiSubj(fmt.Sprintf(apiStreamPurgeT, stream))
- r, err := js.apiRequestWithContext(o.ctx, psSubj, b)
- if err != nil {
- return err
- }
- var resp streamPurgeResponse
- if err := json.Unmarshal(r.Data, &resp); err != nil {
- return err
- }
- if resp.Error != nil {
- if errors.Is(resp.Error, ErrBadRequest) {
- return fmt.Errorf("%w: %s", ErrBadRequest, "invalid purge request body")
- }
- return resp.Error
- }
- return nil
-}
-
// streamLister fetches pages of StreamInfo objects. This object is not safe
// to use for multiple threads.
type streamLister struct {
	js   *js
	page []*StreamInfo // most recently fetched page
	err  error         // first error encountered; sticky

	offset   int       // next page offset to request
	pageInfo *apiPaged // paging info from the last response
}

// streamListResponse list of detailed stream information.
// A nil request is valid and means all streams.
type streamListResponse struct {
	apiResponse
	apiPaged
	Streams []*StreamInfo `json:"streams"`
}

// streamNamesRequest is used for Stream Name requests.
type streamNamesRequest struct {
	apiPagedRequest
	// These are filters that can be applied to the list.
	Subject string `json:"subject,omitempty"`
}
-
-// Next fetches the next StreamInfo page.
-func (s *streamLister) Next() bool {
- if s.err != nil {
- return false
- }
- if s.pageInfo != nil && s.offset >= s.pageInfo.Total {
- return false
- }
-
- req, err := json.Marshal(streamNamesRequest{
- apiPagedRequest: apiPagedRequest{Offset: s.offset},
- Subject: s.js.opts.streamListSubject,
- })
- if err != nil {
- s.err = err
- return false
- }
-
- var cancel context.CancelFunc
- ctx := s.js.opts.ctx
- if ctx == nil {
- ctx, cancel = context.WithTimeout(context.Background(), s.js.opts.wait)
- defer cancel()
- }
-
- slSubj := s.js.apiSubj(apiStreamListT)
- r, err := s.js.apiRequestWithContext(ctx, slSubj, req)
- if err != nil {
- s.err = err
- return false
- }
- var resp streamListResponse
- if err := json.Unmarshal(r.Data, &resp); err != nil {
- s.err = err
- return false
- }
- if resp.Error != nil {
- s.err = resp.Error
- return false
- }
-
- s.pageInfo = &resp.apiPaged
- s.page = resp.Streams
- s.offset += len(s.page)
- return true
-}
-
-// Page returns the current StreamInfo page.
-func (s *streamLister) Page() []*StreamInfo {
- return s.page
-}
-
-// Err returns any errors found while fetching pages.
-func (s *streamLister) Err() error {
- return s.err
-}
-
-// Streams can be used to retrieve a list of StreamInfo objects.
-func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo {
- o, cancel, err := getJSContextOpts(jsc.opts, opts...)
- if err != nil {
- return nil
- }
-
- ch := make(chan *StreamInfo)
- l := &streamLister{js: &js{nc: jsc.nc, opts: o}}
- go func() {
- if cancel != nil {
- defer cancel()
- }
- defer close(ch)
- for l.Next() {
- for _, info := range l.Page() {
- select {
- case ch <- info:
- case <-o.ctx.Done():
- return
- }
- }
- }
- }()
-
- return ch
-}
-
-// StreamsInfo can be used to retrieve a list of StreamInfo objects.
-// DEPRECATED: Use Streams() instead.
-func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo {
- return jsc.Streams(opts...)
-}
-
-type streamNamesLister struct {
- js *js
-
- err error
- offset int
- page []string
- pageInfo *apiPaged
-}
-
-// Next fetches the next stream names page.
-func (l *streamNamesLister) Next() bool {
- if l.err != nil {
- return false
- }
- if l.pageInfo != nil && l.offset >= l.pageInfo.Total {
- return false
- }
-
- var cancel context.CancelFunc
- ctx := l.js.opts.ctx
- if ctx == nil {
- ctx, cancel = context.WithTimeout(context.Background(), l.js.opts.wait)
- defer cancel()
- }
-
- req, err := json.Marshal(streamNamesRequest{
- apiPagedRequest: apiPagedRequest{Offset: l.offset},
- Subject: l.js.opts.streamListSubject,
- })
- if err != nil {
- l.err = err
- return false
- }
- r, err := l.js.apiRequestWithContext(ctx, l.js.apiSubj(apiStreams), req)
- if err != nil {
- l.err = err
- return false
- }
- var resp streamNamesResponse
- if err := json.Unmarshal(r.Data, &resp); err != nil {
- l.err = err
- return false
- }
- if resp.Error != nil {
- l.err = resp.Error
- return false
- }
-
- l.pageInfo = &resp.apiPaged
- l.page = resp.Streams
- l.offset += len(l.page)
- return true
-}
-
-// Page returns the current ConsumerInfo page.
-func (l *streamNamesLister) Page() []string {
- return l.page
-}
-
-// Err returns any errors found while fetching pages.
-func (l *streamNamesLister) Err() error {
- return l.err
-}
-
-// StreamNames is used to retrieve a list of Stream names.
-func (jsc *js) StreamNames(opts ...JSOpt) <-chan string {
- o, cancel, err := getJSContextOpts(jsc.opts, opts...)
- if err != nil {
- return nil
- }
-
- ch := make(chan string)
- l := &streamNamesLister{js: &js{nc: jsc.nc, opts: o}}
- go func() {
- if cancel != nil {
- defer cancel()
- }
- defer close(ch)
- for l.Next() {
- for _, info := range l.Page() {
- select {
- case ch <- info:
- case <-o.ctx.Done():
- return
- }
- }
- }
- }()
-
- return ch
-}
-
-// StreamNameBySubject returns a stream name that matches the subject.
-func (jsc *js) StreamNameBySubject(subj string, opts ...JSOpt) (string, error) {
- o, cancel, err := getJSContextOpts(jsc.opts, opts...)
- if err != nil {
- return "", err
- }
- if cancel != nil {
- defer cancel()
- }
-
- var slr streamNamesResponse
- req := &streamRequest{subj}
- j, err := json.Marshal(req)
- if err != nil {
- return _EMPTY_, err
- }
-
- resp, err := jsc.apiRequestWithContext(o.ctx, jsc.apiSubj(apiStreams), j)
- if err != nil {
- if errors.Is(err, ErrNoResponders) {
- err = ErrJetStreamNotEnabled
- }
- return _EMPTY_, err
- }
- if err := json.Unmarshal(resp.Data, &slr); err != nil {
- return _EMPTY_, err
- }
-
- if slr.Error != nil || len(slr.Streams) != 1 {
- return _EMPTY_, ErrNoMatchingStream
- }
- return slr.Streams[0], nil
-}
-
-func getJSContextOpts(defs *jsOpts, opts ...JSOpt) (*jsOpts, context.CancelFunc, error) {
- var o jsOpts
- for _, opt := range opts {
- if err := opt.configureJSContext(&o); err != nil {
- return nil, nil, err
- }
- }
-
- // Check for option collisions. Right now just timeout and context.
- if o.ctx != nil && o.wait != 0 {
- return nil, nil, ErrContextAndTimeout
- }
- if o.wait == 0 && o.ctx == nil {
- o.wait = defs.wait
- }
- var cancel context.CancelFunc
- if o.ctx == nil && o.wait > 0 {
- o.ctx, cancel = context.WithTimeout(context.Background(), o.wait)
- }
- if o.pre == _EMPTY_ {
- o.pre = defs.pre
- }
-
- return &o, cancel, nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/kv.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/kv.go
deleted file mode 100644
index d9f40fd..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/kv.go
+++ /dev/null
@@ -1,1196 +0,0 @@
-// Copyright 2021-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "context"
- "errors"
- "fmt"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/nats-io/nats.go/internal/parser"
-)
-
-// KeyValueManager is used to manage KeyValue stores.
-type KeyValueManager interface {
- // KeyValue will lookup and bind to an existing KeyValue store.
- KeyValue(bucket string) (KeyValue, error)
- // CreateKeyValue will create a KeyValue store with the following configuration.
- CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error)
- // DeleteKeyValue will delete this KeyValue store (JetStream stream).
- DeleteKeyValue(bucket string) error
- // KeyValueStoreNames is used to retrieve a list of key value store names
- KeyValueStoreNames() <-chan string
- // KeyValueStores is used to retrieve a list of key value store statuses
- KeyValueStores() <-chan KeyValueStatus
-}
-
-// KeyValue contains methods to operate on a KeyValue store.
-type KeyValue interface {
- // Get returns the latest value for the key.
- Get(key string) (entry KeyValueEntry, err error)
- // GetRevision returns a specific revision value for the key.
- GetRevision(key string, revision uint64) (entry KeyValueEntry, err error)
- // Put will place the new value for the key into the store.
- Put(key string, value []byte) (revision uint64, err error)
- // PutString will place the string for the key into the store.
- PutString(key string, value string) (revision uint64, err error)
- // Create will add the key/value pair iff it does not exist.
- Create(key string, value []byte) (revision uint64, err error)
- // Update will update the value iff the latest revision matches.
- Update(key string, value []byte, last uint64) (revision uint64, err error)
- // Delete will place a delete marker and leave all revisions.
- Delete(key string, opts ...DeleteOpt) error
- // Purge will place a delete marker and remove all previous revisions.
- Purge(key string, opts ...DeleteOpt) error
- // Watch for any updates to keys that match the keys argument which could include wildcards.
- // Watch will send a nil entry when it has received all initial values.
- Watch(keys string, opts ...WatchOpt) (KeyWatcher, error)
- // WatchAll will invoke the callback for all updates.
- WatchAll(opts ...WatchOpt) (KeyWatcher, error)
- // Keys will return all keys.
- // DEPRECATED: Use ListKeys instead to avoid memory issues.
- Keys(opts ...WatchOpt) ([]string, error)
- // ListKeys will return all keys in a channel.
- ListKeys(opts ...WatchOpt) (KeyLister, error)
- // History will return all historical values for the key.
- History(key string, opts ...WatchOpt) ([]KeyValueEntry, error)
- // Bucket returns the current bucket name.
- Bucket() string
- // PurgeDeletes will remove all current delete markers.
- PurgeDeletes(opts ...PurgeOpt) error
- // Status retrieves the status and configuration of a bucket
- Status() (KeyValueStatus, error)
-}
-
-// KeyValueStatus is run-time status about a Key-Value bucket
-type KeyValueStatus interface {
- // Bucket the name of the bucket
- Bucket() string
-
- // Values is how many messages are in the bucket, including historical values
- Values() uint64
-
- // History returns the configured history kept per key
- History() int64
-
- // TTL is how long the bucket keeps values for
- TTL() time.Duration
-
- // BackingStore indicates what technology is used for storage of the bucket
- BackingStore() string
-
- // Bytes returns the size in bytes of the bucket
- Bytes() uint64
-
- // IsCompressed indicates if the data is compressed on disk
- IsCompressed() bool
-}
-
-// KeyWatcher is what is returned when doing a watch.
-type KeyWatcher interface {
- // Context returns watcher context optionally provided by nats.Context option.
- Context() context.Context
- // Updates returns a channel to read any updates to entries.
- Updates() <-chan KeyValueEntry
- // Stop will stop this watcher.
- Stop() error
-}
-
-// KeyLister is used to retrieve a list of key value store keys
-type KeyLister interface {
- Keys() <-chan string
- Stop() error
-}
-
-type WatchOpt interface {
- configureWatcher(opts *watchOpts) error
-}
-
-// For nats.Context() support.
-func (ctx ContextOpt) configureWatcher(opts *watchOpts) error {
- opts.ctx = ctx
- return nil
-}
-
-type watchOpts struct {
- ctx context.Context
- // Do not send delete markers to the update channel.
- ignoreDeletes bool
- // Include all history per subject, not just last one.
- includeHistory bool
- // Include only updates for keys.
- updatesOnly bool
- // retrieve only the meta data of the entry
- metaOnly bool
-}
-
-type watchOptFn func(opts *watchOpts) error
-
-func (opt watchOptFn) configureWatcher(opts *watchOpts) error {
- return opt(opts)
-}
-
-// IncludeHistory instructs the key watcher to include historical values as well.
-func IncludeHistory() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- if opts.updatesOnly {
- return errors.New("nats: include history can not be used with updates only")
- }
- opts.includeHistory = true
- return nil
- })
-}
-
-// UpdatesOnly instructs the key watcher to only include updates on values (without latest values when started).
-func UpdatesOnly() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- if opts.includeHistory {
- return errors.New("nats: updates only can not be used with include history")
- }
- opts.updatesOnly = true
- return nil
- })
-}
-
-// IgnoreDeletes will have the key watcher not pass any deleted keys.
-func IgnoreDeletes() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- opts.ignoreDeletes = true
- return nil
- })
-}
-
-// MetaOnly instructs the key watcher to retrieve only the entry meta data, not the entry value
-func MetaOnly() WatchOpt {
- return watchOptFn(func(opts *watchOpts) error {
- opts.metaOnly = true
- return nil
- })
-}
-
-type PurgeOpt interface {
- configurePurge(opts *purgeOpts) error
-}
-
-type purgeOpts struct {
- dmthr time.Duration // Delete markers threshold
- ctx context.Context
-}
-
-// DeleteMarkersOlderThan indicates that delete or purge markers older than that
-// will be deleted as part of PurgeDeletes() operation, otherwise, only the data
-// will be removed but markers that are recent will be kept.
-// Note that if no option is specified, the default is 30 minutes. You can set
-// this option to a negative value to instruct to always remove the markers,
-// regardless of their age.
-type DeleteMarkersOlderThan time.Duration
-
-func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error {
- opts.dmthr = time.Duration(ttl)
- return nil
-}
-
-// For nats.Context() support.
-func (ctx ContextOpt) configurePurge(opts *purgeOpts) error {
- opts.ctx = ctx
- return nil
-}
-
-type DeleteOpt interface {
- configureDelete(opts *deleteOpts) error
-}
-
-type deleteOpts struct {
- // Remove all previous revisions.
- purge bool
-
- // Delete only if the latest revision matches.
- revision uint64
-}
-
-type deleteOptFn func(opts *deleteOpts) error
-
-func (opt deleteOptFn) configureDelete(opts *deleteOpts) error {
- return opt(opts)
-}
-
-// LastRevision deletes if the latest revision matches.
-func LastRevision(revision uint64) DeleteOpt {
- return deleteOptFn(func(opts *deleteOpts) error {
- opts.revision = revision
- return nil
- })
-}
-
-// purge removes all previous revisions.
-func purge() DeleteOpt {
- return deleteOptFn(func(opts *deleteOpts) error {
- opts.purge = true
- return nil
- })
-}
-
-// KeyValueConfig is for configuring a KeyValue store.
-type KeyValueConfig struct {
- Bucket string `json:"bucket"`
- Description string `json:"description,omitempty"`
- MaxValueSize int32 `json:"max_value_size,omitempty"`
- History uint8 `json:"history,omitempty"`
- TTL time.Duration `json:"ttl,omitempty"`
- MaxBytes int64 `json:"max_bytes,omitempty"`
- Storage StorageType `json:"storage,omitempty"`
- Replicas int `json:"num_replicas,omitempty"`
- Placement *Placement `json:"placement,omitempty"`
- RePublish *RePublish `json:"republish,omitempty"`
- Mirror *StreamSource `json:"mirror,omitempty"`
- Sources []*StreamSource `json:"sources,omitempty"`
-
- // Enable underlying stream compression.
- // NOTE: Compression is supported for nats-server 2.10.0+
- Compression bool `json:"compression,omitempty"`
-}
-
-// Used to watch all keys.
-const (
- KeyValueMaxHistory = 64
- AllKeys = ">"
- kvLatestRevision = 0
- kvop = "KV-Operation"
- kvdel = "DEL"
- kvpurge = "PURGE"
-)
-
-type KeyValueOp uint8
-
-const (
- KeyValuePut KeyValueOp = iota
- KeyValueDelete
- KeyValuePurge
-)
-
-func (op KeyValueOp) String() string {
- switch op {
- case KeyValuePut:
- return "KeyValuePutOp"
- case KeyValueDelete:
- return "KeyValueDeleteOp"
- case KeyValuePurge:
- return "KeyValuePurgeOp"
- default:
- return "Unknown Operation"
- }
-}
-
-// KeyValueEntry is a retrieved entry for Get or List or Watch.
-type KeyValueEntry interface {
- // Bucket is the bucket the data was loaded from.
- Bucket() string
- // Key is the key that was retrieved.
- Key() string
- // Value is the retrieved value.
- Value() []byte
- // Revision is a unique sequence for this value.
- Revision() uint64
- // Created is the time the data was put in the bucket.
- Created() time.Time
- // Delta is distance from the latest value.
- Delta() uint64
- // Operation returns Put or Delete or Purge.
- Operation() KeyValueOp
-}
-
-// Errors
-var (
- ErrKeyValueConfigRequired = errors.New("nats: config required")
- ErrInvalidBucketName = errors.New("nats: invalid bucket name")
- ErrInvalidKey = errors.New("nats: invalid key")
- ErrBucketNotFound = errors.New("nats: bucket not found")
- ErrBadBucket = errors.New("nats: bucket not valid key-value store")
- ErrKeyNotFound = errors.New("nats: key not found")
- ErrKeyDeleted = errors.New("nats: key was deleted")
- ErrHistoryToLarge = errors.New("nats: history limited to a max of 64")
- ErrNoKeysFound = errors.New("nats: no keys found")
-)
-
-var (
- ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"}
-)
-
-const (
- kvBucketNamePre = "KV_"
- kvBucketNameTmpl = "KV_%s"
- kvSubjectsTmpl = "$KV.%s.>"
- kvSubjectsPreTmpl = "$KV.%s."
- kvSubjectsPreDomainTmpl = "%s.$KV.%s."
- kvNoPending = "0"
-)
-
-// Regex for valid keys and buckets.
-var (
- validBucketRe = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)
- validKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9]+$`)
- validSearchKeyRe = regexp.MustCompile(`^[-/_=\.a-zA-Z0-9*]*[>]?$`)
-)
-
-// KeyValue will lookup and bind to an existing KeyValue store.
-func (js *js) KeyValue(bucket string) (KeyValue, error) {
- if !js.nc.serverMinVersion(2, 6, 2) {
- return nil, errors.New("nats: key-value requires at least server version 2.6.2")
- }
- if !bucketValid(bucket) {
- return nil, ErrInvalidBucketName
- }
- stream := fmt.Sprintf(kvBucketNameTmpl, bucket)
- si, err := js.StreamInfo(stream)
- if err != nil {
- if errors.Is(err, ErrStreamNotFound) {
- err = ErrBucketNotFound
- }
- return nil, err
- }
- // Do some quick sanity checks that this is a correctly formed stream for KV.
- // Max msgs per subject should be > 0.
- if si.Config.MaxMsgsPerSubject < 1 {
- return nil, ErrBadBucket
- }
-
- return mapStreamToKVS(js, si), nil
-}
-
-// CreateKeyValue will create a KeyValue store with the following configuration.
-func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) {
- if !js.nc.serverMinVersion(2, 6, 2) {
- return nil, errors.New("nats: key-value requires at least server version 2.6.2")
- }
- if cfg == nil {
- return nil, ErrKeyValueConfigRequired
- }
- if !bucketValid(cfg.Bucket) {
- return nil, ErrInvalidBucketName
- }
- if _, err := js.AccountInfo(); err != nil {
- return nil, err
- }
-
- // Default to 1 for history. Max is 64 for now.
- history := int64(1)
- if cfg.History > 0 {
- if cfg.History > KeyValueMaxHistory {
- return nil, ErrHistoryToLarge
- }
- history = int64(cfg.History)
- }
-
- replicas := cfg.Replicas
- if replicas == 0 {
- replicas = 1
- }
-
- // We will set explicitly some values so that we can do comparison
- // if we get an "already in use" error and need to check if it is same.
- maxBytes := cfg.MaxBytes
- if maxBytes == 0 {
- maxBytes = -1
- }
- maxMsgSize := cfg.MaxValueSize
- if maxMsgSize == 0 {
- maxMsgSize = -1
- }
- // When stream's MaxAge is not set, server uses 2 minutes as the default
- // for the duplicate window. If MaxAge is set, and lower than 2 minutes,
- // then the duplicate window will be set to that. If MaxAge is greater,
- // we will cap the duplicate window to 2 minutes (to be consistent with
- // previous behavior).
- duplicateWindow := 2 * time.Minute
- if cfg.TTL > 0 && cfg.TTL < duplicateWindow {
- duplicateWindow = cfg.TTL
- }
- var compression StoreCompression
- if cfg.Compression {
- compression = S2Compression
- }
- scfg := &StreamConfig{
- Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket),
- Description: cfg.Description,
- MaxMsgsPerSubject: history,
- MaxBytes: maxBytes,
- MaxAge: cfg.TTL,
- MaxMsgSize: maxMsgSize,
- Storage: cfg.Storage,
- Replicas: replicas,
- Placement: cfg.Placement,
- AllowRollup: true,
- DenyDelete: true,
- Duplicates: duplicateWindow,
- MaxMsgs: -1,
- MaxConsumers: -1,
- AllowDirect: true,
- RePublish: cfg.RePublish,
- Compression: compression,
- }
- if cfg.Mirror != nil {
- // Copy in case we need to make changes so we do not change caller's version.
- m := cfg.Mirror.copy()
- if !strings.HasPrefix(m.Name, kvBucketNamePre) {
- m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name)
- }
- scfg.Mirror = m
- scfg.MirrorDirect = true
- } else if len(cfg.Sources) > 0 {
- for _, ss := range cfg.Sources {
- var sourceBucketName string
- if strings.HasPrefix(ss.Name, kvBucketNamePre) {
- sourceBucketName = ss.Name[len(kvBucketNamePre):]
- } else {
- sourceBucketName = ss.Name
- ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name)
- }
-
- if ss.External == nil || sourceBucketName != cfg.Bucket {
- ss.SubjectTransforms = []SubjectTransformConfig{{Source: fmt.Sprintf(kvSubjectsTmpl, sourceBucketName), Destination: fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}}
- }
- scfg.Sources = append(scfg.Sources, ss)
- }
- scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}
- } else {
- scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}
- }
-
- // If we are at server version 2.7.2 or above use DiscardNew. We can not use DiscardNew for 2.7.1 or below.
- if js.nc.serverMinVersion(2, 7, 2) {
- scfg.Discard = DiscardNew
- }
-
- si, err := js.AddStream(scfg)
- if err != nil {
- // If we have a failure to add, it could be because we have
- // a config change if the KV was created against a pre 2.7.2
- // and we are now moving to a v2.7.2+. If that is the case
- // and the only difference is the discard policy, then update
- // the stream.
- // The same logic applies for KVs created pre 2.9.x and
- // the AllowDirect setting.
- if errors.Is(err, ErrStreamNameAlreadyInUse) {
- if si, _ = js.StreamInfo(scfg.Name); si != nil {
- // To compare, make the server's stream info discard
- // policy same than ours.
- si.Config.Discard = scfg.Discard
- // Also need to set allow direct for v2.9.x+
- si.Config.AllowDirect = scfg.AllowDirect
- if reflect.DeepEqual(&si.Config, scfg) {
- si, err = js.UpdateStream(scfg)
- }
- }
- }
- if err != nil {
- return nil, err
- }
- }
- return mapStreamToKVS(js, si), nil
-}
-
-// DeleteKeyValue will delete this KeyValue store (JetStream stream).
-func (js *js) DeleteKeyValue(bucket string) error {
- if !bucketValid(bucket) {
- return ErrInvalidBucketName
- }
- stream := fmt.Sprintf(kvBucketNameTmpl, bucket)
- return js.DeleteStream(stream)
-}
-
-type kvs struct {
- name string
- stream string
- pre string
- putPre string
- js *js
- // If true, it means that APIPrefix/Domain was set in the context
- // and we need to add something to some of our high level protocols
- // (such as Put, etc..)
- useJSPfx bool
- // To know if we can use the stream direct get API
- useDirect bool
-}
-
-// Underlying entry.
-type kve struct {
- bucket string
- key string
- value []byte
- revision uint64
- delta uint64
- created time.Time
- op KeyValueOp
-}
-
-func (e *kve) Bucket() string { return e.bucket }
-func (e *kve) Key() string { return e.key }
-func (e *kve) Value() []byte { return e.value }
-func (e *kve) Revision() uint64 { return e.revision }
-func (e *kve) Created() time.Time { return e.created }
-func (e *kve) Delta() uint64 { return e.delta }
-func (e *kve) Operation() KeyValueOp { return e.op }
-
-func bucketValid(bucket string) bool {
- if len(bucket) == 0 {
- return false
- }
- return validBucketRe.MatchString(bucket)
-}
-
-func keyValid(key string) bool {
- if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' {
- return false
- }
- return validKeyRe.MatchString(key)
-}
-
-func searchKeyValid(key string) bool {
- if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' {
- return false
- }
- return validSearchKeyRe.MatchString(key)
-}
-
-// Get returns the latest value for the key.
-func (kv *kvs) Get(key string) (KeyValueEntry, error) {
- e, err := kv.get(key, kvLatestRevision)
- if err != nil {
- if errors.Is(err, ErrKeyDeleted) {
- return nil, ErrKeyNotFound
- }
- return nil, err
- }
-
- return e, nil
-}
-
-// GetRevision returns a specific revision value for the key.
-func (kv *kvs) GetRevision(key string, revision uint64) (KeyValueEntry, error) {
- e, err := kv.get(key, revision)
- if err != nil {
- if errors.Is(err, ErrKeyDeleted) {
- return nil, ErrKeyNotFound
- }
- return nil, err
- }
-
- return e, nil
-}
-
-func (kv *kvs) get(key string, revision uint64) (KeyValueEntry, error) {
- if !keyValid(key) {
- return nil, ErrInvalidKey
- }
-
- var b strings.Builder
- b.WriteString(kv.pre)
- b.WriteString(key)
-
- var m *RawStreamMsg
- var err error
- var _opts [1]JSOpt
- opts := _opts[:0]
- if kv.useDirect {
- opts = append(opts, DirectGet())
- }
-
- if revision == kvLatestRevision {
- m, err = kv.js.GetLastMsg(kv.stream, b.String(), opts...)
- } else {
- m, err = kv.js.GetMsg(kv.stream, revision, opts...)
- // If a sequence was provided, just make sure that the retrieved
- // message subject matches the request.
- if err == nil && m.Subject != b.String() {
- return nil, ErrKeyNotFound
- }
- }
- if err != nil {
- if errors.Is(err, ErrMsgNotFound) {
- err = ErrKeyNotFound
- }
- return nil, err
- }
-
- entry := &kve{
- bucket: kv.name,
- key: key,
- value: m.Data,
- revision: m.Sequence,
- created: m.Time,
- }
-
- // Double check here that this is not a DEL Operation marker.
- if len(m.Header) > 0 {
- switch m.Header.Get(kvop) {
- case kvdel:
- entry.op = KeyValueDelete
- return entry, ErrKeyDeleted
- case kvpurge:
- entry.op = KeyValuePurge
- return entry, ErrKeyDeleted
- }
- }
-
- return entry, nil
-}
-
-// Put will place the new value for the key into the store.
-func (kv *kvs) Put(key string, value []byte) (revision uint64, err error) {
- if !keyValid(key) {
- return 0, ErrInvalidKey
- }
-
- var b strings.Builder
- if kv.useJSPfx {
- b.WriteString(kv.js.opts.pre)
- }
- if kv.putPre != _EMPTY_ {
- b.WriteString(kv.putPre)
- } else {
- b.WriteString(kv.pre)
- }
- b.WriteString(key)
-
- pa, err := kv.js.Publish(b.String(), value)
- if err != nil {
- return 0, err
- }
- return pa.Sequence, err
-}
-
-// PutString will place the string for the key into the store.
-func (kv *kvs) PutString(key string, value string) (revision uint64, err error) {
- return kv.Put(key, []byte(value))
-}
-
-// Create will add the key/value pair if it does not exist.
-func (kv *kvs) Create(key string, value []byte) (revision uint64, err error) {
- v, err := kv.Update(key, value, 0)
- if err == nil {
- return v, nil
- }
-
- // TODO(dlc) - Since we have tombstones for DEL ops for watchers, this could be from that
- // so we need to double check.
- if e, err := kv.get(key, kvLatestRevision); errors.Is(err, ErrKeyDeleted) {
- return kv.Update(key, value, e.Revision())
- }
-
- // Check if the expected last subject sequence is not zero which implies
- // the key already exists.
- if errors.Is(err, ErrKeyExists) {
- jserr := ErrKeyExists.(*jsError)
- return 0, fmt.Errorf("%w: %s", err, jserr.message)
- }
-
- return 0, err
-}
-
-// Update will update the value if the latest revision matches.
-func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) {
- if !keyValid(key) {
- return 0, ErrInvalidKey
- }
-
- var b strings.Builder
- if kv.useJSPfx {
- b.WriteString(kv.js.opts.pre)
- }
- b.WriteString(kv.pre)
- b.WriteString(key)
-
- m := Msg{Subject: b.String(), Header: Header{}, Data: value}
- m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(revision, 10))
-
- pa, err := kv.js.PublishMsg(&m)
- if err != nil {
- return 0, err
- }
- return pa.Sequence, err
-}
-
-// Delete will place a delete marker and leave all revisions.
-func (kv *kvs) Delete(key string, opts ...DeleteOpt) error {
- if !keyValid(key) {
- return ErrInvalidKey
- }
-
- var b strings.Builder
- if kv.useJSPfx {
- b.WriteString(kv.js.opts.pre)
- }
- if kv.putPre != _EMPTY_ {
- b.WriteString(kv.putPre)
- } else {
- b.WriteString(kv.pre)
- }
- b.WriteString(key)
-
- // DEL op marker. For watch functionality.
- m := NewMsg(b.String())
-
- var o deleteOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureDelete(&o); err != nil {
- return err
- }
- }
- }
-
- if o.purge {
- m.Header.Set(kvop, kvpurge)
- m.Header.Set(MsgRollup, MsgRollupSubject)
- } else {
- m.Header.Set(kvop, kvdel)
- }
-
- if o.revision != 0 {
- m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10))
- }
-
- _, err := kv.js.PublishMsg(m)
- return err
-}
-
-// Purge will remove the key and all revisions.
-func (kv *kvs) Purge(key string, opts ...DeleteOpt) error {
- return kv.Delete(key, append(opts, purge())...)
-}
-
-const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute
-
-// PurgeDeletes will remove all current delete markers.
-// This is a maintenance option if there is a larger buildup of delete markers.
-// See DeleteMarkersOlderThan() option for more information.
-func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error {
- var o purgeOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configurePurge(&o); err != nil {
- return err
- }
- }
- }
- // Transfer possible context purge option to the watcher. This is the
- // only option that matters for the PurgeDeletes() feature.
- var wopts []WatchOpt
- if o.ctx != nil {
- wopts = append(wopts, Context(o.ctx))
- }
- watcher, err := kv.WatchAll(wopts...)
- if err != nil {
- return err
- }
- defer watcher.Stop()
-
- var limit time.Time
- olderThan := o.dmthr
- // Negative value is used to instruct to always remove markers, regardless
- // of age. If set to 0 (or not set), use our default value.
- if olderThan == 0 {
- olderThan = kvDefaultPurgeDeletesMarkerThreshold
- }
- if olderThan > 0 {
- limit = time.Now().Add(-olderThan)
- }
-
- var deleteMarkers []KeyValueEntry
- for entry := range watcher.Updates() {
- if entry == nil {
- break
- }
- if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge {
- deleteMarkers = append(deleteMarkers, entry)
- }
- }
-
- var (
- pr StreamPurgeRequest
- b strings.Builder
- )
- // Do actual purges here.
- for _, entry := range deleteMarkers {
- b.WriteString(kv.pre)
- b.WriteString(entry.Key())
- pr.Subject = b.String()
- pr.Keep = 0
- if olderThan > 0 && entry.Created().After(limit) {
- pr.Keep = 1
- }
- if err := kv.js.purgeStream(kv.stream, &pr); err != nil {
- return err
- }
- b.Reset()
- }
- return nil
-}
-
-// Keys() will return all keys.
-func (kv *kvs) Keys(opts ...WatchOpt) ([]string, error) {
- opts = append(opts, IgnoreDeletes(), MetaOnly())
- watcher, err := kv.WatchAll(opts...)
- if err != nil {
- return nil, err
- }
- defer watcher.Stop()
-
- var keys []string
- for entry := range watcher.Updates() {
- if entry == nil {
- break
- }
- keys = append(keys, entry.Key())
- }
- if len(keys) == 0 {
- return nil, ErrNoKeysFound
- }
- return keys, nil
-}
-
-type keyLister struct {
- watcher KeyWatcher
- keys chan string
-}
-
-// ListKeys will return all keys.
-func (kv *kvs) ListKeys(opts ...WatchOpt) (KeyLister, error) {
- opts = append(opts, IgnoreDeletes(), MetaOnly())
- watcher, err := kv.WatchAll(opts...)
- if err != nil {
- return nil, err
- }
- kl := &keyLister{watcher: watcher, keys: make(chan string, 256)}
-
- go func() {
- defer close(kl.keys)
- defer watcher.Stop()
- for entry := range watcher.Updates() {
- if entry == nil {
- return
- }
- kl.keys <- entry.Key()
- }
- }()
- return kl, nil
-}
-
-func (kl *keyLister) Keys() <-chan string {
- return kl.keys
-}
-
-func (kl *keyLister) Stop() error {
- return kl.watcher.Stop()
-}
-
-// History will return all values for the key.
-func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) {
- opts = append(opts, IncludeHistory())
- watcher, err := kv.Watch(key, opts...)
- if err != nil {
- return nil, err
- }
- defer watcher.Stop()
-
- var entries []KeyValueEntry
- for entry := range watcher.Updates() {
- if entry == nil {
- break
- }
- entries = append(entries, entry)
- }
- if len(entries) == 0 {
- return nil, ErrKeyNotFound
- }
- return entries, nil
-}
-
-// Implementation for Watch
-type watcher struct {
- mu sync.Mutex
- updates chan KeyValueEntry
- sub *Subscription
- initDone bool
- initPending uint64
- received uint64
- ctx context.Context
-}
-
-// Context returns the context for the watcher if set.
-func (w *watcher) Context() context.Context {
- if w == nil {
- return nil
- }
- return w.ctx
-}
-
-// Updates returns the interior channel.
-func (w *watcher) Updates() <-chan KeyValueEntry {
- if w == nil {
- return nil
- }
- return w.updates
-}
-
-// Stop will unsubscribe from the watcher.
-func (w *watcher) Stop() error {
- if w == nil {
- return nil
- }
- return w.sub.Unsubscribe()
-}
-
-// WatchAll watches all keys.
-func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) {
- return kv.Watch(AllKeys, opts...)
-}
-
-// Watch will fire the callback when a key that matches the keys pattern is updated.
-// keys needs to be a valid NATS subject.
-func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
- if !searchKeyValid(keys) {
- return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "keys cannot be empty and must be a valid NATS subject")
- }
- var o watchOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureWatcher(&o); err != nil {
- return nil, err
- }
- }
- }
-
- // Could be a pattern so don't check for validity as we normally do.
- var b strings.Builder
- b.WriteString(kv.pre)
- b.WriteString(keys)
- keys = b.String()
-
- // We will block below on placing items on the chan. That is by design.
- w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx}
-
- update := func(m *Msg) {
- tokens, err := parser.GetMetadataFields(m.Reply)
- if err != nil {
- return
- }
- if len(m.Subject) <= len(kv.pre) {
- return
- }
- subj := m.Subject[len(kv.pre):]
-
- var op KeyValueOp
- if len(m.Header) > 0 {
- switch m.Header.Get(kvop) {
- case kvdel:
- op = KeyValueDelete
- case kvpurge:
- op = KeyValuePurge
- }
- }
- delta := parser.ParseNum(tokens[parser.AckNumPendingTokenPos])
- w.mu.Lock()
- defer w.mu.Unlock()
- if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) {
- entry := &kve{
- bucket: kv.name,
- key: subj,
- value: m.Data,
- revision: parser.ParseNum(tokens[parser.AckStreamSeqTokenPos]),
- created: time.Unix(0, int64(parser.ParseNum(tokens[parser.AckTimestampSeqTokenPos]))),
- delta: delta,
- op: op,
- }
- w.updates <- entry
- }
- // Check if done and initial values.
- // Skip if UpdatesOnly() is set, since there will never be updates initially.
- if !w.initDone {
- w.received++
- // We set this on the first trip through..
- if w.initPending == 0 {
- w.initPending = delta
- }
- if w.received > w.initPending || delta == 0 {
- w.initDone = true
- w.updates <- nil
- }
- }
- }
-
- // Used ordered consumer to deliver results.
- subOpts := []SubOpt{BindStream(kv.stream), OrderedConsumer()}
- if !o.includeHistory {
- subOpts = append(subOpts, DeliverLastPerSubject())
- }
- if o.updatesOnly {
- subOpts = append(subOpts, DeliverNew())
- }
- if o.metaOnly {
- subOpts = append(subOpts, HeadersOnly())
- }
- if o.ctx != nil {
- subOpts = append(subOpts, Context(o.ctx))
- }
- // Create the sub and rest of initialization under the lock.
- // We want to prevent the race between this code and the
- // update() callback.
- w.mu.Lock()
- defer w.mu.Unlock()
- sub, err := kv.js.Subscribe(keys, update, subOpts...)
- if err != nil {
- return nil, err
- }
- sub.mu.Lock()
- // If there were no pending messages at the time of the creation
- // of the consumer, send the marker.
- // Skip if UpdatesOnly() is set, since there will never be updates initially.
- if !o.updatesOnly {
- if sub.jsi != nil && sub.jsi.pending == 0 {
- w.initDone = true
- w.updates <- nil
- }
- } else {
- // if UpdatesOnly was used, mark initialization as complete
- w.initDone = true
- }
- // Set us up to close when the waitForMessages func returns.
- sub.pDone = func(_ string) {
- close(w.updates)
- }
- sub.mu.Unlock()
-
- w.sub = sub
- return w, nil
-}
-
-// Bucket returns the current bucket name (JetStream stream).
-func (kv *kvs) Bucket() string {
- return kv.name
-}
-
-// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus
-type KeyValueBucketStatus struct {
- nfo *StreamInfo
- bucket string
-}
-
-// Bucket the name of the bucket
-func (s *KeyValueBucketStatus) Bucket() string { return s.bucket }
-
-// Values is how many messages are in the bucket, including historical values
-func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs }
-
-// History returns the configured history kept per key
-func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject }
-
-// TTL is how long the bucket keeps values for
-func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge }
-
-// BackingStore indicates what technology is used for storage of the bucket
-func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" }
-
-// StreamInfo is the stream info retrieved to create the status
-func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo }
-
-// Bytes is the size of the stream
-func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes }
-
-// IsCompressed indicates if the data is compressed on disk
-func (s *KeyValueBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression }
-
-// Status retrieves the status and configuration of a bucket
-func (kv *kvs) Status() (KeyValueStatus, error) {
- nfo, err := kv.js.StreamInfo(kv.stream)
- if err != nil {
- return nil, err
- }
-
- return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil
-}
-
-// KeyValueStoreNames is used to retrieve a list of key value store names
-func (js *js) KeyValueStoreNames() <-chan string {
- ch := make(chan string)
- l := &streamNamesLister{js: js}
- l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*")
- go func() {
- defer close(ch)
- for l.Next() {
- for _, name := range l.Page() {
- if !strings.HasPrefix(name, kvBucketNamePre) {
- continue
- }
- ch <- strings.TrimPrefix(name, kvBucketNamePre)
- }
- }
- }()
-
- return ch
-}
-
-// KeyValueStores is used to retrieve a list of key value store statuses
-func (js *js) KeyValueStores() <-chan KeyValueStatus {
- ch := make(chan KeyValueStatus)
- l := &streamLister{js: js}
- l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*")
- go func() {
- defer close(ch)
- for l.Next() {
- for _, info := range l.Page() {
- if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) {
- continue
- }
- ch <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)}
- }
- }
- }()
- return ch
-}
-
-func mapStreamToKVS(js *js, info *StreamInfo) *kvs {
- bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre)
-
- kv := &kvs{
- name: bucket,
- stream: info.Config.Name,
- pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket),
- js: js,
- // Determine if we need to use the JS prefix in front of Put and Delete operations
- useJSPfx: js.opts.pre != defaultAPIPrefix,
- useDirect: info.Config.AllowDirect,
- }
-
- // If we are mirroring, we will have mirror direct on, so just use the mirror name
- // and override use
- if m := info.Config.Mirror; m != nil {
- bucket := strings.TrimPrefix(m.Name, kvBucketNamePre)
- if m.External != nil && m.External.APIPrefix != _EMPTY_ {
- kv.useJSPfx = false
- kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
- kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket)
- } else {
- kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket)
- }
- }
-
- return kv
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/legacy_jetstream.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/legacy_jetstream.md
deleted file mode 100644
index 43e1c73..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/legacy_jetstream.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# Legacy JetStream API
-
-This is a documentation for the legacy JetStream API. A README for the current
-API can be found [here](jetstream/README.md)
-
-## JetStream Basic Usage
-
-```go
-import "github.com/nats-io/nats.go"
-
-// Connect to NATS
-nc, _ := nats.Connect(nats.DefaultURL)
-
-// Create JetStream Context
-js, _ := nc.JetStream(nats.PublishAsyncMaxPending(256))
-
-// Simple Stream Publisher
-js.Publish("ORDERS.scratch", []byte("hello"))
-
-// Simple Async Stream Publisher
-for i := 0; i < 500; i++ {
- js.PublishAsync("ORDERS.scratch", []byte("hello"))
-}
-select {
-case <-js.PublishAsyncComplete():
-case <-time.After(5 * time.Second):
- fmt.Println("Did not resolve in time")
-}
-
-// Simple Async Ephemeral Consumer
-js.Subscribe("ORDERS.*", func(m *nats.Msg) {
- fmt.Printf("Received a JetStream message: %s\n", string(m.Data))
-})
-
-// Simple Sync Durable Consumer (optional SubOpts at the end)
-sub, err := js.SubscribeSync("ORDERS.*", nats.Durable("MONITOR"), nats.MaxDeliver(3))
-m, err := sub.NextMsg(timeout)
-
-// Simple Pull Consumer
-sub, err := js.PullSubscribe("ORDERS.*", "MONITOR")
-msgs, err := sub.Fetch(10)
-
-// Unsubscribe
-sub.Unsubscribe()
-
-// Drain
-sub.Drain()
-```
-
-## JetStream Basic Management
-
-```go
-import "github.com/nats-io/nats.go"
-
-// Connect to NATS
-nc, _ := nats.Connect(nats.DefaultURL)
-
-// Create JetStream Context
-js, _ := nc.JetStream()
-
-// Create a Stream
-js.AddStream(&nats.StreamConfig{
- Name: "ORDERS",
- Subjects: []string{"ORDERS.*"},
-})
-
-// Update a Stream
-js.UpdateStream(&nats.StreamConfig{
- Name: "ORDERS",
- MaxBytes: 8,
-})
-
-// Create a Consumer
-js.AddConsumer("ORDERS", &nats.ConsumerConfig{
- Durable: "MONITOR",
-})
-
-// Delete Consumer
-js.DeleteConsumer("ORDERS", "MONITOR")
-
-// Delete Stream
-js.DeleteStream("ORDERS")
-```
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/nats.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/nats.go
deleted file mode 100644
index befff78..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/nats.go
+++ /dev/null
@@ -1,5903 +0,0 @@
-// Copyright 2012-2024 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A Go client for the NATS messaging system (https://nats.io).
-package nats
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math/rand"
- "net"
- "net/http"
- "net/textproto"
- "net/url"
- "os"
- "path/filepath"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/nats-io/nkeys"
- "github.com/nats-io/nuid"
-
- "github.com/nats-io/nats.go/util"
-)
-
-// Default Constants
-const (
- Version = "1.35.0"
- DefaultURL = "nats://127.0.0.1:4222"
- DefaultPort = 4222
- DefaultMaxReconnect = 60
- DefaultReconnectWait = 2 * time.Second
- DefaultReconnectJitter = 100 * time.Millisecond
- DefaultReconnectJitterTLS = time.Second
- DefaultTimeout = 2 * time.Second
- DefaultPingInterval = 2 * time.Minute
- DefaultMaxPingOut = 2
- DefaultMaxChanLen = 64 * 1024 // 64k
- DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB
- RequestChanLen = 8
- DefaultDrainTimeout = 30 * time.Second
- DefaultFlusherTimeout = time.Minute
- LangString = "go"
-)
-
-const (
- // STALE_CONNECTION is for detection and proper handling of stale connections.
- STALE_CONNECTION = "stale connection"
-
- // PERMISSIONS_ERR is for when nats server subject authorization has failed.
- PERMISSIONS_ERR = "permissions violation"
-
- // AUTHORIZATION_ERR is for when nats server user authorization has failed.
- AUTHORIZATION_ERR = "authorization violation"
-
- // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired.
- AUTHENTICATION_EXPIRED_ERR = "user authentication expired"
-
- // AUTHENTICATION_REVOKED_ERR is for when user authorization has been revoked.
- AUTHENTICATION_REVOKED_ERR = "user authentication revoked"
-
- // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired.
- ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired"
-
- // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit
- MAX_CONNECTIONS_ERR = "maximum connections exceeded"
-)
-
-// Errors
-var (
- ErrConnectionClosed = errors.New("nats: connection closed")
- ErrConnectionDraining = errors.New("nats: connection draining")
- ErrDrainTimeout = errors.New("nats: draining connection timed out")
- ErrConnectionReconnecting = errors.New("nats: connection reconnecting")
- ErrSecureConnRequired = errors.New("nats: secure connection required")
- ErrSecureConnWanted = errors.New("nats: secure connection not available")
- ErrBadSubscription = errors.New("nats: invalid subscription")
- ErrTypeSubscription = errors.New("nats: invalid subscription type")
- ErrBadSubject = errors.New("nats: invalid subject")
- ErrBadQueueName = errors.New("nats: invalid queue name")
- ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped")
- ErrTimeout = errors.New("nats: timeout")
- ErrBadTimeout = errors.New("nats: timeout invalid")
- ErrAuthorization = errors.New("nats: authorization violation")
- ErrAuthExpired = errors.New("nats: authentication expired")
- ErrAuthRevoked = errors.New("nats: authentication revoked")
- ErrAccountAuthExpired = errors.New("nats: account authentication expired")
- ErrNoServers = errors.New("nats: no servers available for connection")
- ErrJsonParse = errors.New("nats: connect message, json parse error")
- ErrChanArg = errors.New("nats: argument needs to be a channel type")
- ErrMaxPayload = errors.New("nats: maximum payload exceeded")
- ErrMaxMessages = errors.New("nats: maximum messages delivered")
- ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription")
- ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed")
- ErrClientCertOrRootCAsRequired = errors.New("nats: at least one of certCB or rootCAsCB must be set")
- ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received")
- ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded")
- ErrInvalidConnection = errors.New("nats: invalid connection")
- ErrInvalidMsg = errors.New("nats: invalid message or message nil")
- ErrInvalidArg = errors.New("nats: invalid argument")
- ErrInvalidContext = errors.New("nats: invalid context")
- ErrNoDeadlineContext = errors.New("nats: context requires a deadline")
- ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server")
- ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server")
- ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler")
- ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler")
- ErrNoUserCB = errors.New("nats: user callback not defined")
- ErrNkeyAndUser = errors.New("nats: user callback and nkey defined")
- ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server")
- ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION)
- ErrTokenAlreadySet = errors.New("nats: token and token handler both set")
- ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection")
- ErrMsgNoReply = errors.New("nats: message does not have a reply")
- ErrClientIPNotSupported = errors.New("nats: client IP not supported by this server")
- ErrDisconnected = errors.New("nats: server is disconnected")
- ErrHeadersNotSupported = errors.New("nats: headers not supported by this server")
- ErrBadHeaderMsg = errors.New("nats: message could not decode headers")
- ErrNoResponders = errors.New("nats: no responders available for request")
- ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded")
- ErrConnectionNotTLS = errors.New("nats: connection is not tls")
-)
-
-// GetDefaultOptions returns default configuration options for the client.
-func GetDefaultOptions() Options {
- return Options{
- AllowReconnect: true,
- MaxReconnect: DefaultMaxReconnect,
- ReconnectWait: DefaultReconnectWait,
- ReconnectJitter: DefaultReconnectJitter,
- ReconnectJitterTLS: DefaultReconnectJitterTLS,
- Timeout: DefaultTimeout,
- PingInterval: DefaultPingInterval,
- MaxPingsOut: DefaultMaxPingOut,
- SubChanLen: DefaultMaxChanLen,
- ReconnectBufSize: DefaultReconnectBufSize,
- DrainTimeout: DefaultDrainTimeout,
- FlusherTimeout: DefaultFlusherTimeout,
- }
-}
-
-// DEPRECATED: Use GetDefaultOptions() instead.
-// DefaultOptions is not safe for use by multiple clients.
-// For details see #308.
-var DefaultOptions = GetDefaultOptions()
-
-// Status represents the state of the connection.
-type Status int
-
-const (
- DISCONNECTED = Status(iota)
- CONNECTED
- CLOSED
- RECONNECTING
- CONNECTING
- DRAINING_SUBS
- DRAINING_PUBS
-)
-
-func (s Status) String() string {
- switch s {
- case DISCONNECTED:
- return "DISCONNECTED"
- case CONNECTED:
- return "CONNECTED"
- case CLOSED:
- return "CLOSED"
- case RECONNECTING:
- return "RECONNECTING"
- case CONNECTING:
- return "CONNECTING"
- case DRAINING_SUBS:
- return "DRAINING_SUBS"
- case DRAINING_PUBS:
- return "DRAINING_PUBS"
- }
- return "unknown status"
-}
-
-// ConnHandler is used for asynchronous events such as
-// disconnected and closed connections.
-type ConnHandler func(*Conn)
-
-// ConnErrHandler is used to process asynchronous events like
-// disconnected connection with the error (if any).
-type ConnErrHandler func(*Conn, error)
-
-// ErrHandler is used to process asynchronous errors encountered
-// while processing inbound messages.
-type ErrHandler func(*Conn, *Subscription, error)
-
-// UserJWTHandler is used to fetch and return the account signed
-// JWT for this user.
-type UserJWTHandler func() (string, error)
-
-// TLSCertHandler is used to fetch and return tls certificate.
-type TLSCertHandler func() (tls.Certificate, error)
-
-// RootCAsHandler is used to fetch and return a set of root certificate
-// authorities that clients use when verifying server certificates.
-type RootCAsHandler func() (*x509.CertPool, error)
-
-// SignatureHandler is used to sign a nonce from the server while
-// authenticating with nkeys. The user should sign the nonce and
-// return the raw signature. The client will base64 encode this to
-// send to the server.
-type SignatureHandler func([]byte) ([]byte, error)
-
-// AuthTokenHandler is used to generate a new token.
-type AuthTokenHandler func() string
-
-// ReconnectDelayHandler is used to get from the user the desired
-// delay the library should pause before attempting to reconnect
-// again. Note that this is invoked after the library tried the
-// whole list of URLs and failed to reconnect.
-type ReconnectDelayHandler func(attempts int) time.Duration
-
-// asyncCB is used to preserve order for async callbacks.
-type asyncCB struct {
- f func()
- next *asyncCB
-}
-
-type asyncCallbacksHandler struct {
- mu sync.Mutex
- cond *sync.Cond
- head *asyncCB
- tail *asyncCB
-}
-
-// Option is a function on the options for a connection.
-type Option func(*Options) error
-
-// CustomDialer can be used to specify any dialer, not necessarily a
-// *net.Dialer. A CustomDialer may also implement `SkipTLSHandshake() bool`
-// in order to skip the TLS handshake in case not required.
-type CustomDialer interface {
- Dial(network, address string) (net.Conn, error)
-}
-
-type InProcessConnProvider interface {
- InProcessConn() (net.Conn, error)
-}
-
-// Options can be used to create a customized connection.
-type Options struct {
-
- // Url represents a single NATS server url to which the client
- // will be connecting. If the Servers option is also set, it
- // then becomes the first server in the Servers array.
- Url string
-
- // InProcessServer represents a NATS server running within the
- // same process. If this is set then we will attempt to connect
- // to the server directly rather than using external TCP conns.
- InProcessServer InProcessConnProvider
-
- // Servers is a configured set of servers which this client
- // will use when attempting to connect.
- Servers []string
-
- // NoRandomize configures whether we will randomize the
- // server pool.
- NoRandomize bool
-
- // NoEcho configures whether the server will echo back messages
- // that are sent on this connection if we also have matching subscriptions.
- // Note this is supported on servers >= version 1.2. Proto 1 or greater.
- NoEcho bool
-
- // Name is an optional name label which will be sent to the server
- // on CONNECT to identify the client.
- Name string
-
- // Verbose signals the server to send an OK ack for commands
- // successfully processed by the server.
- Verbose bool
-
- // Pedantic signals the server whether it should be doing further
- // validation of subjects.
- Pedantic bool
-
- // Secure enables TLS secure connections that skip server
- // verification by default. NOT RECOMMENDED.
- Secure bool
-
- // TLSConfig is a custom TLS configuration to use for secure
- // transports.
- TLSConfig *tls.Config
-
- // TLSCertCB is used to fetch and return custom tls certificate.
- TLSCertCB TLSCertHandler
-
- // TLSHandshakeFirst is used to instruct the library perform
- // the TLS handshake right after the connect and before receiving
- // the INFO protocol from the server. If this option is enabled
- // but the server is not configured to perform the TLS handshake
- // first, the connection will fail.
- TLSHandshakeFirst bool
-
- // RootCAsCB is used to fetch and return a set of root certificate
- // authorities that clients use when verifying server certificates.
- RootCAsCB RootCAsHandler
-
- // AllowReconnect enables reconnection logic to be used when we
- // encounter a disconnect from the current server.
- AllowReconnect bool
-
- // MaxReconnect sets the number of reconnect attempts that will be
- // tried before giving up. If negative, then it will never give up
- // trying to reconnect.
- // Defaults to 60.
- MaxReconnect int
-
- // ReconnectWait sets the time to backoff after attempting a reconnect
- // to a server that we were already connected to previously.
- // Defaults to 2s.
- ReconnectWait time.Duration
-
- // CustomReconnectDelayCB is invoked after the library tried every
- // URL in the server list and failed to reconnect. It passes to the
- // user the current number of attempts. This function returns the
- // amount of time the library will sleep before attempting to reconnect
- // again. It is strongly recommended that this value contains some
- // jitter to prevent all connections to attempt reconnecting at the same time.
- CustomReconnectDelayCB ReconnectDelayHandler
-
- // ReconnectJitter sets the upper bound for a random delay added to
- // ReconnectWait during a reconnect when no TLS is used.
- // Defaults to 100ms.
- ReconnectJitter time.Duration
-
- // ReconnectJitterTLS sets the upper bound for a random delay added to
- // ReconnectWait during a reconnect when TLS is used.
- // Defaults to 1s.
- ReconnectJitterTLS time.Duration
-
- // Timeout sets the timeout for a Dial operation on a connection.
- // Defaults to 2s.
- Timeout time.Duration
-
- // DrainTimeout sets the timeout for a Drain Operation to complete.
- // Defaults to 30s.
- DrainTimeout time.Duration
-
- // FlusherTimeout is the maximum time to wait for write operations
- // to the underlying connection to complete (including the flusher loop).
- // Defaults to 1m.
- FlusherTimeout time.Duration
-
- // PingInterval is the period at which the client will be sending ping
- // commands to the server, disabled if 0 or negative.
- // Defaults to 2m.
- PingInterval time.Duration
-
- // MaxPingsOut is the maximum number of pending ping commands that can
- // be awaiting a response before raising an ErrStaleConnection error.
- // Defaults to 2.
- MaxPingsOut int
-
- // ClosedCB sets the closed handler that is called when a client will
- // no longer be connected.
- ClosedCB ConnHandler
-
- // DisconnectedCB sets the disconnected handler that is called
- // whenever the connection is disconnected.
- // Will not be called if DisconnectedErrCB is set
- // DEPRECATED. Use DisconnectedErrCB which passes error that caused
- // the disconnect event.
- DisconnectedCB ConnHandler
-
- // DisconnectedErrCB sets the disconnected error handler that is called
- // whenever the connection is disconnected.
- // Disconnected error could be nil, for instance when user explicitly closes the connection.
- // DisconnectedCB will not be called if DisconnectedErrCB is set
- DisconnectedErrCB ConnErrHandler
-
- // ConnectedCB sets the connected handler called when the initial connection
- // is established. It is not invoked on successful reconnects - for reconnections,
- // use ReconnectedCB. ConnectedCB can be used in conjunction with RetryOnFailedConnect
- // to detect whether the initial connect was successful.
- ConnectedCB ConnHandler
-
- // ReconnectedCB sets the reconnected handler called whenever
- // the connection is successfully reconnected.
- ReconnectedCB ConnHandler
-
- // DiscoveredServersCB sets the callback that is invoked whenever a new
- // server has joined the cluster.
- DiscoveredServersCB ConnHandler
-
- // AsyncErrorCB sets the async error handler (e.g. slow consumer errors)
- AsyncErrorCB ErrHandler
-
- // ReconnectBufSize is the size of the backing bufio during reconnect.
- // Once this has been exhausted publish operations will return an error.
- // Defaults to 8388608 bytes (8MB).
- ReconnectBufSize int
-
- // SubChanLen is the size of the buffered channel used between the socket
- // Go routine and the message delivery for SyncSubscriptions.
- // NOTE: This does not affect AsyncSubscriptions which are
- // dictated by PendingLimits()
- // Defaults to 65536.
- SubChanLen int
-
- // UserJWT sets the callback handler that will fetch a user's JWT.
- UserJWT UserJWTHandler
-
- // Nkey sets the public nkey that will be used to authenticate
- // when connecting to the server. UserJWT and Nkey are mutually exclusive
- // and if defined, UserJWT will take precedence.
- Nkey string
-
- // SignatureCB designates the function used to sign the nonce
- // presented from the server.
- SignatureCB SignatureHandler
-
- // User sets the username to be used when connecting to the server.
- User string
-
- // Password sets the password to be used when connecting to a server.
- Password string
-
- // Token sets the token to be used when connecting to a server.
- Token string
-
- // TokenHandler designates the function used to generate the token to be used when connecting to a server.
- TokenHandler AuthTokenHandler
-
- // Dialer allows a custom net.Dialer when forming connections.
- // DEPRECATED: should use CustomDialer instead.
- Dialer *net.Dialer
-
- // CustomDialer allows to specify a custom dialer (not necessarily
- // a *net.Dialer).
- CustomDialer CustomDialer
-
- // UseOldRequestStyle forces the old method of Requests that utilize
- // a new Inbox and a new Subscription for each request.
- UseOldRequestStyle bool
-
- // NoCallbacksAfterClientClose allows preventing the invocation of
- // callbacks after Close() is called. Client won't receive notifications
- // when Close is invoked by user code. Default is to invoke the callbacks.
- NoCallbacksAfterClientClose bool
-
- // LameDuckModeHandler sets the callback to invoke when the server notifies
- // the connection that it entered lame duck mode, that is, going to
- // gradually disconnect all its connections before shutting down. This is
- // often used in deployments when upgrading NATS Servers.
- LameDuckModeHandler ConnHandler
-
- // RetryOnFailedConnect sets the connection in reconnecting state right
- // away if it can't connect to a server in the initial set. The
- // MaxReconnect and ReconnectWait options are used for this process,
- // similarly to when an established connection is disconnected.
- // If a ReconnectHandler is set, it will be invoked on the first
- // successful reconnect attempt (if the initial connect fails),
- // and if a ClosedHandler is set, it will be invoked if
- // it fails to connect (after exhausting the MaxReconnect attempts).
- RetryOnFailedConnect bool
-
- // For websocket connections, indicates to the server that the connection
- // supports compression. If the server does too, then data will be compressed.
- Compression bool
-
- // For websocket connections, adds a path to connections url.
- // This is useful when connecting to NATS behind a proxy.
- ProxyPath string
-
- // InboxPrefix allows the default _INBOX prefix to be customized
- InboxPrefix string
-
- // IgnoreAuthErrorAbort - if set to true, client opts out of the default connect behavior of aborting
- // subsequent reconnect attempts if server returns the same auth error twice (regardless of reconnect policy).
- IgnoreAuthErrorAbort bool
-
- // SkipHostLookup skips the DNS lookup for the server hostname.
- SkipHostLookup bool
-}
-
-const (
- // Scratch storage for assembling protocol headers
- scratchSize = 512
-
- // The size of the bufio reader/writer on top of the socket.
- defaultBufSize = 32768
-
- // The buffered size of the flush "kick" channel
- flushChanSize = 1
-
- // Default server pool size
- srvPoolSize = 4
-
- // NUID size
- nuidSize = 22
-
- // Default ports used if none is specified in given URL(s)
- defaultWSPortString = "80"
- defaultWSSPortString = "443"
- defaultPortString = "4222"
-)
-
-// A Conn represents a bare connection to a nats-server.
-// It can send and receive []byte payloads.
-// The connection is safe to use in multiple Go routines concurrently.
-type Conn struct {
- // Keep all members for which we use atomic at the beginning of the
- // struct and make sure they are all 64bits (or use padding if necessary).
- // atomic.* functions crash on 32bit machines if operand is not aligned
- // at 64bit. See https://github.com/golang/go/issues/599
- Statistics
- mu sync.RWMutex
- // Opts holds the configuration of the Conn.
- // Modifying the configuration of a running Conn is a race.
- Opts Options
- wg sync.WaitGroup
- srvPool []*srv
- current *srv
- urls map[string]struct{} // Keep track of all known URLs (used by processInfo)
- conn net.Conn
- bw *natsWriter
- br *natsReader
- fch chan struct{}
- info serverInfo
- ssid int64
- subsMu sync.RWMutex
- subs map[int64]*Subscription
- ach *asyncCallbacksHandler
- pongs []chan struct{}
- scratch [scratchSize]byte
- status Status
- statListeners map[Status][]chan Status
- initc bool // true if the connection is performing the initial connect
- err error
- ps *parseState
- ptmr *time.Timer
- pout int
- ar bool // abort reconnect
- rqch chan struct{}
- ws bool // true if a websocket connection
-
- // New style response handler
- respSub string // The wildcard subject
- respSubPrefix string // the wildcard prefix including trailing .
- respSubLen int // the length of the wildcard prefix excluding trailing .
- respMux *Subscription // A single response subscription
- respMap map[string]chan *Msg // Request map for the response msg channels
- respRand *rand.Rand // Used for generating suffix
-
- // Msg filters for testing.
- // Protected by subsMu
- filters map[string]msgFilter
-}
-
-type natsReader struct {
- r io.Reader
- buf []byte
- off int
- n int
-}
-
-type natsWriter struct {
- w io.Writer
- bufs []byte
- limit int
- pending *bytes.Buffer
- plimit int
-}
-
-// Subscription represents interest in a given subject.
-type Subscription struct {
- mu sync.Mutex
- sid int64
-
- // Subject that represents this subscription. This can be different
- // than the received subject inside a Msg if this is a wildcard.
- Subject string
-
- // Optional queue group name. If present, all subscriptions with the
- // same name will form a distributed queue, and each message will
- // only be processed by one member of the group.
- Queue string
-
- // For holding information about a JetStream consumer.
- jsi *jsSub
-
- delivered uint64
- max uint64
- conn *Conn
- mcb MsgHandler
- mch chan *Msg
- closed bool
- sc bool
- connClosed bool
- draining bool
- status SubStatus
- statListeners map[chan SubStatus][]SubStatus
-
- // Type of Subscription
- typ SubscriptionType
-
- // Async linked list
- pHead *Msg
- pTail *Msg
- pCond *sync.Cond
- pDone func(subject string)
-
- // Pending stats, async subscriptions, high-speed etc.
- pMsgs int
- pBytes int
- pMsgsMax int
- pBytesMax int
- pMsgsLimit int
- pBytesLimit int
- dropped int
-}
-
-// Status represents the state of the connection.
-type SubStatus int
-
-const (
- SubscriptionActive = SubStatus(iota)
- SubscriptionDraining
- SubscriptionClosed
- SubscriptionSlowConsumer
-)
-
-func (s SubStatus) String() string {
- switch s {
- case SubscriptionActive:
- return "Active"
- case SubscriptionDraining:
- return "Draining"
- case SubscriptionClosed:
- return "Closed"
- case SubscriptionSlowConsumer:
- return "SlowConsumer"
- }
- return "unknown status"
-}
-
-// Msg represents a message delivered by NATS. This structure is used
-// by Subscribers and PublishMsg().
-//
-// # Types of Acknowledgements
-//
-// In case using JetStream, there are multiple ways to ack a Msg:
-//
-// // Acknowledgement that a message has been processed.
-// msg.Ack()
-//
-// // Negatively acknowledges a message.
-// msg.Nak()
-//
-// // Terminate a message so that it is not redelivered further.
-// msg.Term()
-//
-// // Signal the server that the message is being worked on and reset redelivery timer.
-// msg.InProgress()
-type Msg struct {
- Subject string
- Reply string
- Header Header
- Data []byte
- Sub *Subscription
- // Internal
- next *Msg
- wsz int
- barrier *barrierInfo
- ackd uint32
-}
-
-// Compares two msgs, ignores sub but checks all other public fields.
-func (m *Msg) Equal(msg *Msg) bool {
- if m == msg {
- return true
- }
- if m == nil || msg == nil {
- return false
- }
- if m.Subject != msg.Subject || m.Reply != msg.Reply {
- return false
- }
- if !bytes.Equal(m.Data, msg.Data) {
- return false
- }
- if len(m.Header) != len(msg.Header) {
- return false
- }
- for k, v := range m.Header {
- val, ok := msg.Header[k]
- if !ok || len(v) != len(val) {
- return false
- }
- for i, hdr := range v {
- if hdr != val[i] {
- return false
- }
- }
- }
- return true
-}
-
-// Size returns a message size in bytes.
-func (m *Msg) Size() int {
- if m.wsz != 0 {
- return m.wsz
- }
- hdr, _ := m.headerBytes()
- return len(m.Subject) + len(m.Reply) + len(hdr) + len(m.Data)
-}
-
-func (m *Msg) headerBytes() ([]byte, error) {
- var hdr []byte
- if len(m.Header) == 0 {
- return hdr, nil
- }
-
- var b bytes.Buffer
- _, err := b.WriteString(hdrLine)
- if err != nil {
- return nil, ErrBadHeaderMsg
- }
-
- err = http.Header(m.Header).Write(&b)
- if err != nil {
- return nil, ErrBadHeaderMsg
- }
-
- _, err = b.WriteString(crlf)
- if err != nil {
- return nil, ErrBadHeaderMsg
- }
-
- return b.Bytes(), nil
-}
-
-type barrierInfo struct {
- refs int64
- f func()
-}
-
-// Tracks various stats received and sent on this connection,
-// including counts for messages and bytes.
-type Statistics struct {
- InMsgs uint64
- OutMsgs uint64
- InBytes uint64
- OutBytes uint64
- Reconnects uint64
-}
-
-// Tracks individual backend servers.
-type srv struct {
- url *url.URL
- didConnect bool
- reconnects int
- lastErr error
- isImplicit bool
- tlsName string
-}
-
-// The INFO block received from the server.
-type serverInfo struct {
- ID string `json:"server_id"`
- Name string `json:"server_name"`
- Proto int `json:"proto"`
- Version string `json:"version"`
- Host string `json:"host"`
- Port int `json:"port"`
- Headers bool `json:"headers"`
- AuthRequired bool `json:"auth_required,omitempty"`
- TLSRequired bool `json:"tls_required,omitempty"`
- TLSAvailable bool `json:"tls_available,omitempty"`
- MaxPayload int64 `json:"max_payload"`
- CID uint64 `json:"client_id,omitempty"`
- ClientIP string `json:"client_ip,omitempty"`
- Nonce string `json:"nonce,omitempty"`
- Cluster string `json:"cluster,omitempty"`
- ConnectURLs []string `json:"connect_urls,omitempty"`
- LameDuckMode bool `json:"ldm,omitempty"`
-}
-
-const (
- // clientProtoZero is the original client protocol from 2009.
- // http://nats.io/documentation/internals/nats-protocol/
- /* clientProtoZero */ _ = iota
- // clientProtoInfo signals a client can receive more then the original INFO block.
- // This can be used to update clients on other cluster members, etc.
- clientProtoInfo
-)
-
-type connectInfo struct {
- Verbose bool `json:"verbose"`
- Pedantic bool `json:"pedantic"`
- UserJWT string `json:"jwt,omitempty"`
- Nkey string `json:"nkey,omitempty"`
- Signature string `json:"sig,omitempty"`
- User string `json:"user,omitempty"`
- Pass string `json:"pass,omitempty"`
- Token string `json:"auth_token,omitempty"`
- TLS bool `json:"tls_required"`
- Name string `json:"name"`
- Lang string `json:"lang"`
- Version string `json:"version"`
- Protocol int `json:"protocol"`
- Echo bool `json:"echo"`
- Headers bool `json:"headers"`
- NoResponders bool `json:"no_responders"`
-}
-
-// MsgHandler is a callback function that processes messages delivered to
-// asynchronous subscribers.
-type MsgHandler func(msg *Msg)
-
-// Connect will attempt to connect to the NATS system.
-// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222
-// Comma separated arrays are also supported, e.g. urlA, urlB.
-// Options start with the defaults but can be overridden.
-// To connect to a NATS Server's websocket port, use the `ws` or `wss` scheme, such as
-// `ws://localhost:8080`. Note that websocket schemes cannot be mixed with others (nats/tls).
-func Connect(url string, options ...Option) (*Conn, error) {
- opts := GetDefaultOptions()
- opts.Servers = processUrlString(url)
- for _, opt := range options {
- if opt != nil {
- if err := opt(&opts); err != nil {
- return nil, err
- }
- }
- }
- return opts.Connect()
-}
-
-// Options that can be passed to Connect.
-
-// Name is an Option to set the client name.
-func Name(name string) Option {
- return func(o *Options) error {
- o.Name = name
- return nil
- }
-}
-
-// InProcessServer is an Option that will try to establish a direction to a NATS server
-// running within the process instead of dialing via TCP.
-func InProcessServer(server InProcessConnProvider) Option {
- return func(o *Options) error {
- o.InProcessServer = server
- return nil
- }
-}
-
-// Secure is an Option to enable TLS secure connections that skip server verification by default.
-// Pass a TLS Configuration for proper TLS.
-// A TLS Configuration using InsecureSkipVerify should NOT be used in a production setting.
-func Secure(tls ...*tls.Config) Option {
- return func(o *Options) error {
- o.Secure = true
- // Use of variadic just simplifies testing scenarios. We only take the first one.
- if len(tls) > 1 {
- return ErrMultipleTLSConfigs
- }
- if len(tls) == 1 {
- o.TLSConfig = tls[0]
- }
- return nil
- }
-}
-
-// ClientTLSConfig is an Option to set the TLS configuration for secure
-// connections. It can be used to e.g. set TLS config with cert and root CAs
-// from memory. For simple use case of loading cert and CAs from file,
-// ClientCert and RootCAs options are more convenient.
-// If Secure is not already set this will set it as well.
-func ClientTLSConfig(certCB TLSCertHandler, rootCAsCB RootCAsHandler) Option {
- return func(o *Options) error {
- o.Secure = true
-
- if certCB == nil && rootCAsCB == nil {
- return ErrClientCertOrRootCAsRequired
- }
-
- // Smoke test the callbacks to fail early
- // if they are not valid.
- if certCB != nil {
- if _, err := certCB(); err != nil {
- return err
- }
- }
- if rootCAsCB != nil {
- if _, err := rootCAsCB(); err != nil {
- return err
- }
- }
- if o.TLSConfig == nil {
- o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
- }
- o.TLSCertCB = certCB
- o.RootCAsCB = rootCAsCB
- return nil
- }
-}
-
-// RootCAs is a helper option to provide the RootCAs pool from a list of filenames.
-// If Secure is not already set this will set it as well.
-func RootCAs(file ...string) Option {
- return func(o *Options) error {
- rootCAsCB := func() (*x509.CertPool, error) {
- pool := x509.NewCertPool()
- for _, f := range file {
- rootPEM, err := os.ReadFile(f)
- if err != nil || rootPEM == nil {
- return nil, fmt.Errorf("nats: error loading or parsing rootCA file: %w", err)
- }
- ok := pool.AppendCertsFromPEM(rootPEM)
- if !ok {
- return nil, fmt.Errorf("nats: failed to parse root certificate from %q", f)
- }
- }
- return pool, nil
- }
- if o.TLSConfig == nil {
- o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
- }
- if _, err := rootCAsCB(); err != nil {
- return err
- }
- o.RootCAsCB = rootCAsCB
- o.Secure = true
- return nil
- }
-}
-
-// ClientCert is a helper option to provide the client certificate from a file.
-// If Secure is not already set this will set it as well.
-func ClientCert(certFile, keyFile string) Option {
- return func(o *Options) error {
- tlsCertCB := func() (tls.Certificate, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return tls.Certificate{}, fmt.Errorf("nats: error loading client certificate: %w", err)
- }
- cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
- if err != nil {
- return tls.Certificate{}, fmt.Errorf("nats: error parsing client certificate: %w", err)
- }
- return cert, nil
- }
- if o.TLSConfig == nil {
- o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
- }
- if _, err := tlsCertCB(); err != nil {
- return err
- }
- o.TLSCertCB = tlsCertCB
- o.Secure = true
- return nil
- }
-}
-
-// NoReconnect is an Option to turn off reconnect behavior.
-func NoReconnect() Option {
- return func(o *Options) error {
- o.AllowReconnect = false
- return nil
- }
-}
-
-// DontRandomize is an Option to turn off randomizing the server pool.
-func DontRandomize() Option {
- return func(o *Options) error {
- o.NoRandomize = true
- return nil
- }
-}
-
-// NoEcho is an Option to turn off messages echoing back from a server.
-// Note this is supported on servers >= version 1.2. Proto 1 or greater.
-func NoEcho() Option {
- return func(o *Options) error {
- o.NoEcho = true
- return nil
- }
-}
-
-// ReconnectWait is an Option to set the wait time between reconnect attempts.
-// Defaults to 2s.
-func ReconnectWait(t time.Duration) Option {
- return func(o *Options) error {
- o.ReconnectWait = t
- return nil
- }
-}
-
-// MaxReconnects is an Option to set the maximum number of reconnect attempts.
-// If negative, it will never stop trying to reconnect.
-// Defaults to 60.
-func MaxReconnects(max int) Option {
- return func(o *Options) error {
- o.MaxReconnect = max
- return nil
- }
-}
-
-// ReconnectJitter is an Option to set the upper bound of a random delay added ReconnectWait.
-// Defaults to 100ms and 1s, respectively.
-func ReconnectJitter(jitter, jitterForTLS time.Duration) Option {
- return func(o *Options) error {
- o.ReconnectJitter = jitter
- o.ReconnectJitterTLS = jitterForTLS
- return nil
- }
-}
-
-// CustomReconnectDelay is an Option to set the CustomReconnectDelayCB option.
-// See CustomReconnectDelayCB Option for more details.
-func CustomReconnectDelay(cb ReconnectDelayHandler) Option {
- return func(o *Options) error {
- o.CustomReconnectDelayCB = cb
- return nil
- }
-}
-
-// PingInterval is an Option to set the period for client ping commands.
-// Defaults to 2m.
-func PingInterval(t time.Duration) Option {
- return func(o *Options) error {
- o.PingInterval = t
- return nil
- }
-}
-
-// MaxPingsOutstanding is an Option to set the maximum number of ping requests
-// that can go unanswered by the server before closing the connection.
-// Defaults to 2.
-func MaxPingsOutstanding(max int) Option {
- return func(o *Options) error {
- o.MaxPingsOut = max
- return nil
- }
-}
-
-// ReconnectBufSize sets the buffer size of messages kept while busy reconnecting.
-// Defaults to 8388608 bytes (8MB). It can be disabled by setting it to -1.
-func ReconnectBufSize(size int) Option {
- return func(o *Options) error {
- o.ReconnectBufSize = size
- return nil
- }
-}
-
-// Timeout is an Option to set the timeout for Dial on a connection.
-// Defaults to 2s.
-func Timeout(t time.Duration) Option {
- return func(o *Options) error {
- o.Timeout = t
- return nil
- }
-}
-
-// FlusherTimeout is an Option to set the write (and flush) timeout on a connection.
-func FlusherTimeout(t time.Duration) Option {
- return func(o *Options) error {
- o.FlusherTimeout = t
- return nil
- }
-}
-
-// DrainTimeout is an Option to set the timeout for draining a connection.
-// Defaults to 30s.
-func DrainTimeout(t time.Duration) Option {
- return func(o *Options) error {
- o.DrainTimeout = t
- return nil
- }
-}
-
-// DisconnectErrHandler is an Option to set the disconnected error handler.
-func DisconnectErrHandler(cb ConnErrHandler) Option {
- return func(o *Options) error {
- o.DisconnectedErrCB = cb
- return nil
- }
-}
-
-// DisconnectHandler is an Option to set the disconnected handler.
-// DEPRECATED: Use DisconnectErrHandler.
-func DisconnectHandler(cb ConnHandler) Option {
- return func(o *Options) error {
- o.DisconnectedCB = cb
- return nil
- }
-}
-
-// ConnectHandler is an Option to set the connected handler.
-func ConnectHandler(cb ConnHandler) Option {
- return func(o *Options) error {
- o.ConnectedCB = cb
- return nil
- }
-}
-
-// ReconnectHandler is an Option to set the reconnected handler.
-func ReconnectHandler(cb ConnHandler) Option {
- return func(o *Options) error {
- o.ReconnectedCB = cb
- return nil
- }
-}
-
-// ClosedHandler is an Option to set the closed handler.
-func ClosedHandler(cb ConnHandler) Option {
- return func(o *Options) error {
- o.ClosedCB = cb
- return nil
- }
-}
-
-// DiscoveredServersHandler is an Option to set the new servers handler.
-func DiscoveredServersHandler(cb ConnHandler) Option {
- return func(o *Options) error {
- o.DiscoveredServersCB = cb
- return nil
- }
-}
-
-// ErrorHandler is an Option to set the async error handler.
-func ErrorHandler(cb ErrHandler) Option {
- return func(o *Options) error {
- o.AsyncErrorCB = cb
- return nil
- }
-}
-
-// UserInfo is an Option to set the username and password to
-// use when not included directly in the URLs.
-func UserInfo(user, password string) Option {
- return func(o *Options) error {
- o.User = user
- o.Password = password
- return nil
- }
-}
-
-// Token is an Option to set the token to use
-// when a token is not included directly in the URLs
-// and when a token handler is not provided.
-func Token(token string) Option {
- return func(o *Options) error {
- if o.TokenHandler != nil {
- return ErrTokenAlreadySet
- }
- o.Token = token
- return nil
- }
-}
-
-// TokenHandler is an Option to set the token handler to use
-// when a token is not included directly in the URLs
-// and when a token is not set.
-func TokenHandler(cb AuthTokenHandler) Option {
- return func(o *Options) error {
- if o.Token != "" {
- return ErrTokenAlreadySet
- }
- o.TokenHandler = cb
- return nil
- }
-}
-
-// UserCredentials is a convenience function that takes a filename
-// for a user's JWT and a filename for the user's private Nkey seed.
-func UserCredentials(userOrChainedFile string, seedFiles ...string) Option {
- userCB := func() (string, error) {
- return userFromFile(userOrChainedFile)
- }
- var keyFile string
- if len(seedFiles) > 0 {
- keyFile = seedFiles[0]
- } else {
- keyFile = userOrChainedFile
- }
- sigCB := func(nonce []byte) ([]byte, error) {
- return sigHandler(nonce, keyFile)
- }
- return UserJWT(userCB, sigCB)
-}
-
-// UserJWTAndSeed is a convenience function that takes the JWT and seed
-// values as strings.
-func UserJWTAndSeed(jwt string, seed string) Option {
- userCB := func() (string, error) {
- return jwt, nil
- }
-
- sigCB := func(nonce []byte) ([]byte, error) {
- kp, err := nkeys.FromSeed([]byte(seed))
- if err != nil {
- return nil, fmt.Errorf("unable to extract key pair from seed: %w", err)
- }
- // Wipe our key on exit.
- defer kp.Wipe()
-
- sig, _ := kp.Sign(nonce)
- return sig, nil
- }
-
- return UserJWT(userCB, sigCB)
-}
-
-// UserJWT will set the callbacks to retrieve the user's JWT and
-// the signature callback to sign the server nonce. This an the Nkey
-// option are mutually exclusive.
-func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option {
- return func(o *Options) error {
- if userCB == nil {
- return ErrNoUserCB
- }
- if sigCB == nil {
- return ErrUserButNoSigCB
- }
- // Smoke test the user callback to ensure it is setup properly
- // when processing options.
- if _, err := userCB(); err != nil {
- return err
- }
-
- o.UserJWT = userCB
- o.SignatureCB = sigCB
- return nil
- }
-}
-
-// Nkey will set the public Nkey and the signature callback to
-// sign the server nonce.
-func Nkey(pubKey string, sigCB SignatureHandler) Option {
- return func(o *Options) error {
- o.Nkey = pubKey
- o.SignatureCB = sigCB
- if pubKey != "" && sigCB == nil {
- return ErrNkeyButNoSigCB
- }
- return nil
- }
-}
-
-// SyncQueueLen will set the maximum queue len for the internal
-// channel used for SubscribeSync().
-// Defaults to 65536.
-func SyncQueueLen(max int) Option {
- return func(o *Options) error {
- o.SubChanLen = max
- return nil
- }
-}
-
-// Dialer is an Option to set the dialer which will be used when
-// attempting to establish a connection.
-// DEPRECATED: Should use CustomDialer instead.
-func Dialer(dialer *net.Dialer) Option {
- return func(o *Options) error {
- o.Dialer = dialer
- return nil
- }
-}
-
-// SetCustomDialer is an Option to set a custom dialer which will be
-// used when attempting to establish a connection. If both Dialer
-// and CustomDialer are specified, CustomDialer takes precedence.
-func SetCustomDialer(dialer CustomDialer) Option {
- return func(o *Options) error {
- o.CustomDialer = dialer
- return nil
- }
-}
-
-// UseOldRequestStyle is an Option to force usage of the old Request style.
-func UseOldRequestStyle() Option {
- return func(o *Options) error {
- o.UseOldRequestStyle = true
- return nil
- }
-}
-
-// NoCallbacksAfterClientClose is an Option to disable callbacks when user code
-// calls Close(). If close is initiated by any other condition, callbacks
-// if any will be invoked.
-func NoCallbacksAfterClientClose() Option {
- return func(o *Options) error {
- o.NoCallbacksAfterClientClose = true
- return nil
- }
-}
-
-// LameDuckModeHandler sets the callback to invoke when the server notifies
-// the connection that it entered lame duck mode, that is, going to
-// gradually disconnect all its connections before shutting down. This is
-// often used in deployments when upgrading NATS Servers.
-func LameDuckModeHandler(cb ConnHandler) Option {
- return func(o *Options) error {
- o.LameDuckModeHandler = cb
- return nil
- }
-}
-
-// RetryOnFailedConnect sets the connection in reconnecting state right away
-// if it can't connect to a server in the initial set.
-// See RetryOnFailedConnect option for more details.
-func RetryOnFailedConnect(retry bool) Option {
- return func(o *Options) error {
- o.RetryOnFailedConnect = retry
- return nil
- }
-}
-
-// Compression is an Option to indicate if this connection supports
-// compression. Currently only supported for Websocket connections.
-func Compression(enabled bool) Option {
- return func(o *Options) error {
- o.Compression = enabled
- return nil
- }
-}
-
-// ProxyPath is an option for websocket connections that adds a path to connections url.
-// This is useful when connecting to NATS behind a proxy.
-func ProxyPath(path string) Option {
- return func(o *Options) error {
- o.ProxyPath = path
- return nil
- }
-}
-
-// CustomInboxPrefix configures the request + reply inbox prefix
-func CustomInboxPrefix(p string) Option {
- return func(o *Options) error {
- if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") {
- return fmt.Errorf("nats: invalid custom prefix")
- }
- o.InboxPrefix = p
- return nil
- }
-}
-
-// IgnoreAuthErrorAbort opts out of the default connect behavior of aborting
-// subsequent reconnect attempts if server returns the same auth error twice.
-func IgnoreAuthErrorAbort() Option {
- return func(o *Options) error {
- o.IgnoreAuthErrorAbort = true
- return nil
- }
-}
-
-// SkipHostLookup is an Option to skip the host lookup when connecting to a server.
-func SkipHostLookup() Option {
- return func(o *Options) error {
- o.SkipHostLookup = true
- return nil
- }
-}
-
-// TLSHandshakeFirst is an Option to perform the TLS handshake first, that is
-// before receiving the INFO protocol. This requires the server to also be
-// configured with such option, otherwise the connection will fail.
-func TLSHandshakeFirst() Option {
- return func(o *Options) error {
- o.TLSHandshakeFirst = true
- o.Secure = true
- return nil
- }
-}
-
-// Handler processing
-
-// SetDisconnectHandler will set the disconnect event handler.
-// DEPRECATED: Use SetDisconnectErrHandler
-func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) {
- if nc == nil {
- return
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.Opts.DisconnectedCB = dcb
-}
-
-// SetDisconnectErrHandler will set the disconnect event handler.
-func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) {
- if nc == nil {
- return
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.Opts.DisconnectedErrCB = dcb
-}
-
-// DisconnectErrHandler will return the disconnect event handler.
-func (nc *Conn) DisconnectErrHandler() ConnErrHandler {
- if nc == nil {
- return nil
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- return nc.Opts.DisconnectedErrCB
-}
-
-// SetReconnectHandler will set the reconnect event handler.
-func (nc *Conn) SetReconnectHandler(rcb ConnHandler) {
- if nc == nil {
- return
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.Opts.ReconnectedCB = rcb
-}
-
-// ReconnectHandler will return the reconnect event handler.
-func (nc *Conn) ReconnectHandler() ConnHandler {
- if nc == nil {
- return nil
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- return nc.Opts.ReconnectedCB
-}
-
-// SetDiscoveredServersHandler will set the discovered servers handler.
-func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) {
- if nc == nil {
- return
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.Opts.DiscoveredServersCB = dscb
-}
-
-// DiscoveredServersHandler will return the discovered servers handler.
-func (nc *Conn) DiscoveredServersHandler() ConnHandler {
- if nc == nil {
- return nil
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- return nc.Opts.DiscoveredServersCB
-}
-
-// SetClosedHandler will set the closed event handler.
-func (nc *Conn) SetClosedHandler(cb ConnHandler) {
- if nc == nil {
- return
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.Opts.ClosedCB = cb
-}
-
-// ClosedHandler will return the closed event handler.
-func (nc *Conn) ClosedHandler() ConnHandler {
- if nc == nil {
- return nil
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- return nc.Opts.ClosedCB
-}
-
-// SetErrorHandler will set the async error handler.
-func (nc *Conn) SetErrorHandler(cb ErrHandler) {
- if nc == nil {
- return
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.Opts.AsyncErrorCB = cb
-}
-
-// ErrorHandler will return the async error handler.
-func (nc *Conn) ErrorHandler() ErrHandler {
- if nc == nil {
- return nil
- }
- nc.mu.Lock()
- defer nc.mu.Unlock()
- return nc.Opts.AsyncErrorCB
-}
-
-// Process the url string argument to Connect.
-// Return an array of urls, even if only one.
-func processUrlString(url string) []string {
- urls := strings.Split(url, ",")
- var j int
- for _, s := range urls {
- u := strings.TrimSpace(s)
- if len(u) > 0 {
- urls[j] = u
- j++
- }
- }
- return urls[:j]
-}
-
-// Connect will attempt to connect to a NATS server with multiple options.
-func (o Options) Connect() (*Conn, error) {
- nc := &Conn{Opts: o}
-
- // Some default options processing.
- if nc.Opts.MaxPingsOut == 0 {
- nc.Opts.MaxPingsOut = DefaultMaxPingOut
- }
- // Allow old default for channel length to work correctly.
- if nc.Opts.SubChanLen == 0 {
- nc.Opts.SubChanLen = DefaultMaxChanLen
- }
- // Default ReconnectBufSize
- if nc.Opts.ReconnectBufSize == 0 {
- nc.Opts.ReconnectBufSize = DefaultReconnectBufSize
- }
- // Ensure that Timeout is not 0
- if nc.Opts.Timeout == 0 {
- nc.Opts.Timeout = DefaultTimeout
- }
-
- // Check first for user jwt callback being defined and nkey.
- if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" {
- return nil, ErrNkeyAndUser
- }
-
- // Check if we have an nkey but no signature callback defined.
- if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil {
- return nil, ErrNkeyButNoSigCB
- }
-
- // Allow custom Dialer for connecting using a timeout by default
- if nc.Opts.Dialer == nil {
- nc.Opts.Dialer = &net.Dialer{
- Timeout: nc.Opts.Timeout,
- }
- }
-
- // If the TLSHandshakeFirst option is specified, make sure that
- // the Secure boolean is true.
- if nc.Opts.TLSHandshakeFirst {
- nc.Opts.Secure = true
- }
-
- if err := nc.setupServerPool(); err != nil {
- return nil, err
- }
-
- // Create the async callback handler.
- nc.ach = &asyncCallbacksHandler{}
- nc.ach.cond = sync.NewCond(&nc.ach.mu)
-
- // Set a default error handler that will print to stderr.
- if nc.Opts.AsyncErrorCB == nil {
- nc.Opts.AsyncErrorCB = defaultErrHandler
- }
-
- // Create reader/writer
- nc.newReaderWriter()
-
- connectionEstablished, err := nc.connect()
- if err != nil {
- return nil, err
- }
-
- // Spin up the async cb dispatcher on success
- go nc.ach.asyncCBDispatcher()
-
- if connectionEstablished && nc.Opts.ConnectedCB != nil {
- nc.ach.push(func() { nc.Opts.ConnectedCB(nc) })
- }
-
- return nc, nil
-}
-
-func defaultErrHandler(nc *Conn, sub *Subscription, err error) {
- var cid uint64
- if nc != nil {
- nc.mu.RLock()
- cid = nc.info.CID
- nc.mu.RUnlock()
- }
- var errStr string
- if sub != nil {
- var subject string
- sub.mu.Lock()
- if sub.jsi != nil {
- subject = sub.jsi.psubj
- } else {
- subject = sub.Subject
- }
- sub.mu.Unlock()
- errStr = fmt.Sprintf("%s on connection [%d] for subscription on %q\n", err.Error(), cid, subject)
- } else {
- errStr = fmt.Sprintf("%s on connection [%d]\n", err.Error(), cid)
- }
- os.Stderr.WriteString(errStr)
-}
-
-const (
- _CRLF_ = "\r\n"
- _EMPTY_ = ""
- _SPC_ = " "
- _PUB_P_ = "PUB "
- _HPUB_P_ = "HPUB "
-)
-
-var _CRLF_BYTES_ = []byte(_CRLF_)
-
-const (
- _OK_OP_ = "+OK"
- _ERR_OP_ = "-ERR"
- _PONG_OP_ = "PONG"
- _INFO_OP_ = "INFO"
-)
-
-const (
- connectProto = "CONNECT %s" + _CRLF_
- pingProto = "PING" + _CRLF_
- pongProto = "PONG" + _CRLF_
- subProto = "SUB %s %s %d" + _CRLF_
- unsubProto = "UNSUB %d %s" + _CRLF_
- okProto = _OK_OP_ + _CRLF_
-)
-
-// Return the currently selected server
-func (nc *Conn) currentServer() (int, *srv) {
- for i, s := range nc.srvPool {
- if s == nil {
- continue
- }
- if s == nc.current {
- return i, s
- }
- }
- return -1, nil
-}
-
-// Pop the current server and put onto the end of the list. Select head of list as long
-// as number of reconnect attempts under MaxReconnect.
-func (nc *Conn) selectNextServer() (*srv, error) {
- i, s := nc.currentServer()
- if i < 0 {
- return nil, ErrNoServers
- }
- sp := nc.srvPool
- num := len(sp)
- copy(sp[i:num-1], sp[i+1:num])
- maxReconnect := nc.Opts.MaxReconnect
- if maxReconnect < 0 || s.reconnects < maxReconnect {
- nc.srvPool[num-1] = s
- } else {
- nc.srvPool = sp[0 : num-1]
- }
- if len(nc.srvPool) <= 0 {
- nc.current = nil
- return nil, ErrNoServers
- }
- nc.current = nc.srvPool[0]
- return nc.srvPool[0], nil
-}
-
-// Will assign the correct server to nc.current
-func (nc *Conn) pickServer() error {
- nc.current = nil
- if len(nc.srvPool) <= 0 {
- return ErrNoServers
- }
-
- for _, s := range nc.srvPool {
- if s != nil {
- nc.current = s
- return nil
- }
- }
- return ErrNoServers
-}
-
-const tlsScheme = "tls"
-
-// Create the server pool using the options given.
-// We will place a Url option first, followed by any
-// Server Options. We will randomize the server pool unless
-// the NoRandomize flag is set.
-func (nc *Conn) setupServerPool() error {
- nc.srvPool = make([]*srv, 0, srvPoolSize)
- nc.urls = make(map[string]struct{}, srvPoolSize)
-
- // Create srv objects from each url string in nc.Opts.Servers
- // and add them to the pool.
- for _, urlString := range nc.Opts.Servers {
- if err := nc.addURLToPool(urlString, false, false); err != nil {
- return err
- }
- }
-
- // Randomize if allowed to
- if !nc.Opts.NoRandomize {
- nc.shufflePool(0)
- }
-
- // Normally, if this one is set, Options.Servers should not be,
- // but we always allowed that, so continue to do so.
- if nc.Opts.Url != _EMPTY_ {
- // Add to the end of the array
- if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil {
- return err
- }
- // Then swap it with first to guarantee that Options.Url is tried first.
- last := len(nc.srvPool) - 1
- if last > 0 {
- nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0]
- }
- } else if len(nc.srvPool) <= 0 {
- // Place default URL if pool is empty.
- if err := nc.addURLToPool(DefaultURL, false, false); err != nil {
- return err
- }
- }
-
- // Check for Scheme hint to move to TLS mode.
- for _, srv := range nc.srvPool {
- if srv.url.Scheme == tlsScheme || srv.url.Scheme == wsSchemeTLS {
- // FIXME(dlc), this is for all in the pool, should be case by case.
- nc.Opts.Secure = true
- if nc.Opts.TLSConfig == nil {
- nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
- }
- }
- }
-
- return nc.pickServer()
-}
-
-// Helper function to return scheme
-func (nc *Conn) connScheme() string {
- if nc.ws {
- if nc.Opts.Secure {
- return wsSchemeTLS
- }
- return wsScheme
- }
- if nc.Opts.Secure {
- return tlsScheme
- }
- return "nats"
-}
-
-// Return true iff u.Hostname() is an IP address.
-func hostIsIP(u *url.URL) bool {
- return net.ParseIP(u.Hostname()) != nil
-}
-
-// addURLToPool adds an entry to the server pool
-func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error {
- if !strings.Contains(sURL, "://") {
- sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL)
- }
- var (
- u *url.URL
- err error
- )
- for i := 0; i < 2; i++ {
- u, err = url.Parse(sURL)
- if err != nil {
- return err
- }
- if u.Port() != "" {
- break
- }
- // In case given URL is of the form "localhost:", just add
- // the port number at the end, otherwise, add ":4222".
- if sURL[len(sURL)-1] != ':' {
- sURL += ":"
- }
- switch u.Scheme {
- case wsScheme:
- sURL += defaultWSPortString
- case wsSchemeTLS:
- sURL += defaultWSSPortString
- default:
- sURL += defaultPortString
- }
- }
-
- isWS := isWebsocketScheme(u)
- // We don't support mix and match of websocket and non websocket URLs.
- // If this is the first URL, then we accept and switch the global state
- // to websocket. After that, we will know how to reject mixed URLs.
- if len(nc.srvPool) == 0 {
- nc.ws = isWS
- } else if isWS && !nc.ws || !isWS && nc.ws {
- return fmt.Errorf("mixing of websocket and non websocket URLs is not allowed")
- }
-
- var tlsName string
- if implicit {
- curl := nc.current.url
- // Check to see if we do not have a url.User but current connected
- // url does. If so copy over.
- if u.User == nil && curl.User != nil {
- u.User = curl.User
- }
- // We are checking to see if we have a secure connection and are
- // adding an implicit server that just has an IP. If so we will remember
- // the current hostname we are connected to.
- if saveTLSName && hostIsIP(u) {
- tlsName = curl.Hostname()
- }
- }
-
- s := &srv{url: u, isImplicit: implicit, tlsName: tlsName}
- nc.srvPool = append(nc.srvPool, s)
- nc.urls[u.Host] = struct{}{}
- return nil
-}
-
-// shufflePool swaps randomly elements in the server pool
-// The `offset` value indicates that the shuffling should start at
-// this offset and leave the elements from [0..offset) intact.
-func (nc *Conn) shufflePool(offset int) {
- if len(nc.srvPool) <= offset+1 {
- return
- }
- source := rand.NewSource(time.Now().UnixNano())
- r := rand.New(source)
- for i := offset; i < len(nc.srvPool); i++ {
- j := offset + r.Intn(i+1-offset)
- nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i]
- }
-}
-
// newReaderWriter allocates the connection's buffered reader and writer.
// The reader starts with off == -1, meaning "no buffered data pending";
// the writer's pending-buffer limit comes from ReconnectBufSize.
func (nc *Conn) newReaderWriter() {
	nc.br = &natsReader{
		buf: make([]byte, defaultBufSize),
		off: -1,
	}
	nc.bw = &natsWriter{
		limit:  defaultBufSize,
		plimit: nc.Opts.ReconnectBufSize,
	}
}
-
// bindToNewConn points the existing reader/writer at the current
// nc.conn and resets their buffered state (dropping any queued writes
// and marking the read buffer empty).
func (nc *Conn) bindToNewConn() {
	bw := nc.bw
	bw.w, bw.bufs = nc.newWriter(), nil
	br := nc.br
	br.r, br.n, br.off = nc.conn, 0, -1
}
-
// newWriter returns the writer to use for the current connection,
// wrapping it with a write deadline when FlusherTimeout is set.
func (nc *Conn) newWriter() io.Writer {
	var w io.Writer = nc.conn
	if nc.Opts.FlusherTimeout > 0 {
		w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout}
	}
	return w
}
-
// appendString is a convenience wrapper around appendBufs for a single
// string payload.
func (w *natsWriter) appendString(str string) error {
	return w.appendBufs([]byte(str))
}
-
-func (w *natsWriter) appendBufs(bufs ...[]byte) error {
- for _, buf := range bufs {
- if len(buf) == 0 {
- continue
- }
- if w.pending != nil {
- w.pending.Write(buf)
- } else {
- w.bufs = append(w.bufs, buf...)
- }
- }
- if w.pending == nil && len(w.bufs) >= w.limit {
- return w.flush()
- }
- return nil
-}
-
-func (w *natsWriter) writeDirect(strs ...string) error {
- for _, str := range strs {
- if _, err := w.w.Write([]byte(str)); err != nil {
- return err
- }
- }
- return nil
-}
-
// flush writes the regular buffer to the underlying writer and resets
// it (keeping its capacity). It is a no-op while the pending buffer is
// active.
func (w *natsWriter) flush() error {
	// If a pending buffer is set, we don't flush. Code that needs to
	// write directly to the socket, by-passing buffers during (re)connect,
	// will use the writeDirect() API.
	if w.pending != nil {
		return nil
	}
	// Do not skip calling w.w.Write() here if len(w.bufs) is 0 because
	// the actual writer (if websocket for instance) may have things
	// to do such as sending control frames, etc..
	_, err := w.w.Write(w.bufs)
	w.bufs = w.bufs[:0]
	return err
}
-
// buffered returns the number of bytes currently queued, in whichever
// buffer (pending or regular) is active.
func (w *natsWriter) buffered() int {
	if w.pending != nil {
		return w.pending.Len()
	}
	return len(w.bufs)
}
-
// switchToPending activates the pending buffer, used to accumulate
// writes while the connection is being (re)established.
func (w *natsWriter) switchToPending() {
	w.pending = new(bytes.Buffer)
}
-
// flushPendingBuffer writes the content of the pending buffer (if any)
// to the underlying writer and resets it.
func (w *natsWriter) flushPendingBuffer() error {
	if w.pending == nil || w.pending.Len() == 0 {
		return nil
	}
	_, err := w.w.Write(w.pending.Bytes())
	// Reset the pending buffer at this point because we don't want
	// to take the risk of sending duplicates or partials.
	w.pending.Reset()
	return err
}
-
-func (w *natsWriter) atLimitIfUsingPending() bool {
- if w.pending == nil {
- return false
- }
- return w.pending.Len() >= w.plimit
-}
-
// doneWithPending deactivates the pending buffer; subsequent writes go
// through the regular buffer again.
func (w *natsWriter) doneWithPending() {
	w.pending = nil
}
-
// Notify the reader that we are done with the connect, where "read" operations
// happen synchronously and under the connection lock. After this point, "read"
// will be happening from the read loop, without the connection lock.
//
// Note: this runs under the connection lock.
func (r *natsReader) doneWithConnect() {
	// Only the websocket reader needs this notification; plain TCP
	// readers have no handshake-phase state to release.
	if wsr, ok := r.r.(*websocketReader); ok {
		wsr.doneWithConnect()
	}
}
-
// Read returns the next chunk of data: first any bytes still buffered
// (r.off >= 0 marks a valid buffered region [off:n)), otherwise a fresh
// read from the underlying reader into r.buf.
func (r *natsReader) Read() ([]byte, error) {
	if r.off >= 0 {
		off := r.off
		// Consume the whole buffered region in one call.
		r.off = -1
		return r.buf[off:r.n], nil
	}
	var err error
	r.n, err = r.r.Read(r.buf)
	return r.buf[:r.n], err
}
-
-func (r *natsReader) ReadString(delim byte) (string, error) {
- var s string
-build_string:
- // First look if we have something in the buffer
- if r.off >= 0 {
- i := bytes.IndexByte(r.buf[r.off:r.n], delim)
- if i >= 0 {
- end := r.off + i + 1
- s += string(r.buf[r.off:end])
- r.off = end
- if r.off >= r.n {
- r.off = -1
- }
- return s, nil
- }
- // We did not find the delim, so will have to read more.
- s += string(r.buf[r.off:r.n])
- r.off = -1
- }
- if _, err := r.Read(); err != nil {
- return s, err
- }
- r.off = 0
- goto build_string
-}
-
-// createConn will connect to the server and wrap the appropriate
-// bufio structures. It will do the right thing when an existing
-// connection is in place.
-func (nc *Conn) createConn() (err error) {
- if nc.Opts.Timeout < 0 {
- return ErrBadTimeout
- }
- if _, cur := nc.currentServer(); cur == nil {
- return ErrNoServers
- }
-
- // If we have a reference to an in-process server then establish a
- // connection using that.
- if nc.Opts.InProcessServer != nil {
- conn, err := nc.Opts.InProcessServer.InProcessConn()
- if err != nil {
- return fmt.Errorf("failed to get in-process connection: %w", err)
- }
- nc.conn = conn
- nc.bindToNewConn()
- return nil
- }
-
- // We will auto-expand host names if they resolve to multiple IPs
- hosts := []string{}
- u := nc.current.url
-
- if !nc.Opts.SkipHostLookup && net.ParseIP(u.Hostname()) == nil {
- addrs, _ := net.LookupHost(u.Hostname())
- for _, addr := range addrs {
- hosts = append(hosts, net.JoinHostPort(addr, u.Port()))
- }
- }
- // Fall back to what we were given.
- if len(hosts) == 0 {
- hosts = append(hosts, u.Host)
- }
-
- // CustomDialer takes precedence. If not set, use Opts.Dialer which
- // is set to a default *net.Dialer (in Connect()) if not explicitly
- // set by the user.
- dialer := nc.Opts.CustomDialer
- if dialer == nil {
- // We will copy and shorten the timeout if we have multiple hosts to try.
- copyDialer := *nc.Opts.Dialer
- copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts))
- dialer = ©Dialer
- }
-
- if len(hosts) > 1 && !nc.Opts.NoRandomize {
- rand.Shuffle(len(hosts), func(i, j int) {
- hosts[i], hosts[j] = hosts[j], hosts[i]
- })
- }
- for _, host := range hosts {
- nc.conn, err = dialer.Dial("tcp", host)
- if err == nil {
- break
- }
- }
- if err != nil {
- return err
- }
-
- // If scheme starts with "ws" then branch out to websocket code.
- if isWebsocketScheme(u) {
- return nc.wsInitHandshake(u)
- }
-
- // Reset reader/writer to this new TCP connection
- nc.bindToNewConn()
- return nil
-}
-
// skipTLSDialer is implemented by custom dialers that perform (or do
// not need) their own TLS handshake, allowing makeTLSConn to skip the
// client-side TLS wrapping.
type skipTLSDialer interface {
	SkipTLSHandshake() bool
}
-
// makeTLSConn will wrap an existing Conn using TLS.
// It honors a user-supplied tls.Config (cloned, never mutated), the
// certificate/root-CA callbacks, and defaults ServerName to the saved
// TLS name or the current host when unset.
func (nc *Conn) makeTLSConn() error {
	if nc.Opts.CustomDialer != nil {
		// we do nothing when asked to skip the TLS wrapper
		sd, ok := nc.Opts.CustomDialer.(skipTLSDialer)
		if ok && sd.SkipTLSHandshake() {
			return nil
		}
	}
	// Allow the user to configure their own tls.Config structure.
	tlsCopy := &tls.Config{}
	if nc.Opts.TLSConfig != nil {
		tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig)
	}
	if nc.Opts.TLSCertCB != nil {
		cert, err := nc.Opts.TLSCertCB()
		if err != nil {
			return err
		}
		tlsCopy.Certificates = []tls.Certificate{cert}
	}
	if nc.Opts.RootCAsCB != nil {
		rootCAs, err := nc.Opts.RootCAsCB()
		if err != nil {
			return err
		}
		tlsCopy.RootCAs = rootCAs
	}
	// If its blank we will override it with the current host
	if tlsCopy.ServerName == _EMPTY_ {
		if nc.current.tlsName != _EMPTY_ {
			// tlsName was recorded when an implicit IP-only server was
			// added while connected to a named host (see addURLToPool).
			tlsCopy.ServerName = nc.current.tlsName
		} else {
			h, _, _ := net.SplitHostPort(nc.current.url.Host)
			tlsCopy.ServerName = h
		}
	}
	nc.conn = tls.Client(nc.conn, tlsCopy)
	conn := nc.conn.(*tls.Conn)
	// Handshake synchronously so any TLS failure surfaces here rather
	// than on the first read/write.
	if err := conn.Handshake(); err != nil {
		return err
	}
	nc.bindToNewConn()
	return nil
}
-
// TLSConnectionState retrieves the state of the TLS connection to the server.
// Returns ErrDisconnected if not connected, or ErrConnectionNotTLS if
// the transport is not a *tls.Conn.
func (nc *Conn) TLSConnectionState() (tls.ConnectionState, error) {
	if !nc.isConnected() {
		return tls.ConnectionState{}, ErrDisconnected
	}

	// Grab the conn reference under the lock, then query it unlocked.
	nc.mu.RLock()
	conn := nc.conn
	nc.mu.RUnlock()

	tc, ok := conn.(*tls.Conn)
	if !ok {
		return tls.ConnectionState{}, ErrConnectionNotTLS
	}

	return tc.ConnectionState(), nil
}
-
// waitForExits will wait for all socket watcher Go routines to
// be shutdown before proceeding.
func (nc *Conn) waitForExits() {
	// Kick old flusher forcefully (non-blocking send so we don't hang
	// if the flusher already exited or the channel is full).
	select {
	case nc.fch <- struct{}{}:
	default:
	}

	// Wait for any previous go routines.
	nc.wg.Wait()
}
-
// ForceReconnect forces a reconnect attempt to the server.
// This is a non-blocking call and will start the reconnect
// process without waiting for it to complete.
//
// If the connection is already in the process of reconnecting,
// this call will force an immediate reconnect attempt (bypassing
// the current reconnect delay).
func (nc *Conn) ForceReconnect() error {
	nc.mu.Lock()
	defer nc.mu.Unlock()

	if nc.isClosed() {
		return ErrConnectionClosed
	}
	if nc.isReconnecting() {
		// if we're already reconnecting, force a reconnect attempt
		// even if we're in the middle of a backoff
		// (closing rqch wakes doReconnect out of its sleep; doReconnect
		// re-creates the channel afterwards)
		if nc.rqch != nil {
			close(nc.rqch)
		}
		return nil
	}

	// Clear any queued pongs
	nc.clearPendingFlushCalls()

	// Clear any queued and blocking requests.
	nc.clearPendingRequestCalls()

	// Stop ping timer if set.
	nc.stopPingTimer()

	// Go ahead and make sure we have flushed the outbound
	nc.bw.flush()
	nc.conn.Close()

	nc.changeConnStatus(RECONNECTING)
	go nc.doReconnect(nil, true)
	return nil
}
-
-// ConnectedUrl reports the connected server's URL
-func (nc *Conn) ConnectedUrl() string {
- if nc == nil {
- return _EMPTY_
- }
-
- nc.mu.RLock()
- defer nc.mu.RUnlock()
-
- if nc.status != CONNECTED {
- return _EMPTY_
- }
- return nc.current.url.String()
-}
-
// ConnectedUrlRedacted reports the connected server's URL with passwords redacted.
// Returns the empty string when nil or not connected.
func (nc *Conn) ConnectedUrlRedacted() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.current.url.Redacted()
}
-
// ConnectedAddr returns the connected server's remote address
// (host:port). Returns the empty string when nil or not connected.
func (nc *Conn) ConnectedAddr() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.conn.RemoteAddr().String()
}
-
// ConnectedServerId reports the connected server's Id (from the INFO
// protocol). Returns the empty string when nil or not connected.
func (nc *Conn) ConnectedServerId() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.ID
}
-
// ConnectedServerName reports the connected server's name (from the
// INFO protocol). Returns the empty string when nil or not connected.
func (nc *Conn) ConnectedServerName() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Name
}
-
// semVerRe captures up to three numeric components of a version string,
// tolerating a leading "v"; minor and patch groups are optional and may
// match empty.
var semVerRe = regexp.MustCompile(`\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?`)
-
-func versionComponents(version string) (major, minor, patch int, err error) {
- m := semVerRe.FindStringSubmatch(version)
- if m == nil {
- return 0, 0, 0, errors.New("invalid semver")
- }
- major, err = strconv.Atoi(m[1])
- if err != nil {
- return -1, -1, -1, err
- }
- minor, err = strconv.Atoi(m[2])
- if err != nil {
- return -1, -1, -1, err
- }
- patch, err = strconv.Atoi(m[3])
- if err != nil {
- return -1, -1, -1, err
- }
- return major, minor, patch, err
-}
-
// Check for minimum server requirement.
// Compares the connected server's version lexicographically by
// (major, minor, patch). Note: versionComponents errors are ignored;
// a parse failure yields (-1,-1,-1), which compares as "too old".
func (nc *Conn) serverMinVersion(major, minor, patch int) bool {
	smajor, sminor, spatch, _ := versionComponents(nc.ConnectedServerVersion())
	if smajor < major || (smajor == major && sminor < minor) || (smajor == major && sminor == minor && spatch < patch) {
		return false
	}
	return true
}
-
// ConnectedServerVersion reports the connected server's version as a string.
// Returns the empty string when nil or not connected.
func (nc *Conn) ConnectedServerVersion() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Version
}
-
// ConnectedClusterName reports the connected server's cluster name if any.
// Returns the empty string when nil or not connected.
func (nc *Conn) ConnectedClusterName() string {
	if nc == nil {
		return _EMPTY_
	}

	nc.mu.RLock()
	defer nc.mu.RUnlock()

	if nc.status != CONNECTED {
		return _EMPTY_
	}
	return nc.info.Cluster
}
-
// Low level setup for structs, etc.
// Initializes subscription map, pong list, flusher/reconnect channels
// and pre-writes the HPUB prefix into the scratch buffer.
func (nc *Conn) setup() {
	nc.subs = make(map[int64]*Subscription)
	nc.pongs = make([]chan struct{}, 0, 8)

	nc.fch = make(chan struct{}, flushChanSize)
	nc.rqch = make(chan struct{})

	// Setup scratch outbound buffer for PUB/HPUB
	pub := nc.scratch[:len(_HPUB_P_)]
	copy(pub, _HPUB_P_)
}
-
// Process a connected connection and initialize properly.
// Performs (optionally TLS-first) handshake, INFO processing, CONNECT
// exchange, ping-timer setup, and starts the readLoop/flusher routines.
// Runs under the connection lock.
func (nc *Conn) processConnectInit() error {

	// Set our deadline for the whole connect process
	nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout))
	defer nc.conn.SetDeadline(time.Time{})

	// Set our status to connecting.
	nc.changeConnStatus(CONNECTING)

	// If we need to have a TLS connection and want the TLS handshake to occur
	// first, do it now.
	if nc.Opts.Secure && nc.Opts.TLSHandshakeFirst {
		if err := nc.makeTLSConn(); err != nil {
			return err
		}
	}

	// Process the INFO protocol received from the server
	err := nc.processExpectedInfo()
	if err != nil {
		return err
	}

	// Send the CONNECT protocol along with the initial PING protocol.
	// Wait for the PONG response (or any error that we get from the server).
	err = nc.sendConnect()
	if err != nil {
		return err
	}

	// Reset the number of PING sent out
	nc.pout = 0

	// Start or reset Timer
	if nc.Opts.PingInterval > 0 {
		if nc.ptmr == nil {
			nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer)
		} else {
			nc.ptmr.Reset(nc.Opts.PingInterval)
		}
	}

	// Start the readLoop and flusher go routines, we will wait on both on a reconnect event.
	nc.wg.Add(2)
	go nc.readLoop()
	go nc.flusher()

	// Notify the reader that we are done with the connect handshake, where
	// reads were done synchronously and under the connection lock.
	nc.br.doneWithConnect()

	return nil
}
-
// Main connect function. Will connect to the nats-server.
// Returns (true, nil) when a connection was established synchronously;
// (false, nil) when RetryOnFailedConnect deferred the work to
// doReconnect; (false, err) otherwise.
func (nc *Conn) connect() (bool, error) {
	var err error
	var connectionEstablished bool

	// Create actual socket connection
	// For first connect we walk all servers in the pool and try
	// to connect immediately.
	nc.mu.Lock()
	defer nc.mu.Unlock()
	nc.initc = true
	// The pool may change inside the loop iteration due to INFO protocol.
	for i := 0; i < len(nc.srvPool); i++ {
		nc.current = nc.srvPool[i]

		if err = nc.createConn(); err == nil {
			// This was moved out of processConnectInit() because
			// that function is now invoked from doReconnect() too.
			nc.setup()

			err = nc.processConnectInit()

			if err == nil {
				nc.current.didConnect = true
				nc.current.reconnects = 0
				nc.current.lastErr = nil
				break
			} else {
				// close() needs the lock released; re-acquire after.
				nc.mu.Unlock()
				nc.close(DISCONNECTED, false, err)
				nc.mu.Lock()
				// Do not reset nc.current here since it would prevent
				// RetryOnFailedConnect to work should this be the last server
				// to try before starting doReconnect().
			}
		} else {
			// Cancel out default connection refused, will trigger the
			// No servers error conditional
			if strings.Contains(err.Error(), "connection refused") {
				err = nil
			}
		}
	}

	if err == nil && nc.status != CONNECTED {
		err = ErrNoServers
	}

	if err == nil {
		connectionEstablished = true
		nc.initc = false
	} else if nc.Opts.RetryOnFailedConnect {
		nc.setup()
		nc.changeConnStatus(RECONNECTING)
		nc.bw.switchToPending()
		go nc.doReconnect(ErrNoServers, false)
		err = nil
	} else {
		nc.current = nil
	}

	return connectionEstablished, err
}
-
// This will check to see if the connection should be
// secure. This can be dictated from either end and should
// only be called after the INIT protocol has been received.
func (nc *Conn) checkForSecure() error {
	// Check to see if we need to engage TLS
	o := nc.Opts

	// Check for mismatch in setups
	if o.Secure && !nc.info.TLSRequired && !nc.info.TLSAvailable {
		return ErrSecureConnWanted
	} else if nc.info.TLSRequired && !o.Secure {
		// Switch to Secure since server needs TLS.
		o.Secure = true
	}

	if o.Secure {
		// If TLS handshake first is true, we have already done
		// the handshake, so we are done here.
		if o.TLSHandshakeFirst {
			return nil
		}
		// Need to rewrap with bufio
		if err := nc.makeTLSConn(); err != nil {
			return err
		}
	}
	return nil
}
-
// processExpectedInfo will look for the expected first INFO message
// sent when a connection is established. The lock should be held entering.
func (nc *Conn) processExpectedInfo() error {

	c := &control{}

	// Read the protocol
	err := nc.readOp(c)
	if err != nil {
		return err
	}

	// The nats protocol should send INFO first always.
	if c.op != _INFO_OP_ {
		return ErrNoInfoReceived
	}

	// Parse the protocol
	if err := nc.processInfo(c.args); err != nil {
		return err
	}

	// Nkey auth requires the server to supply a nonce to sign.
	if nc.Opts.Nkey != "" && nc.info.Nonce == "" {
		return ErrNkeysNotSupported
	}

	// For websocket connections, we already switched to TLS if need be,
	// so we are done here.
	if nc.ws {
		return nil
	}

	return nc.checkForSecure()
}
-
// Sends a protocol control message by queuing into the bufio writer
// and kicking the flush Go routine. These writes are protected.
func (nc *Conn) sendProto(proto string) {
	nc.mu.Lock()
	nc.bw.appendString(proto)
	nc.kickFlusher()
	nc.mu.Unlock()
}
-
// Generate a connect protocol message, issuing user/password if
// applicable. The lock is assumed to be held upon entering.
// Credential precedence: URL userinfo first, then Options; a token
// handler overrides neither being set. JWT/nkey auth additionally
// signs the server-provided nonce via SignatureCB.
func (nc *Conn) connectProto() (string, error) {
	o := nc.Opts
	var nkey, sig, user, pass, token, ujwt string
	u := nc.current.url.User
	if u != nil {
		// if no password, assume username is authToken
		if _, ok := u.Password(); !ok {
			token = u.Username()
		} else {
			user = u.Username()
			pass, _ = u.Password()
		}
	} else {
		// Take from options (possibly all empty strings)
		user = o.User
		pass = o.Password
		token = o.Token
		nkey = o.Nkey
	}

	// Look for user jwt.
	if o.UserJWT != nil {
		if jwt, err := o.UserJWT(); err != nil {
			return _EMPTY_, err
		} else {
			ujwt = jwt
		}
		// JWT and bare nkey auth are mutually exclusive.
		if nkey != _EMPTY_ {
			return _EMPTY_, ErrNkeyAndUser
		}
	}

	if ujwt != _EMPTY_ || nkey != _EMPTY_ {
		if o.SignatureCB == nil {
			if ujwt == _EMPTY_ {
				return _EMPTY_, ErrNkeyButNoSigCB
			}
			return _EMPTY_, ErrUserButNoSigCB
		}
		sigraw, err := o.SignatureCB([]byte(nc.info.Nonce))
		if err != nil {
			return _EMPTY_, fmt.Errorf("error signing nonce: %w", err)
		}
		sig = base64.RawURLEncoding.EncodeToString(sigraw)
	}

	if nc.Opts.TokenHandler != nil {
		if token != _EMPTY_ {
			return _EMPTY_, ErrTokenAlreadySet
		}
		token = nc.Opts.TokenHandler()
	}

	// If our server does not support headers then we can't do them or no responders.
	hdrs := nc.info.Headers
	cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token,
		o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs}

	b, err := json.Marshal(cinfo)
	if err != nil {
		return _EMPTY_, ErrJsonParse
	}

	// Check if NoEcho is set and we have a server that supports it.
	if o.NoEcho && nc.info.Proto < 1 {
		return _EMPTY_, ErrNoEchoNotSupported
	}

	return fmt.Sprintf(connectProto, b), nil
}
-
-// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes.
-func normalizeErr(line string) string {
- s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))
- s = strings.TrimLeft(strings.TrimRight(s, "'"), "'")
- return s
-}
-
// natsProtoErr represents an -ERR protocol message sent by the server.
type natsProtoErr struct {
	// description is the normalized error text (prefix/quotes stripped).
	description string
}
-
// Error implements the error interface, prefixing the server-provided
// description with "nats: ".
func (nerr *natsProtoErr) Error() string {
	return fmt.Sprintf("nats: %s", nerr.description)
}
-
// Is supports errors.Is by comparing the lowercased message against the
// target error's message.
func (nerr *natsProtoErr) Is(err error) bool {
	return strings.ToLower(nerr.Error()) == err.Error()
}
-
// Send a connect protocol message to the server, issue user/password if
// applicable. Will wait for a flush to return from the server for error
// processing. Writes CONNECT+PING directly (bypassing buffers) and
// expects a PONG back; -ERR responses are normalized and surfaced.
func (nc *Conn) sendConnect() error {
	// Construct the CONNECT protocol string
	cProto, err := nc.connectProto()
	if err != nil {
		if !nc.initc && nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
		}
		return err
	}

	// Write the protocol and PING directly to the underlying writer.
	if err := nc.bw.writeDirect(cProto, pingProto); err != nil {
		return err
	}

	// We don't want to read more than we need here, otherwise
	// we would need to transfer the excess read data to the readLoop.
	// Since in normal situations we just are looking for a PONG\r\n,
	// reading byte-by-byte here is ok.
	proto, err := nc.readProto()
	if err != nil {
		if !nc.initc && nc.Opts.AsyncErrorCB != nil {
			nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
		}
		return err
	}

	// If opts.Verbose is set, handle +OK
	if nc.Opts.Verbose && proto == okProto {
		// Read the rest now...
		proto, err = nc.readProto()
		if err != nil {
			if !nc.initc && nc.Opts.AsyncErrorCB != nil {
				nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
			}
			return err
		}
	}

	// We expect a PONG
	if proto != pongProto {
		// But it could be something else, like -ERR

		// Since we no longer use ReadLine(), trim the trailing "\r\n"
		proto = strings.TrimRight(proto, "\r\n")

		// If it's a server error...
		if strings.HasPrefix(proto, _ERR_OP_) {
			// Remove -ERR, trim spaces and quotes, and convert to lower case.
			proto = normalizeErr(proto)

			// Check if this is an auth error
			if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil {
				// This will schedule an async error if we are in reconnect,
				// and keep track of the auth error for the current server.
				// If we have got the same error twice, this sets nc.ar to true to
				// indicate that the reconnect should be aborted (will be checked
				// in doReconnect()).
				nc.processAuthError(authErr)
			}
			return &natsProtoErr{proto}
		}

		// Notify that we got an unexpected protocol.
		return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto)
	}

	// This is where we are truly connected.
	nc.changeConnStatus(CONNECTED)

	return nil
}
-
// readProto reads a single protocol line (up to and including '\n').
func (nc *Conn) readProto() (string, error) {
	return nc.br.ReadString('\n')
}
-
// A control protocol line, split into its operation name and
// (optional) arguments.
type control struct {
	op, args string
}
-
// Read a control line and process the intended op, filling c in place.
func (nc *Conn) readOp(c *control) error {
	line, err := nc.readProto()
	if err != nil {
		return err
	}
	parseControl(line, c)
	return nil
}
-
-// Parse a control line from the server.
-func parseControl(line string, c *control) {
- toks := strings.SplitN(line, _SPC_, 2)
- if len(toks) == 1 {
- c.op = strings.TrimSpace(toks[0])
- c.args = _EMPTY_
- } else if len(toks) == 2 {
- c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
- } else {
- c.op = _EMPTY_
- }
-}
-
// flushReconnectPendingItems will push the pending items that were
// gathered while we were in a RECONNECTING state to the socket.
func (nc *Conn) flushReconnectPendingItems() error {
	return nc.bw.flushPendingBuffer()
}
-
// Stops the ping timer if set.
// Connection lock is held on entry.
func (nc *Conn) stopPingTimer() {
	if nc.ptmr != nil {
		nc.ptmr.Stop()
	}
}
-
// Try to reconnect using the option parameters.
// This function assumes we are allowed to reconnect.
//
// Lock discipline: the connection lock is taken manually and released
// around sleeps / goroutine waits / close(); do not add defers here.
// The loop cycles through the pool, sleeping (with jitter or the
// custom delay callback) only after the whole list has been tried.
func (nc *Conn) doReconnect(err error, forceReconnect bool) {
	// We want to make sure we have the other watchers shutdown properly
	// here before we proceed past this point.
	nc.waitForExits()

	// FIXME(dlc) - We have an issue here if we have
	// outstanding flush points (pongs) and they were not
	// sent out, but are still in the pipe.

	// Hold the lock manually and release where needed below,
	// can't do defer here.
	nc.mu.Lock()

	// Clear any errors.
	nc.err = nil
	// Perform appropriate callback if needed for a disconnect.
	// DisconnectedErrCB has priority over deprecated DisconnectedCB
	if !nc.initc {
		if nc.Opts.DisconnectedErrCB != nil {
			nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) })
		} else if nc.Opts.DisconnectedCB != nil {
			nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) })
		}
	}

	// This is used to wait on go routines exit if we start them in the loop
	// but an error occurs after that.
	waitForGoRoutines := false
	var rt *time.Timer
	// Channel used to kick routine out of sleep when conn is closed.
	rqch := nc.rqch
	// Counter that is increased when the whole list of servers has been tried.
	var wlf int

	var jitter time.Duration
	var rw time.Duration
	// If a custom reconnect delay handler is set, this takes precedence.
	crd := nc.Opts.CustomReconnectDelayCB
	if crd == nil {
		rw = nc.Opts.ReconnectWait
		// TODO: since we sleep only after the whole list has been tried, we can't
		// rely on individual *srv to know if it is a TLS or non-TLS url.
		// We have to pick which type of jitter to use, for now, we use these hints:
		jitter = nc.Opts.ReconnectJitter
		if nc.Opts.Secure || nc.Opts.TLSConfig != nil {
			jitter = nc.Opts.ReconnectJitterTLS
		}
	}

	for i := 0; len(nc.srvPool) > 0; {
		cur, err := nc.selectNextServer()
		if err != nil {
			nc.err = err
			break
		}

		// Sleep only once the whole pool has been walked, and never
		// on a forced reconnect (ForceReconnect / initial kick).
		doSleep := i+1 >= len(nc.srvPool) && !forceReconnect
		forceReconnect = false
		nc.mu.Unlock()

		if !doSleep {
			i++
			// Release the lock to give a chance to a concurrent nc.Close() to break the loop.
			runtime.Gosched()
		} else {
			i = 0
			var st time.Duration
			if crd != nil {
				wlf++
				st = crd(wlf)
			} else {
				st = rw
				if jitter > 0 {
					st += time.Duration(rand.Int63n(int64(jitter)))
				}
			}
			// Reuse a single timer across iterations instead of
			// allocating one per sleep.
			if rt == nil {
				rt = time.NewTimer(st)
			} else {
				rt.Reset(st)
			}
			select {
			case <-rqch:
				rt.Stop()

				// we need to reset the rqch channel to avoid
				// closing a closed channel in the next iteration
				nc.mu.Lock()
				nc.rqch = make(chan struct{})
				nc.mu.Unlock()
			case <-rt.C:
			}
		}
		// If the readLoop, etc.. go routines were started, wait for them to complete.
		if waitForGoRoutines {
			nc.waitForExits()
			waitForGoRoutines = false
		}
		nc.mu.Lock()

		// Check if we have been closed first.
		if nc.isClosed() {
			break
		}

		// Mark that we tried a reconnect
		cur.reconnects++

		// Try to create a new connection
		err = nc.createConn()

		// Not yet connected, retry...
		// Continue to hold the lock
		if err != nil {
			nc.err = nil
			continue
		}

		// We are reconnected
		nc.Reconnects++

		// Process connect logic
		if nc.err = nc.processConnectInit(); nc.err != nil {
			// Check if we should abort reconnect. If so, break out
			// of the loop and connection will be closed.
			if nc.ar {
				break
			}
			nc.changeConnStatus(RECONNECTING)
			continue
		}

		// Clear possible lastErr under the connection lock after
		// a successful processConnectInit().
		nc.current.lastErr = nil

		// Clear out server stats for the server we connected to..
		cur.didConnect = true
		cur.reconnects = 0

		// Send existing subscription state
		nc.resendSubscriptions()

		// Now send off and clear pending buffer
		nc.err = nc.flushReconnectPendingItems()
		if nc.err != nil {
			nc.changeConnStatus(RECONNECTING)
			// Stop the ping timer (if set)
			nc.stopPingTimer()
			// Since processConnectInit() returned without error, the
			// go routines were started, so wait for them to return
			// on the next iteration (after releasing the lock).
			waitForGoRoutines = true
			continue
		}

		// Done with the pending buffer
		nc.bw.doneWithPending()

		// Queue up the correct callback. If we are in initial connect state
		// (using retry on failed connect), we will call the ConnectedCB,
		// otherwise the ReconnectedCB.
		if nc.Opts.ReconnectedCB != nil && !nc.initc {
			nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) })
		} else if nc.Opts.ConnectedCB != nil && nc.initc {
			nc.ach.push(func() { nc.Opts.ConnectedCB(nc) })
		}

		// If we are here with a retry on failed connect, indicate that the
		// initial connect is now complete.
		nc.initc = false

		// Release lock here, we will return below.
		nc.mu.Unlock()

		// Make sure to flush everything
		nc.Flush()

		return
	}

	// Call into close.. We have no servers left..
	if nc.err == nil {
		nc.err = ErrNoServers
	}
	nc.mu.Unlock()
	nc.close(CLOSED, true, nil)
}
-
// processOpErr handles errors from reading or parsing the protocol.
// The lock should not be held entering this function.
// Either kicks off doReconnect (when AllowReconnect and currently
// CONNECTED) or transitions to DISCONNECTED and closes the connection.
func (nc *Conn) processOpErr(err error) {
	nc.mu.Lock()
	// Another path (connect/close/reconnect) is already handling state.
	if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() {
		nc.mu.Unlock()
		return
	}

	if nc.Opts.AllowReconnect && nc.status == CONNECTED {
		// Set our new status
		nc.changeConnStatus(RECONNECTING)
		// Stop ping timer if set
		nc.stopPingTimer()
		if nc.conn != nil {
			nc.conn.Close()
			nc.conn = nil
		}

		// Create pending buffer before reconnecting.
		nc.bw.switchToPending()

		// Clear any queued pongs, e.g. pending flush calls.
		nc.clearPendingFlushCalls()

		go nc.doReconnect(err, false)
		nc.mu.Unlock()
		return
	}

	nc.changeConnStatus(DISCONNECTED)
	nc.err = err
	nc.mu.Unlock()
	nc.close(CLOSED, true, nil)
}
-
// asyncCBDispatcher drains the handler's linked list, invoking queued
// callbacks in order; a nil callback is the close sentinel that
// terminates the loop.
func (ac *asyncCallbacksHandler) asyncCBDispatcher() {
	for {
		ac.mu.Lock()
		// Protect for spurious wakeups. We should get out of the
		// wait only if there is an element to pop from the list.
		for ac.head == nil {
			ac.cond.Wait()
		}
		cur := ac.head
		ac.head = cur.next
		if cur == ac.tail {
			ac.tail = nil
		}
		ac.mu.Unlock()

		// This signals that the dispatcher has been closed and all
		// previous callbacks have been dispatched.
		if cur.f == nil {
			return
		}
		// Invoke callback outside of handler's lock
		cur.f()
	}
}
-
// push adds the given function to the tail of the list and
// signals the dispatcher. f must be non-nil (nil is the close sentinel).
func (ac *asyncCallbacksHandler) push(f func()) {
	ac.pushOrClose(f, false)
}
-
// close enqueues the nil sentinel that tells the dispatcher to exit
// once all previously queued callbacks have run.
func (ac *asyncCallbacksHandler) close() {
	ac.pushOrClose(nil, true)
}
-
// pushOrClose appends a callback (or the nil close sentinel) to the
// tail of the list and signals the dispatcher. Broadcast is used on
// close so every waiter observes the shutdown.
func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	// Make sure that library is not calling push with nil function,
	// since this is used to notify the dispatcher that it should stop.
	if !close && f == nil {
		panic("pushing a nil callback")
	}
	cb := &asyncCB{f: f}
	if ac.tail != nil {
		ac.tail.next = cb
	} else {
		ac.head = cb
	}
	ac.tail = cb
	if close {
		ac.cond.Broadcast()
	} else {
		ac.cond.Signal()
	}
}
-
// readLoop() will sit on the socket reading and processing the
// protocol from the server. It will dispatch appropriately based
// on the op type.
func (nc *Conn) readLoop() {
	// Release the wait group on exit
	defer nc.wg.Done()

	// Create a parseState if needed.
	nc.mu.Lock()
	if nc.ps == nil {
		nc.ps = &parseState{}
	}
	// Snapshot under the lock; the loop itself runs unlocked.
	conn := nc.conn
	br := nc.br
	nc.mu.Unlock()

	if conn == nil {
		return
	}

	for {
		buf, err := br.Read()
		if err == nil {
			// With websocket, it is possible that there is no error but
			// also no buffer returned (either WS control message or read of a
			// partial compressed message). We could call parse(buf) which
			// would ignore an empty buffer, but simply go back to top of the loop.
			if len(buf) == 0 {
				continue
			}
			err = nc.parse(buf)
		}
		if err != nil {
			nc.processOpErr(err)
			break
		}
	}
	// Clear the parseState here..
	nc.mu.Lock()
	nc.ps = nil
	nc.mu.Unlock()
}
-
// waitForMsgs waits on the conditional shared with readLoop and processMsg.
// It is used to deliver messages to asynchronous subscribers.
// It pops pending messages off the subscription's list, invokes the
// callback outside the subscription lock, honors AutoUnsubscribe max,
// and drains barrier markers on exit.
func (nc *Conn) waitForMsgs(s *Subscription) {
	var closed bool
	var delivered, max uint64

	// Used to account for adjustments to sub.pBytes when we wrap back around.
	msgLen := -1

	for {
		s.mu.Lock()
		// Do accounting for last msg delivered here so we only lock once
		// and drain state trips after callback has returned.
		if msgLen >= 0 {
			s.pMsgs--
			s.pBytes -= msgLen
			msgLen = -1
		}

		if s.pHead == nil && !s.closed {
			s.pCond.Wait()
		}
		// Pop the msg off the list
		m := s.pHead
		if m != nil {
			s.pHead = m.next
			if s.pHead == nil {
				s.pTail = nil
			}
			if m.barrier != nil {
				s.mu.Unlock()
				// Last subscription to hit the barrier runs its func.
				if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
					m.barrier.f()
				}
				continue
			}
			msgLen = len(m.Data)
		}
		// Snapshot callback/limits under the lock for use after unlock.
		mcb := s.mcb
		max = s.max
		closed = s.closed
		var fcReply string
		if !s.closed {
			s.delivered++
			delivered = s.delivered
			if s.jsi != nil {
				fcReply = s.checkForFlowControlResponse()
			}
		}
		s.mu.Unlock()

		// Respond to flow control if applicable
		if fcReply != _EMPTY_ {
			nc.Publish(fcReply, nil)
		}

		if closed {
			break
		}

		// Deliver the message.
		if m != nil && (max == 0 || delivered <= max) {
			mcb(m)
		}
		// If we have hit the max for delivered msgs, remove sub.
		if max > 0 && delivered >= max {
			nc.mu.Lock()
			nc.removeSub(s)
			nc.mu.Unlock()
			break
		}
	}
	// Check for barrier messages
	s.mu.Lock()
	for m := s.pHead; m != nil; m = s.pHead {
		if m.barrier != nil {
			s.mu.Unlock()
			if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
				m.barrier.f()
			}
			s.mu.Lock()
		}
		s.pHead = m.next
	}
	// Now check for pDone
	done := s.pDone
	s.mu.Unlock()

	if done != nil {
		done(s.Subject)
	}
}
-
-// Used for debugging and simulating loss for certain tests.
-// Return what is to be used. If we return nil the message will be dropped.
-type msgFilter func(m *Msg) *Msg
-
-// processMsg is called by parse and will place the msg on the
-// appropriate channel/pending queue for processing. If the channel is full,
-// or the pending queue is over the pending limits, the connection is
-// considered a slow consumer.
-func (nc *Conn) processMsg(data []byte) {
- // Stats
- atomic.AddUint64(&nc.InMsgs, 1)
- atomic.AddUint64(&nc.InBytes, uint64(len(data)))
-
- // Don't lock the connection to avoid server cutting us off if the
- // flusher is holding the connection lock, trying to send to the server
- // that is itself trying to send data to us.
- nc.subsMu.RLock()
- sub := nc.subs[nc.ps.ma.sid]
- var mf msgFilter
- if nc.filters != nil {
- mf = nc.filters[string(nc.ps.ma.subject)]
- }
- nc.subsMu.RUnlock()
-
- if sub == nil {
- return
- }
-
- // Copy them into string
- subj := string(nc.ps.ma.subject)
- reply := string(nc.ps.ma.reply)
-
- // Doing message create outside of the sub's lock to reduce contention.
- // It's possible that we end-up not using the message, but that's ok.
-
- // FIXME(dlc): Need to copy, should/can do COW?
- var msgPayload = data
- if !nc.ps.msgCopied {
- msgPayload = make([]byte, len(data))
- copy(msgPayload, data)
- }
-
- // Check if we have headers encoded here.
- var h Header
- var err error
- var ctrlMsg bool
- var ctrlType int
- var fcReply string
-
- if nc.ps.ma.hdr > 0 {
- hbuf := msgPayload[:nc.ps.ma.hdr]
- msgPayload = msgPayload[nc.ps.ma.hdr:]
- h, err = DecodeHeadersMsg(hbuf)
- if err != nil {
- // We will pass the message through but send async error.
- nc.mu.Lock()
- nc.err = ErrBadHeaderMsg
- if nc.Opts.AsyncErrorCB != nil {
- nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) })
- }
- nc.mu.Unlock()
- }
- }
-
- // FIXME(dlc): Should we recycle these containers?
- m := &Msg{
- Subject: subj,
- Reply: reply,
- Header: h,
- Data: msgPayload,
- Sub: sub,
- wsz: len(data) + len(subj) + len(reply),
- }
-
- // Check for message filters.
- if mf != nil {
- if m = mf(m); m == nil {
- // Drop message.
- return
- }
- }
-
- sub.mu.Lock()
-
- // Check if closed.
- if sub.closed {
- sub.mu.Unlock()
- return
- }
-
- // Skip flow control messages in case of using a JetStream context.
- jsi := sub.jsi
- if jsi != nil {
- // There has to be a header for it to be a control message.
- if h != nil {
- ctrlMsg, ctrlType = isJSControlMessage(m)
- if ctrlMsg && ctrlType == jsCtrlHB {
- // Check if the heartbeat has a "Consumer Stalled" header, if
- // so, the value is the FC reply to send a nil message to.
- // We will send it at the end of this function.
- fcReply = m.Header.Get(consumerStalledHdr)
- }
- }
- // Check for ordered consumer here. If checkOrderedMsgs returns true that means it detected a gap.
- if !ctrlMsg && jsi.ordered && sub.checkOrderedMsgs(m) {
- sub.mu.Unlock()
- return
- }
- }
-
- // Skip processing if this is a control message and
- // if not a pull consumer heartbeat. For pull consumers,
- // heartbeats have to be handled on per request basis.
- if !ctrlMsg || (jsi != nil && jsi.pull) {
- var chanSubCheckFC bool
- // Subscription internal stats (applicable only for non ChanSubscription's)
- if sub.typ != ChanSubscription {
- sub.pMsgs++
- if sub.pMsgs > sub.pMsgsMax {
- sub.pMsgsMax = sub.pMsgs
- }
- sub.pBytes += len(m.Data)
- if sub.pBytes > sub.pBytesMax {
- sub.pBytesMax = sub.pBytes
- }
-
- // Check for a Slow Consumer
- if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) ||
- (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) {
- goto slowConsumer
- }
- } else if jsi != nil {
- chanSubCheckFC = true
- }
-
- // We have two modes of delivery. One is the channel, used by channel
- // subscribers and syncSubscribers, the other is a linked list for async.
- if sub.mch != nil {
- select {
- case sub.mch <- m:
- default:
- goto slowConsumer
- }
- } else {
- // Push onto the async pList
- if sub.pHead == nil {
- sub.pHead = m
- sub.pTail = m
- if sub.pCond != nil {
- sub.pCond.Signal()
- }
- } else {
- sub.pTail.next = m
- sub.pTail = m
- }
- }
- if jsi != nil {
- // Store the ACK metadata from the message to
- // compare later on with the received heartbeat.
- sub.trackSequences(m.Reply)
- if chanSubCheckFC {
- // For ChanSubscription, since we can't call this when a message
- // is "delivered" (since user is pull from their own channel),
- // we have a go routine that does this check, however, we do it
- // also here to make it much more responsive. The go routine is
- // really to avoid stalling when there is no new messages coming.
- fcReply = sub.checkForFlowControlResponse()
- }
- }
- } else if ctrlType == jsCtrlFC && m.Reply != _EMPTY_ {
- // This is a flow control message.
- // We will schedule the send of the FC reply once we have delivered the
- // DATA message that was received before this flow control message, which
- // has sequence `jsi.fciseq`. However, it is possible that this message
- // has already been delivered, in that case, we need to send the FC reply now.
- if sub.getJSDelivered() >= jsi.fciseq {
- fcReply = m.Reply
- } else {
- // Schedule a reply after the previous message is delivered.
- sub.scheduleFlowControlResponse(m.Reply)
- }
- }
-
- // Clear any SlowConsumer status.
- if sub.sc {
- sub.changeSubStatus(SubscriptionActive)
- }
- sub.sc = false
- sub.mu.Unlock()
-
- if fcReply != _EMPTY_ {
- nc.Publish(fcReply, nil)
- }
-
- // Handle control heartbeat messages.
- if ctrlMsg && ctrlType == jsCtrlHB && m.Reply == _EMPTY_ {
- nc.checkForSequenceMismatch(m, sub, jsi)
- }
-
- return
-
-slowConsumer:
- sub.dropped++
- sc := !sub.sc
- sub.sc = true
- // Undo stats from above
- if sub.typ != ChanSubscription {
- sub.pMsgs--
- sub.pBytes -= len(m.Data)
- }
- if sc {
- sub.changeSubStatus(SubscriptionSlowConsumer)
- sub.mu.Unlock()
- // Now we need connection's lock and we may end-up in the situation
- // that we were trying to avoid, except that in this case, the client
- // is already experiencing client-side slow consumer situation.
- nc.mu.Lock()
- nc.err = ErrSlowConsumer
- if nc.Opts.AsyncErrorCB != nil {
- nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) })
- }
- nc.mu.Unlock()
- } else {
- sub.mu.Unlock()
- }
-}
-
-// processPermissionsViolation is called when the server signals a subject
-// permissions violation on either publish or subscribe.
-func (nc *Conn) processPermissionsViolation(err string) {
- nc.mu.Lock()
- // create error here so we can pass it as a closure to the async cb dispatcher.
- e := errors.New("nats: " + err)
- nc.err = e
- if nc.Opts.AsyncErrorCB != nil {
- nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) })
- }
- nc.mu.Unlock()
-}
-
-// processAuthError generally processing for auth errors. We want to do retries
-// unless we get the same error again. This allows us for instance to swap credentials
-// and have the app reconnect, but if nothing is changing we should bail.
-// This function will return true if the connection should be closed, false otherwise.
-// Connection lock is held on entry
-func (nc *Conn) processAuthError(err error) bool {
- nc.err = err
- if !nc.initc && nc.Opts.AsyncErrorCB != nil {
- nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
- }
- // We should give up if we tried twice on this server and got the
- // same error. This behavior can be modified using IgnoreAuthErrorAbort.
- if nc.current.lastErr == err && !nc.Opts.IgnoreAuthErrorAbort {
- nc.ar = true
- } else {
- nc.current.lastErr = err
- }
- return nc.ar
-}
-
-// flusher is a separate Go routine that will process flush requests for the write
-// bufio. This allows coalescing of writes to the underlying socket.
-func (nc *Conn) flusher() {
- // Release the wait group
- defer nc.wg.Done()
-
- // snapshot the bw and conn since they can change from underneath of us.
- nc.mu.Lock()
- bw := nc.bw
- conn := nc.conn
- fch := nc.fch
- nc.mu.Unlock()
-
- if conn == nil || bw == nil {
- return
- }
-
- for {
- if _, ok := <-fch; !ok {
- return
- }
- nc.mu.Lock()
-
- // Check to see if we should bail out.
- if !nc.isConnected() || nc.isConnecting() || conn != nc.conn {
- nc.mu.Unlock()
- return
- }
- if bw.buffered() > 0 {
- if err := bw.flush(); err != nil {
- if nc.err == nil {
- nc.err = err
- }
- if nc.Opts.AsyncErrorCB != nil {
- nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
- }
- }
- }
- nc.mu.Unlock()
- }
-}
-
-// processPing will send an immediate pong protocol response to the
-// server. The server uses this mechanism to detect dead clients.
-func (nc *Conn) processPing() {
- nc.sendProto(pongProto)
-}
-
-// processPong is used to process responses to the client's ping
-// messages. We use pings for the flush mechanism as well.
-func (nc *Conn) processPong() {
- var ch chan struct{}
-
- nc.mu.Lock()
- if len(nc.pongs) > 0 {
- ch = nc.pongs[0]
- nc.pongs = append(nc.pongs[:0], nc.pongs[1:]...)
- }
- nc.pout = 0
- nc.mu.Unlock()
- if ch != nil {
- ch <- struct{}{}
- }
-}
-
-// processOK is a placeholder for processing OK messages.
-func (nc *Conn) processOK() {
- // do nothing
-}
-
-// processInfo is used to parse the info messages sent
-// from the server.
-// This function may update the server pool.
-func (nc *Conn) processInfo(info string) error {
- if info == _EMPTY_ {
- return nil
- }
- var ncInfo serverInfo
- if err := json.Unmarshal([]byte(info), &ncInfo); err != nil {
- return err
- }
-
- // Copy content into connection's info structure.
- nc.info = ncInfo
- // The array could be empty/not present on initial connect,
- // if advertise is disabled on that server, or servers that
- // did not include themselves in the async INFO protocol.
- // If empty, do not remove the implicit servers from the pool.
- if len(nc.info.ConnectURLs) == 0 {
- if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
- nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
- }
- return nil
- }
- // Note about pool randomization: when the pool was first created,
- // it was randomized (if allowed). We keep the order the same (removing
- // implicit servers that are no longer sent to us). New URLs are sent
- // to us in no specific order so don't need extra randomization.
- hasNew := false
- // This is what we got from the server we are connected to.
- urls := nc.info.ConnectURLs
- // Transform that to a map for easy lookups
- tmp := make(map[string]struct{}, len(urls))
- for _, curl := range urls {
- tmp[curl] = struct{}{}
- }
- // Walk the pool and removed the implicit servers that are no longer in the
- // given array/map
- sp := nc.srvPool
- for i := 0; i < len(sp); i++ {
- srv := sp[i]
- curl := srv.url.Host
- // Check if this URL is in the INFO protocol
- _, inInfo := tmp[curl]
- // Remove from the temp map so that at the end we are left with only
- // new (or restarted) servers that need to be added to the pool.
- delete(tmp, curl)
- // Keep servers that were set through Options, but also the one that
- // we are currently connected to (even if it is a discovered server).
- if !srv.isImplicit || srv.url == nc.current.url {
- continue
- }
- if !inInfo {
- // Remove from server pool. Keep current order.
- copy(sp[i:], sp[i+1:])
- nc.srvPool = sp[:len(sp)-1]
- sp = nc.srvPool
- i--
- }
- }
- // Figure out if we should save off the current non-IP hostname if we encounter a bare IP.
- saveTLS := nc.current != nil && !hostIsIP(nc.current.url)
-
- // If there are any left in the tmp map, these are new (or restarted) servers
- // and need to be added to the pool.
- for curl := range tmp {
- // Before adding, check if this is a new (as in never seen) URL.
- // This is used to figure out if we invoke the DiscoveredServersCB
- if _, present := nc.urls[curl]; !present {
- hasNew = true
- }
- nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS)
- }
- if hasNew {
- // Randomize the pool if allowed but leave the first URL in place.
- if !nc.Opts.NoRandomize {
- nc.shufflePool(1)
- }
- if !nc.initc && nc.Opts.DiscoveredServersCB != nil {
- nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) })
- }
- }
- if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
- nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
- }
- return nil
-}
-
-// processAsyncInfo does the same than processInfo, but is called
-// from the parser. Calls processInfo under connection's lock
-// protection.
-func (nc *Conn) processAsyncInfo(info []byte) {
- nc.mu.Lock()
- // Ignore errors, we will simply not update the server pool...
- nc.processInfo(string(info))
- nc.mu.Unlock()
-}
-
-// LastError reports the last error encountered via the connection.
-// It can be used reliably within ClosedCB in order to find out reason
-// why connection was closed for example.
-func (nc *Conn) LastError() error {
- if nc == nil {
- return ErrInvalidConnection
- }
- nc.mu.RLock()
- err := nc.err
- nc.mu.RUnlock()
- return err
-}
-
-// Check if the given error string is an auth error, and if so returns
-// the corresponding ErrXXX error, nil otherwise
-func checkAuthError(e string) error {
- if strings.HasPrefix(e, AUTHORIZATION_ERR) {
- return ErrAuthorization
- }
- if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) {
- return ErrAuthExpired
- }
- if strings.HasPrefix(e, AUTHENTICATION_REVOKED_ERR) {
- return ErrAuthRevoked
- }
- if strings.HasPrefix(e, ACCOUNT_AUTHENTICATION_EXPIRED_ERR) {
- return ErrAccountAuthExpired
- }
- return nil
-}
-
-// processErr processes any error messages from the server and
-// sets the connection's LastError.
-func (nc *Conn) processErr(ie string) {
- // Trim, remove quotes
- ne := normalizeErr(ie)
- // convert to lower case.
- e := strings.ToLower(ne)
-
- close := false
-
- // FIXME(dlc) - process Slow Consumer signals special.
- if e == STALE_CONNECTION {
- nc.processOpErr(ErrStaleConnection)
- } else if e == MAX_CONNECTIONS_ERR {
- nc.processOpErr(ErrMaxConnectionsExceeded)
- } else if strings.HasPrefix(e, PERMISSIONS_ERR) {
- nc.processPermissionsViolation(ne)
- } else if authErr := checkAuthError(e); authErr != nil {
- nc.mu.Lock()
- close = nc.processAuthError(authErr)
- nc.mu.Unlock()
- } else {
- close = true
- nc.mu.Lock()
- nc.err = errors.New("nats: " + ne)
- nc.mu.Unlock()
- }
- if close {
- nc.close(CLOSED, true, nil)
- }
-}
-
-// kickFlusher will send a bool on a channel to kick the
-// flush Go routine to flush data to the server.
-func (nc *Conn) kickFlusher() {
- if nc.bw != nil {
- select {
- case nc.fch <- struct{}{}:
- default:
- }
- }
-}
-
-// Publish publishes the data argument to the given subject. The data
-// argument is left untouched and needs to be correctly interpreted on
-// the receiver.
-func (nc *Conn) Publish(subj string, data []byte) error {
- return nc.publish(subj, _EMPTY_, nil, data)
-}
-
-// Header represents the optional Header for a NATS message,
-// based on the implementation of http.Header.
-type Header map[string][]string
-
-// Add adds the key, value pair to the header. It is case-sensitive
-// and appends to any existing values associated with key.
-func (h Header) Add(key, value string) {
- h[key] = append(h[key], value)
-}
-
-// Set sets the header entries associated with key to the single
-// element value. It is case-sensitive and replaces any existing
-// values associated with key.
-func (h Header) Set(key, value string) {
- h[key] = []string{value}
-}
-
-// Get gets the first value associated with the given key.
-// It is case-sensitive.
-func (h Header) Get(key string) string {
- if h == nil {
- return _EMPTY_
- }
- if v := h[key]; v != nil {
- return v[0]
- }
- return _EMPTY_
-}
-
-// Values returns all values associated with the given key.
-// It is case-sensitive.
-func (h Header) Values(key string) []string {
- return h[key]
-}
-
-// Del deletes the values associated with a key.
-// It is case-sensitive.
-func (h Header) Del(key string) {
- delete(h, key)
-}
-
-// NewMsg creates a message for publishing that will use headers.
-func NewMsg(subject string) *Msg {
- return &Msg{
- Subject: subject,
- Header: make(Header),
- }
-}
-
-const (
- hdrLine = "NATS/1.0\r\n"
- crlf = "\r\n"
- hdrPreEnd = len(hdrLine) - len(crlf)
- statusHdr = "Status"
- descrHdr = "Description"
- lastConsumerSeqHdr = "Nats-Last-Consumer"
- lastStreamSeqHdr = "Nats-Last-Stream"
- consumerStalledHdr = "Nats-Consumer-Stalled"
- noResponders = "503"
- noMessagesSts = "404"
- reqTimeoutSts = "408"
- jetStream409Sts = "409"
- controlMsg = "100"
- statusLen = 3 // e.g. 20x, 40x, 50x
-)
-
-// DecodeHeadersMsg will decode and headers.
-func DecodeHeadersMsg(data []byte) (Header, error) {
- br := bufio.NewReaderSize(bytes.NewReader(data), 128)
- tp := textproto.NewReader(br)
- l, err := tp.ReadLine()
- if err != nil || len(l) < hdrPreEnd || l[:hdrPreEnd] != hdrLine[:hdrPreEnd] {
- return nil, ErrBadHeaderMsg
- }
-
- mh, err := readMIMEHeader(tp)
- if err != nil {
- return nil, err
- }
-
- // Check if we have an inlined status.
- if len(l) > hdrPreEnd {
- var description string
- status := strings.TrimSpace(l[hdrPreEnd:])
- if len(status) != statusLen {
- description = strings.TrimSpace(status[statusLen:])
- status = status[:statusLen]
- }
- mh.Add(statusHdr, status)
- if len(description) > 0 {
- mh.Add(descrHdr, description)
- }
- }
- return Header(mh), nil
-}
-
-// readMIMEHeader returns a MIMEHeader that preserves the
-// original case of the MIME header, based on the implementation
-// of textproto.ReadMIMEHeader.
-//
-// https://golang.org/pkg/net/textproto/#Reader.ReadMIMEHeader
-func readMIMEHeader(tp *textproto.Reader) (textproto.MIMEHeader, error) {
- m := make(textproto.MIMEHeader)
- for {
- kv, err := tp.ReadLine()
- if len(kv) == 0 {
- return m, err
- }
-
- // Process key fetching original case.
- i := strings.IndexByte(kv, ':')
- if i < 0 {
- return nil, ErrBadHeaderMsg
- }
- key := kv[:i]
- if key == "" {
- // Skip empty keys.
- continue
- }
- i++
- for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
- i++
- }
- m[key] = append(m[key], kv[i:])
- if err != nil {
- return m, err
- }
- }
-}
-
-// PublishMsg publishes the Msg structure, which includes the
-// Subject, an optional Reply and an optional Data field.
-func (nc *Conn) PublishMsg(m *Msg) error {
- if m == nil {
- return ErrInvalidMsg
- }
- hdr, err := m.headerBytes()
- if err != nil {
- return err
- }
- return nc.publish(m.Subject, m.Reply, hdr, m.Data)
-}
-
-// PublishRequest will perform a Publish() expecting a response on the
-// reply subject. Use Request() for automatically waiting for a response
-// inline.
-func (nc *Conn) PublishRequest(subj, reply string, data []byte) error {
- return nc.publish(subj, reply, nil, data)
-}
-
-// Used for handrolled Itoa
-const digits = "0123456789"
-
-// publish is the internal function to publish messages to a nats-server.
-// Sends a protocol data message by queuing into the bufio writer
-// and kicking the flush go routine. These writes should be protected.
-func (nc *Conn) publish(subj, reply string, hdr, data []byte) error {
- if nc == nil {
- return ErrInvalidConnection
- }
- if subj == "" {
- return ErrBadSubject
- }
- nc.mu.Lock()
-
- // Check if headers attempted to be sent to server that does not support them.
- if len(hdr) > 0 && !nc.info.Headers {
- nc.mu.Unlock()
- return ErrHeadersNotSupported
- }
-
- if nc.isClosed() {
- nc.mu.Unlock()
- return ErrConnectionClosed
- }
-
- if nc.isDrainingPubs() {
- nc.mu.Unlock()
- return ErrConnectionDraining
- }
-
- // Proactively reject payloads over the threshold set by server.
- msgSize := int64(len(data) + len(hdr))
- // Skip this check if we are not yet connected (RetryOnFailedConnect)
- if !nc.initc && msgSize > nc.info.MaxPayload {
- nc.mu.Unlock()
- return ErrMaxPayload
- }
-
- // Check if we are reconnecting, and if so check if
- // we have exceeded our reconnect outbound buffer limits.
- if nc.bw.atLimitIfUsingPending() {
- nc.mu.Unlock()
- return ErrReconnectBufExceeded
- }
-
- var mh []byte
- if hdr != nil {
- mh = nc.scratch[:len(_HPUB_P_)]
- } else {
- mh = nc.scratch[1:len(_HPUB_P_)]
- }
- mh = append(mh, subj...)
- mh = append(mh, ' ')
- if reply != "" {
- mh = append(mh, reply...)
- mh = append(mh, ' ')
- }
-
- // We could be smarter here, but simple loop is ok,
- // just avoid strconv in fast path.
- // FIXME(dlc) - Find a better way here.
- // msgh = strconv.AppendInt(msgh, int64(len(data)), 10)
- // go 1.14 some values strconv faster, may be able to switch over.
-
- var b [12]byte
- var i = len(b)
-
- if hdr != nil {
- if len(hdr) > 0 {
- for l := len(hdr); l > 0; l /= 10 {
- i--
- b[i] = digits[l%10]
- }
- } else {
- i--
- b[i] = digits[0]
- }
- mh = append(mh, b[i:]...)
- mh = append(mh, ' ')
- // reset for below.
- i = len(b)
- }
-
- if msgSize > 0 {
- for l := msgSize; l > 0; l /= 10 {
- i--
- b[i] = digits[l%10]
- }
- } else {
- i--
- b[i] = digits[0]
- }
-
- mh = append(mh, b[i:]...)
- mh = append(mh, _CRLF_...)
-
- if err := nc.bw.appendBufs(mh, hdr, data, _CRLF_BYTES_); err != nil {
- nc.mu.Unlock()
- return err
- }
-
- nc.OutMsgs++
- nc.OutBytes += uint64(len(data) + len(hdr))
-
- if len(nc.fch) == 0 {
- nc.kickFlusher()
- }
- nc.mu.Unlock()
- return nil
-}
-
-// respHandler is the global response handler. It will look up
-// the appropriate channel based on the last token and place
-// the message on the channel if possible.
-func (nc *Conn) respHandler(m *Msg) {
- nc.mu.Lock()
-
- // Just return if closed.
- if nc.isClosed() {
- nc.mu.Unlock()
- return
- }
-
- var mch chan *Msg
-
- // Grab mch
- rt := nc.respToken(m.Subject)
- if rt != _EMPTY_ {
- mch = nc.respMap[rt]
- // Delete the key regardless, one response only.
- delete(nc.respMap, rt)
- } else if len(nc.respMap) == 1 {
- // If the server has rewritten the subject, the response token (rt)
- // will not match (could be the case with JetStream). If that is the
- // case and there is a single entry, use that.
- for k, v := range nc.respMap {
- mch = v
- delete(nc.respMap, k)
- break
- }
- }
- nc.mu.Unlock()
-
- // Don't block, let Request timeout instead, mch is
- // buffered and we should delete the key before a
- // second response is processed.
- select {
- case mch <- m:
- default:
- return
- }
-}
-
-// Helper to setup and send new request style requests. Return the chan to receive the response.
-func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) {
- nc.mu.Lock()
- // Do setup for the new style if needed.
- if nc.respMap == nil {
- nc.initNewResp()
- }
- // Create new literal Inbox and map to a chan msg.
- mch := make(chan *Msg, RequestChanLen)
- respInbox := nc.newRespInbox()
- token := respInbox[nc.respSubLen:]
-
- nc.respMap[token] = mch
- if nc.respMux == nil {
- // Create the response subscription we will use for all new style responses.
- // This will be on an _INBOX with an additional terminal token. The subscription
- // will be on a wildcard.
- s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false, nil)
- if err != nil {
- nc.mu.Unlock()
- return nil, token, err
- }
- nc.respMux = s
- }
- nc.mu.Unlock()
-
- if err := nc.publish(subj, respInbox, hdr, data); err != nil {
- return nil, token, err
- }
-
- return mch, token, nil
-}
-
-// RequestMsg will send a request payload including optional headers and deliver
-// the response message, or an error, including a timeout if no message was received properly.
-func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) {
- if msg == nil {
- return nil, ErrInvalidMsg
- }
- hdr, err := msg.headerBytes()
- if err != nil {
- return nil, err
- }
-
- return nc.request(msg.Subject, hdr, msg.Data, timeout)
-}
-
-// Request will send a request payload and deliver the response message,
-// or an error, including a timeout if no message was received properly.
-func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) {
- return nc.request(subj, nil, data, timeout)
-}
-
-func (nc *Conn) useOldRequestStyle() bool {
- nc.mu.RLock()
- r := nc.Opts.UseOldRequestStyle
- nc.mu.RUnlock()
- return r
-}
-
-func (nc *Conn) request(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) {
- if nc == nil {
- return nil, ErrInvalidConnection
- }
-
- var m *Msg
- var err error
-
- if nc.useOldRequestStyle() {
- m, err = nc.oldRequest(subj, hdr, data, timeout)
- } else {
- m, err = nc.newRequest(subj, hdr, data, timeout)
- }
-
- // Check for no responder status.
- if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
- m, err = nil, ErrNoResponders
- }
- return m, err
-}
-
-func (nc *Conn) newRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) {
- mch, token, err := nc.createNewRequestAndSend(subj, hdr, data)
- if err != nil {
- return nil, err
- }
-
- t := globalTimerPool.Get(timeout)
- defer globalTimerPool.Put(t)
-
- var ok bool
- var msg *Msg
-
- select {
- case msg, ok = <-mch:
- if !ok {
- return nil, ErrConnectionClosed
- }
- case <-t.C:
- nc.mu.Lock()
- delete(nc.respMap, token)
- nc.mu.Unlock()
- return nil, ErrTimeout
- }
-
- return msg, nil
-}
-
-// oldRequest will create an Inbox and perform a Request() call
-// with the Inbox reply and return the first reply received.
-// This is optimized for the case of multiple responses.
-func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) {
- inbox := nc.NewInbox()
- ch := make(chan *Msg, RequestChanLen)
-
- s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil)
- if err != nil {
- return nil, err
- }
- s.AutoUnsubscribe(1)
- defer s.Unsubscribe()
-
- err = nc.publish(subj, inbox, hdr, data)
- if err != nil {
- return nil, err
- }
-
- return s.NextMsg(timeout)
-}
-
-// InboxPrefix is the prefix for all inbox subjects.
-const (
- InboxPrefix = "_INBOX."
- inboxPrefixLen = len(InboxPrefix)
- replySuffixLen = 8 // Gives us 62^8
- rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
- base = 62
-)
-
-// NewInbox will return an inbox string which can be used for directed replies from
-// subscribers. These are guaranteed to be unique, but can be shared and subscribed
-// to by others.
-func NewInbox() string {
- var b [inboxPrefixLen + nuidSize]byte
- pres := b[:inboxPrefixLen]
- copy(pres, InboxPrefix)
- ns := b[inboxPrefixLen:]
- copy(ns, nuid.Next())
- return string(b[:])
-}
-
-// Create a new inbox that is prefix aware.
-func (nc *Conn) NewInbox() string {
- if nc.Opts.InboxPrefix == _EMPTY_ {
- return NewInbox()
- }
-
- var sb strings.Builder
- sb.WriteString(nc.Opts.InboxPrefix)
- sb.WriteByte('.')
- sb.WriteString(nuid.Next())
- return sb.String()
-}
-
-// Function to init new response structures.
-func (nc *Conn) initNewResp() {
- nc.respSubPrefix = fmt.Sprintf("%s.", nc.NewInbox())
- nc.respSubLen = len(nc.respSubPrefix)
- nc.respSub = fmt.Sprintf("%s*", nc.respSubPrefix)
- nc.respMap = make(map[string]chan *Msg)
- nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano()))
-}
-
-// newRespInbox creates a new literal response subject
-// that will trigger the mux subscription handler.
-// Lock should be held.
-func (nc *Conn) newRespInbox() string {
- if nc.respMap == nil {
- nc.initNewResp()
- }
-
- var sb strings.Builder
- sb.WriteString(nc.respSubPrefix)
-
- rn := nc.respRand.Int63()
- for i := 0; i < replySuffixLen; i++ {
- sb.WriteByte(rdigits[rn%base])
- rn /= base
- }
-
- return sb.String()
-}
-
-// NewRespInbox is the new format used for _INBOX.
-func (nc *Conn) NewRespInbox() string {
- nc.mu.Lock()
- s := nc.newRespInbox()
- nc.mu.Unlock()
- return s
-}
-
-// respToken will return the last token of a literal response inbox
-// which we use for the message channel lookup. This needs to verify the subject
-// prefix matches to protect itself against the server changing the subject.
-// Lock should be held.
-func (nc *Conn) respToken(respInbox string) string {
- if token, found := strings.CutPrefix(respInbox, nc.respSubPrefix); found {
- return token
- }
- return ""
-}
-
-// Subscribe will express interest in the given subject. The subject
-// can have wildcards.
-// There are two type of wildcards: * for partial, and > for full.
-// A subscription on subject time.*.east would receive messages sent to time.us.east and time.eu.east.
-// A subscription on subject time.us.> would receive messages sent to
-// time.us.east and time.us.east.atlanta, while time.us.* would only match time.us.east
-// since it can't match more than one token.
-// Messages will be delivered to the associated MsgHandler.
-func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) {
- return nc.subscribe(subj, _EMPTY_, cb, nil, false, nil)
-}
-
-// ChanSubscribe will express interest in the given subject and place
-// all messages received on the channel.
-// You should not close the channel until sub.Unsubscribe() has been called.
-func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) {
- return nc.subscribe(subj, _EMPTY_, nil, ch, false, nil)
-}
-
-// ChanQueueSubscribe will express interest in the given subject.
-// All subscribers with the same queue name will form the queue group
-// and only one member of the group will be selected to receive any given message,
-// which will be placed on the channel.
-// You should not close the channel until sub.Unsubscribe() has been called.
-// Note: This is the same than QueueSubscribeSyncWithChan.
-func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) {
- return nc.subscribe(subj, group, nil, ch, false, nil)
-}
-
-// SubscribeSync will express interest on the given subject. Messages will
-// be received synchronously using Subscription.NextMsg().
-func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) {
- if nc == nil {
- return nil, ErrInvalidConnection
- }
- mch := make(chan *Msg, nc.Opts.SubChanLen)
- return nc.subscribe(subj, _EMPTY_, nil, mch, true, nil)
-}
-
-// QueueSubscribe creates an asynchronous queue subscriber on the given subject.
-// All subscribers with the same queue name will form the queue group and
-// only one member of the group will be selected to receive any given
-// message asynchronously.
-func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) {
- return nc.subscribe(subj, queue, cb, nil, false, nil)
-}
-
-// QueueSubscribeSync creates a synchronous queue subscriber on the given
-// subject. All subscribers with the same queue name will form the queue
-// group and only one member of the group will be selected to receive any
-// given message synchronously using Subscription.NextMsg().
-func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) {
- mch := make(chan *Msg, nc.Opts.SubChanLen)
- return nc.subscribe(subj, queue, nil, mch, true, nil)
-}
-
// QueueSubscribeSyncWithChan will express interest in the given subject.
// All subscribers with the same queue name will form the queue group
// and only one member of the group will be selected to receive any given message,
// which will be placed on the channel.
// You should not close the channel until sub.Unsubscribe() has been called.
// Note: This is the same as ChanQueueSubscribe.
func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) {
	return nc.subscribe(subj, queue, nil, ch, false, nil)
}
-
// badSubject reports whether a subject is malformed: whitespace is not
// allowed anywhere, and every dot-separated token must be non-empty.
func badSubject(subj string) bool {
	if strings.ContainsAny(subj, " \t\r\n") {
		return true
	}
	for _, tok := range strings.Split(subj, ".") {
		if tok == "" {
			return true
		}
	}
	return false
}
-
// badQueue reports whether a queue name contains whitespace, which is not
// allowed by the protocol.
func badQueue(qname string) bool {
	for _, c := range qname {
		switch c {
		case ' ', '\t', '\r', '\n':
			return true
		}
	}
	return false
}
-
// subscribe is the internal subscribe function that indicates interest in a
// subject. It acquires the connection lock and delegates the actual work to
// subscribeLocked.
func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
	if nc == nil {
		return nil, ErrInvalidConnection
	}
	nc.mu.Lock()
	defer nc.mu.Unlock()
	return nc.subscribeLocked(subj, queue, cb, ch, isSync, js)
}
-
// subscribeLocked creates and registers a subscription. nc.mu must be held
// by the caller (see subscribe). Exactly one of cb (async delivery) or ch
// (chan/sync delivery, selected by isSync) must be provided; js is non-nil
// for JetStream-backed subscriptions.
func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
	if nc == nil {
		return nil, ErrInvalidConnection
	}
	if badSubject(subj) {
		return nil, ErrBadSubject
	}
	if queue != _EMPTY_ && badQueue(queue) {
		return nil, ErrBadQueueName
	}

	// Check for some error conditions.
	if nc.isClosed() {
		return nil, ErrConnectionClosed
	}
	if nc.isDraining() {
		return nil, ErrConnectionDraining
	}

	// Need at least one delivery mechanism.
	if cb == nil && ch == nil {
		return nil, ErrBadSubscription
	}

	sub := &Subscription{
		Subject: subj,
		Queue:   queue,
		mcb:     cb,
		conn:    nc,
		jsi:     js,
	}
	// Set pending limits. For channel-based subscriptions the channel
	// capacity acts as the pending-message limit.
	if ch != nil {
		sub.pMsgsLimit = cap(ch)
	} else {
		sub.pMsgsLimit = DefaultSubPendingMsgsLimit
	}
	sub.pBytesLimit = DefaultSubPendingBytesLimit

	// If we have an async callback, start up a sub specific
	// Go routine to deliver the messages.
	var sr bool
	if cb != nil {
		sub.typ = AsyncSubscription
		sub.pCond = sync.NewCond(&sub.mu)
		sr = true
	} else if !isSync {
		sub.typ = ChanSubscription
		sub.mch = ch
	} else { // Sync Subscription
		sub.typ = SyncSubscription
		sub.mch = ch
	}

	// Register under a fresh sid; subsMu guards the sid counter and map.
	nc.subsMu.Lock()
	nc.ssid++
	sub.sid = nc.ssid
	nc.subs[sub.sid] = sub
	nc.subsMu.Unlock()

	// Let's start the go routine now that it is fully setup and registered.
	if sr {
		go nc.waitForMsgs(sub)
	}

	// We will send these for all subs when we reconnect
	// so that we can suppress here if reconnecting.
	if !nc.isReconnecting() {
		nc.bw.appendString(fmt.Sprintf(subProto, subj, queue, sub.sid))
		nc.kickFlusher()
	}

	sub.changeSubStatus(SubscriptionActive)
	return sub, nil
}
-
// NumSubscriptions returns active number of subscriptions.
// NOTE(review): nc.subs is mutated under nc.subsMu elsewhere in this file
// (see subscribeLocked/removeSub); reading len(nc.subs) under nc.mu alone
// looks inconsistent — confirm the intended lock discipline.
func (nc *Conn) NumSubscriptions() int {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return len(nc.subs)
}
-
// removeSub unregisters s from the connection and tears down its delivery
// machinery (sync channel, JetStream timers, closed handler, status).
// Lock for nc should be held here upon entry.
func (nc *Conn) removeSub(s *Subscription) {
	nc.subsMu.Lock()
	delete(nc.subs, s.sid)
	nc.subsMu.Unlock()
	s.mu.Lock()
	defer s.mu.Unlock()
	// Release callers on NextMsg for SyncSubscription only
	if s.mch != nil && s.typ == SyncSubscription {
		close(s.mch)
	}
	s.mch = nil

	// If JS subscription then stop HB timer.
	if jsi := s.jsi; jsi != nil {
		if jsi.hbc != nil {
			jsi.hbc.Stop()
			jsi.hbc = nil
		}
		if jsi.csfct != nil {
			jsi.csfct.Stop()
			jsi.csfct = nil
		}
	}

	// Run the closed handler here for non-async subscriptions only.
	// NOTE(review): the async-side invocation is not visible in this chunk —
	// confirm against waitForMsgs.
	if s.typ != AsyncSubscription {
		done := s.pDone
		if done != nil {
			done(s.Subject)
		}
	}
	// Mark as invalid
	s.closed = true
	s.changeSubStatus(SubscriptionClosed)
	// Wake any goroutines blocked on the pending condition variable.
	if s.pCond != nil {
		s.pCond.Broadcast()
	}
}
-
// SubscriptionType is the type of the Subscription.
type SubscriptionType int

// The different types of subscription types.
const (
	// AsyncSubscription delivers via a per-subscription goroutine and callback.
	AsyncSubscription = SubscriptionType(iota)
	// SyncSubscription delivers via Subscription.NextMsg().
	SyncSubscription
	// ChanSubscription delivers on a user-supplied channel.
	ChanSubscription
	// NilSubscription is what Type() reports for a nil receiver.
	NilSubscription
	// PullSubscription is reported for JetStream pull consumers (see Type()).
	PullSubscription
)
-
-// Type returns the type of Subscription.
-func (s *Subscription) Type() SubscriptionType {
- if s == nil {
- return NilSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- // Pull subscriptions are really a SyncSubscription and we want this
- // type to be set internally for all delivered messages management, etc..
- // So check when to return PullSubscription to the user.
- if s.jsi != nil && s.jsi.pull {
- return PullSubscription
- }
- return s.typ
-}
-
-// IsValid returns a boolean indicating whether the subscription
-// is still active. This will return false if the subscription has
-// already been closed.
-func (s *Subscription) IsValid() bool {
- if s == nil {
- return false
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.conn != nil && !s.closed
-}
-
-// Drain will remove interest but continue callbacks until all messages
-// have been processed.
-//
-// For a JetStream subscription, if the library has created the JetStream
-// consumer, the library will send a DeleteConsumer request to the server
-// when the Drain operation completes. If a failure occurs when deleting
-// the JetStream consumer, an error will be reported to the asynchronous
-// error callback.
-// If you do not wish the JetStream consumer to be automatically deleted,
-// ensure that the consumer is not created by the library, which means
-// create the consumer with AddConsumer and bind to this consumer.
-func (s *Subscription) Drain() error {
- if s == nil {
- return ErrBadSubscription
- }
- s.mu.Lock()
- conn := s.conn
- s.mu.Unlock()
- if conn == nil {
- return ErrBadSubscription
- }
- return conn.unsubscribe(s, 0, true)
-}
-
-// IsDraining returns a boolean indicating whether the subscription
-// is being drained.
-// This will return false if the subscription has already been closed.
-func (s *Subscription) IsDraining() bool {
- if s == nil {
- return false
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.draining
-}
-
-// StatusChanged returns a channel on which given list of subscription status
-// changes will be sent. If no status is provided, all status changes will be sent.
-// Available statuses are SubscriptionActive, SubscriptionDraining, SubscriptionClosed,
-// and SubscriptionSlowConsumer.
-// The returned channel will be closed when the subscription is closed.
-func (s *Subscription) StatusChanged(statuses ...SubStatus) <-chan SubStatus {
- if len(statuses) == 0 {
- statuses = []SubStatus{SubscriptionActive, SubscriptionDraining, SubscriptionClosed, SubscriptionSlowConsumer}
- }
- ch := make(chan SubStatus, 10)
- for _, status := range statuses {
- s.registerStatusChangeListener(status, ch)
- // initial status
- if status == s.status {
- ch <- status
- }
- }
- return ch
-}
-
-// registerStatusChangeListener registers a channel waiting for a specific status change event.
-// Status change events are non-blocking - if no receiver is waiting for the status change,
-// it will not be sent on the channel. Closed channels are ignored.
-func (s *Subscription) registerStatusChangeListener(status SubStatus, ch chan SubStatus) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.statListeners == nil {
- s.statListeners = make(map[chan SubStatus][]SubStatus)
- }
- if _, ok := s.statListeners[ch]; !ok {
- s.statListeners[ch] = make([]SubStatus, 0)
- }
- s.statListeners[ch] = append(s.statListeners[ch], status)
-}
-
// sendStatusEvent sends subscription status event to all channels.
// If there is no listener, sendStatusEvent
// will not block. Lock should be held entering.
func (s *Subscription) sendStatusEvent(status SubStatus) {
	for ch, statuses := range s.statListeners {
		// Skip listeners not registered for this particular status.
		if !containsStatus(statuses, status) {
			continue
		}
		// only send event if someone's listening
		select {
		case ch <- status:
		default:
		}
		// Terminal status: close the channel so receivers ranging over it
		// terminate.
		if status == SubscriptionClosed {
			close(ch)
		}
	}
}
-
-func containsStatus(statuses []SubStatus, status SubStatus) bool {
- for _, s := range statuses {
- if s == status {
- return true
- }
- }
- return false
-}
-
// changeSubStatus changes subscription status and sends events
// to all listeners. Lock should be held entering.
// Note: the event carrying the new status is emitted before s.status is
// overwritten.
func (s *Subscription) changeSubStatus(status SubStatus) {
	if s == nil {
		return
	}
	s.sendStatusEvent(status)
	s.status = status
}
-
// Unsubscribe will remove interest in the given subject.
//
// For a JetStream subscription, if the library has created the JetStream
// consumer, it will send a DeleteConsumer request to the server (if the
// unsubscribe itself was successful). If the delete operation fails, the
// error will be returned.
// If you do not wish the JetStream consumer to be automatically deleted,
// ensure that the consumer is not created by the library, which means
// create the consumer with AddConsumer and bind to this consumer (using
// the nats.Bind() option).
func (s *Subscription) Unsubscribe() error {
	if s == nil {
		return ErrBadSubscription
	}
	// Snapshot state under the subscription lock, then operate unlocked to
	// avoid holding s.mu across connection-level calls.
	s.mu.Lock()
	conn := s.conn
	closed := s.closed
	dc := s.jsi != nil && s.jsi.dc
	s.mu.Unlock()
	if conn == nil || conn.IsClosed() {
		return ErrConnectionClosed
	}
	if closed {
		return ErrBadSubscription
	}
	if conn.IsDraining() {
		return ErrConnectionDraining
	}
	err := conn.unsubscribe(s, 0, false)
	// dc: the library created the JS consumer, so delete it on success.
	if err == nil && dc {
		err = s.deleteConsumer()
	}
	return err
}
-
// checkDrained will watch for a subscription to be fully drained
// and then remove it. Runs as its own goroutine (started by unsubscribe
// in drain mode).
func (nc *Conn) checkDrained(sub *Subscription) {
	// Always clear the draining flag when this goroutine exits.
	defer func() {
		sub.mu.Lock()
		defer sub.mu.Unlock()
		sub.draining = false
	}()
	if nc == nil || sub == nil {
		return
	}

	// This allows us to know that whatever we have in the client pending
	// is correct and the server will not send additional information.
	nc.Flush()

	sub.mu.Lock()
	// For JS subscriptions, check if we are going to delete the
	// JS consumer when drain completes.
	dc := sub.jsi != nil && sub.jsi.dc
	sub.mu.Unlock()

	// Once we are here we just wait for Pending to reach 0 or
	// any other state to exit this go routine.
	for {
		// check connection is still valid.
		if nc.IsClosed() {
			return
		}

		// Check subscription state
		sub.mu.Lock()
		conn := sub.conn
		closed := sub.closed
		pMsgs := sub.pMsgs
		sub.mu.Unlock()

		if conn == nil || closed || pMsgs == 0 {
			nc.mu.Lock()
			nc.removeSub(sub)
			nc.mu.Unlock()
			if dc {
				// Report consumer-deletion failures via the async error
				// callback since there is no caller left to return to.
				if err := sub.deleteConsumer(); err != nil {
					nc.mu.Lock()
					if errCB := nc.Opts.AsyncErrorCB; errCB != nil {
						nc.ach.push(func() { errCB(nc, sub, err) })
					}
					nc.mu.Unlock()
				}
			}
			return
		}

		// Poll at a coarse interval; drain completion is not latency
		// sensitive.
		time.Sleep(100 * time.Millisecond)
	}
}
-
-// AutoUnsubscribe will issue an automatic Unsubscribe that is
-// processed by the server when max messages have been received.
-// This can be useful when sending a request to an unknown number
-// of subscribers.
-func (s *Subscription) AutoUnsubscribe(max int) error {
- if s == nil {
- return ErrBadSubscription
- }
- s.mu.Lock()
- conn := s.conn
- closed := s.closed
- s.mu.Unlock()
- if conn == nil || closed {
- return ErrBadSubscription
- }
- return conn.unsubscribe(s, max, false)
-}
-
-// SetClosedHandler will set the closed handler for when a subscription
-// is closed (either unsubscribed or drained).
-func (s *Subscription) SetClosedHandler(handler func(subject string)) {
- s.mu.Lock()
- s.pDone = handler
- s.mu.Unlock()
-}
-
// unsubscribe performs the low level unsubscribe to the server.
// Use Subscription.Unsubscribe()
// max > 0 requests an auto-unsubscribe after max deliveries; drainMode
// keeps the subscription locally alive until its pending count drains.
func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error {
	var maxStr string
	if max > 0 {
		sub.mu.Lock()
		sub.max = uint64(max)
		// Only tell the server about a max that has not been reached yet.
		if sub.delivered < sub.max {
			maxStr = strconv.Itoa(max)
		}
		sub.mu.Unlock()
	}

	nc.mu.Lock()
	// ok here, but defer is expensive
	defer nc.mu.Unlock()

	if nc.isClosed() {
		return ErrConnectionClosed
	}

	nc.subsMu.RLock()
	s := nc.subs[sub.sid]
	nc.subsMu.RUnlock()
	// Already unsubscribed
	if s == nil {
		return nil
	}

	// Immediate unsubscribe (no server-side max pending, not draining):
	// drop the local bookkeeping right away.
	if maxStr == _EMPTY_ && !drainMode {
		nc.removeSub(s)
	}

	if drainMode {
		s.mu.Lock()
		s.draining = true
		sub.changeSubStatus(SubscriptionDraining)
		s.mu.Unlock()
		// Drain completes asynchronously; see checkDrained.
		go nc.checkDrained(sub)
	}

	// We will send these for all subs when we reconnect
	// so that we can suppress here.
	if !nc.isReconnecting() {
		nc.bw.appendString(fmt.Sprintf(unsubProto, s.sid, maxStr))
		nc.kickFlusher()
	}

	// For JetStream subscriptions cancel the attached context if there is any.
	var cancel func()
	sub.mu.Lock()
	jsi := sub.jsi
	if jsi != nil {
		cancel = jsi.cancel
		jsi.cancel = nil
	}
	sub.mu.Unlock()
	if cancel != nil {
		cancel()
	}

	return nil
}
-
// NextMsg will return the next message available to a synchronous subscriber
// or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription),
// the connection is closed (ErrConnectionClosed), the timeout is reached (ErrTimeout),
// or if there were no responders (ErrNoResponders) when used in the context of a request/reply.
func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) {
	if s == nil {
		return nil, ErrBadSubscription
	}

	s.mu.Lock()
	err := s.validateNextMsgState(false)
	if err != nil {
		s.mu.Unlock()
		return nil, err
	}

	// snapshot the delivery channel so we can select on it unlocked.
	mch := s.mch
	s.mu.Unlock()

	var ok bool
	var msg *Msg

	// If something is available right away, let's optimize that case.
	select {
	case msg, ok = <-mch:
		if !ok {
			// Channel was closed: map to the proper terminal error.
			return nil, s.getNextMsgErr()
		}
		if err := s.processNextMsgDelivered(msg); err != nil {
			return nil, err
		} else {
			return msg, nil
		}
	default:
	}

	// If we are here a message was not immediately available, so lets loop
	// with a timeout.

	// Pooled timer avoids allocating a new timer per call.
	t := globalTimerPool.Get(timeout)
	defer globalTimerPool.Put(t)

	select {
	case msg, ok = <-mch:
		if !ok {
			return nil, s.getNextMsgErr()
		}
		if err := s.processNextMsgDelivered(msg); err != nil {
			return nil, err
		}
	case <-t.C:
		return nil, ErrTimeout
	}

	return msg, nil
}
-
// validateNextMsgState checks whether the subscription is in a valid
// state to call NextMsg and be delivered another message synchronously.
// This should be called while holding the lock.
func (s *Subscription) validateNextMsgState(pullSubInternal bool) error {
	if s.connClosed {
		return ErrConnectionClosed
	}
	// A nil mch means the subscription has been torn down (see removeSub
	// and Conn.close).
	if s.mch == nil {
		if s.max > 0 && s.delivered >= s.max {
			return ErrMaxMessages
		} else if s.closed {
			return ErrBadSubscription
		}
	}
	if s.mcb != nil {
		return ErrSyncSubRequired
	}
	// Report a slow-consumer condition exactly once, then clear the flag
	// and restore the active status.
	if s.sc {
		s.changeSubStatus(SubscriptionActive)
		s.sc = false
		return ErrSlowConsumer
	}
	// Unless this is from an internal call, reject use of this API.
	// Users should use Fetch() instead.
	if !pullSubInternal && s.jsi != nil && s.jsi.pull {
		return ErrTypeSubscription
	}
	return nil
}
-
-// This is called when the sync channel has been closed.
-// The error returned will be either connection or subscription
-// closed depending on what caused NextMsg() to fail.
-func (s *Subscription) getNextMsgErr() error {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.connClosed {
- return ErrConnectionClosed
- }
- return ErrBadSubscription
-}
-
// processNextMsgDelivered takes a message and applies the needed
// accounting to the stats from the subscription, returning an
// error in case we have the maximum number of messages have been
// delivered already. It should not be called while holding the lock.
func (s *Subscription) processNextMsgDelivered(msg *Msg) error {
	s.mu.Lock()
	nc := s.conn
	max := s.max

	var fcReply string
	// Update some stats.
	s.delivered++
	delivered := s.delivered
	if s.jsi != nil {
		fcReply = s.checkForFlowControlResponse()
	}

	// Pending counters only apply to sync subscriptions; chan
	// subscriptions use the channel itself as the buffer.
	if s.typ == SyncSubscription {
		s.pMsgs--
		s.pBytes -= len(msg.Data)
	}
	s.mu.Unlock()

	// Answer JetStream flow control outside the lock.
	if fcReply != _EMPTY_ {
		nc.Publish(fcReply, nil)
	}

	if max > 0 {
		if delivered > max {
			return ErrMaxMessages
		}
		// Remove subscription if we have reached max.
		if delivered == max {
			nc.mu.Lock()
			nc.removeSub(s)
			nc.mu.Unlock()
		}
	}
	// An empty payload carrying a no-responders status header means the
	// request had no takers.
	if len(msg.Data) == 0 && msg.Header.Get(statusHdr) == noResponders {
		return ErrNoResponders
	}

	return nil
}
-
// QueuedMsgs returns the number of queued messages in the client for this
// subscription.
//
// Deprecated: Use Pending() instead.
func (s *Subscription) QueuedMsgs() (int, error) {
	m, _, err := s.Pending()
	return int(m), err
}
-
-// Pending returns the number of queued messages and queued bytes in the client for this subscription.
-func (s *Subscription) Pending() (int, int, error) {
- if s == nil {
- return -1, -1, ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return -1, -1, ErrBadSubscription
- }
- if s.typ == ChanSubscription {
- return -1, -1, ErrTypeSubscription
- }
- return s.pMsgs, s.pBytes, nil
-}
-
-// MaxPending returns the maximum number of queued messages and queued bytes seen so far.
-func (s *Subscription) MaxPending() (int, int, error) {
- if s == nil {
- return -1, -1, ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return -1, -1, ErrBadSubscription
- }
- if s.typ == ChanSubscription {
- return -1, -1, ErrTypeSubscription
- }
- return s.pMsgsMax, s.pBytesMax, nil
-}
-
-// ClearMaxPending resets the maximums seen so far.
-func (s *Subscription) ClearMaxPending() error {
- if s == nil {
- return ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return ErrBadSubscription
- }
- if s.typ == ChanSubscription {
- return ErrTypeSubscription
- }
- s.pMsgsMax, s.pBytesMax = 0, 0
- return nil
-}
-
// Pending Limits. These defaults are applied in subscribeLocked when the
// subscription does not bring its own channel capacity.
const (
	// DefaultSubPendingMsgsLimit will be 512k msgs.
	DefaultSubPendingMsgsLimit = 512 * 1024
	// DefaultSubPendingBytesLimit is 64MB
	DefaultSubPendingBytesLimit = 64 * 1024 * 1024
)
-
-// PendingLimits returns the current limits for this subscription.
-// If no error is returned, a negative value indicates that the
-// given metric is not limited.
-func (s *Subscription) PendingLimits() (int, int, error) {
- if s == nil {
- return -1, -1, ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return -1, -1, ErrBadSubscription
- }
- if s.typ == ChanSubscription {
- return -1, -1, ErrTypeSubscription
- }
- return s.pMsgsLimit, s.pBytesLimit, nil
-}
-
-// SetPendingLimits sets the limits for pending msgs and bytes for this subscription.
-// Zero is not allowed. Any negative value means that the given metric is not limited.
-func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error {
- if s == nil {
- return ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return ErrBadSubscription
- }
- if s.typ == ChanSubscription {
- return ErrTypeSubscription
- }
- if msgLimit == 0 || bytesLimit == 0 {
- return ErrInvalidArg
- }
- s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit
- return nil
-}
-
-// Delivered returns the number of delivered messages for this subscription.
-func (s *Subscription) Delivered() (int64, error) {
- if s == nil {
- return -1, ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return -1, ErrBadSubscription
- }
- return int64(s.delivered), nil
-}
-
-// Dropped returns the number of known dropped messages for this subscription.
-// This will correspond to messages dropped by violations of PendingLimits. If
-// the server declares the connection a SlowConsumer, this number may not be
-// valid.
-func (s *Subscription) Dropped() (int, error) {
- if s == nil {
- return -1, ErrBadSubscription
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.conn == nil || s.closed {
- return -1, ErrBadSubscription
- }
- return s.dropped, nil
-}
-
-// Respond allows a convenient way to respond to requests in service based subscriptions.
-func (m *Msg) Respond(data []byte) error {
- if m == nil || m.Sub == nil {
- return ErrMsgNotBound
- }
- if m.Reply == "" {
- return ErrMsgNoReply
- }
- m.Sub.mu.Lock()
- nc := m.Sub.conn
- m.Sub.mu.Unlock()
- // No need to check the connection here since the call to publish will do all the checking.
- return nc.Publish(m.Reply, data)
-}
-
-// RespondMsg allows a convenient way to respond to requests in service based subscriptions that might include headers
-func (m *Msg) RespondMsg(msg *Msg) error {
- if m == nil || m.Sub == nil {
- return ErrMsgNotBound
- }
- if m.Reply == "" {
- return ErrMsgNoReply
- }
- msg.Subject = m.Reply
- m.Sub.mu.Lock()
- nc := m.Sub.conn
- m.Sub.mu.Unlock()
- // No need to check the connection here since the call to publish will do all the checking.
- return nc.PublishMsg(msg)
-}
-
// FIXME: This is a hack
// removeFlushEntry is needed when we need to discard queued up responses
// for our pings as part of a flush call. This happens when we have a flush
// call outstanding and we call close. Returns true when the entry was found.
func (nc *Conn) removeFlushEntry(ch chan struct{}) bool {
	nc.mu.Lock()
	defer nc.mu.Unlock()
	if nc.pongs == nil {
		return false
	}
	for i, c := range nc.pongs {
		if c == ch {
			// Nil out rather than remove so positions of the other
			// waiters stay aligned with their pending pongs.
			nc.pongs[i] = nil
			return true
		}
	}
	return false
}
-
// sendPing queues a PING protocol message and flushes it in place.
// ch (which may be nil) is appended to the pong list and will be signaled
// when the matching PONG arrives. The lock must be held entering this function.
func (nc *Conn) sendPing(ch chan struct{}) {
	nc.pongs = append(nc.pongs, ch)
	nc.bw.appendString(pingProto)
	// Flush in place.
	nc.bw.flush()
}
-
// processPingTimer fires periodically and sends a client origin
// ping to the server. Will also check that we have received
// responses from the server.
func (nc *Conn) processPingTimer() {
	nc.mu.Lock()

	if nc.status != CONNECTED {
		nc.mu.Unlock()
		return
	}

	// Check for violation: too many outstanding pings means the server has
	// stopped responding, so treat the connection as stale.
	nc.pout++
	if nc.pout > nc.Opts.MaxPingsOut {
		nc.mu.Unlock()
		nc.processOpErr(ErrStaleConnection)
		return
	}

	// nil channel: no caller is waiting on this ping.
	nc.sendPing(nil)
	nc.ptmr.Reset(nc.Opts.PingInterval)
	nc.mu.Unlock()
}
-
// FlushTimeout allows a Flush operation to have an associated timeout.
// It returns ErrTimeout when the PONG is not received in time, and
// ErrConnectionClosed when the connection goes away while waiting.
func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) {
	if nc == nil {
		return ErrInvalidConnection
	}
	if timeout <= 0 {
		return ErrBadTimeout
	}

	nc.mu.Lock()
	if nc.isClosed() {
		nc.mu.Unlock()
		return ErrConnectionClosed
	}
	t := globalTimerPool.Get(timeout)
	defer globalTimerPool.Put(t)

	// Create a buffered channel to prevent chan send to block
	// in processPong() if this code here times out just when
	// PONG was received.
	ch := make(chan struct{}, 1)
	nc.sendPing(ch)
	nc.mu.Unlock()

	select {
	case _, ok := <-ch:
		if !ok {
			// Channel was closed (see clearPendingFlushCalls): the
			// connection is going away.
			err = ErrConnectionClosed
		} else {
			// PONG received; we own the channel, release it.
			close(ch)
		}
	case <-t.C:
		err = ErrTimeout
	}

	// On failure, drop our entry from the pong list so a late PONG is not
	// matched against a waiter that is gone.
	if err != nil {
		nc.removeFlushEntry(ch)
	}
	return
}
-
-// RTT calculates the round trip time between this client and the server.
-func (nc *Conn) RTT() (time.Duration, error) {
- if nc.IsClosed() {
- return 0, ErrConnectionClosed
- }
- if nc.IsReconnecting() {
- return 0, ErrDisconnected
- }
- start := time.Now()
- if err := nc.FlushTimeout(10 * time.Second); err != nil {
- return 0, err
- }
- return time.Since(start), nil
-}
-
// Flush will perform a round trip to the server and return when it
// receives the internal reply. Equivalent to FlushTimeout with a
// 10 second timeout.
func (nc *Conn) Flush() error {
	return nc.FlushTimeout(10 * time.Second)
}
-
// Buffered will return the number of bytes buffered to be sent to the server.
// Returns -1 with ErrConnectionClosed when the connection is closed.
// FIXME(dlc) take into account disconnected state.
func (nc *Conn) Buffered() (int, error) {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	if nc.isClosed() || nc.bw == nil {
		return -1, ErrConnectionClosed
	}
	return nc.bw.buffered(), nil
}
-
// resendSubscriptions will send our subscription state back to the
// server. Used in reconnects
func (nc *Conn) resendSubscriptions() {
	// Since we are going to send protocols to the server, we don't want to
	// be holding the subsMu lock (which is used in processMsg). So copy
	// the subscriptions in a temporary array.
	nc.subsMu.RLock()
	subs := make([]*Subscription, 0, len(nc.subs))
	for _, s := range nc.subs {
		subs = append(subs, s)
	}
	nc.subsMu.RUnlock()
	for _, s := range subs {
		adjustedMax := uint64(0)
		s.mu.Lock()
		if s.max > 0 {
			if s.delivered < s.max {
				adjustedMax = s.max - s.delivered
			}
			// adjustedMax could be 0 here if the number of delivered msgs
			// reached the max, if so unsubscribe.
			if adjustedMax == 0 {
				s.mu.Unlock()
				nc.bw.writeDirect(fmt.Sprintf(unsubProto, s.sid, _EMPTY_))
				continue
			}
		}
		subj, queue, sid := s.Subject, s.Queue, s.sid
		s.mu.Unlock()

		// Re-establish interest, then re-apply any remaining
		// auto-unsubscribe max.
		nc.bw.writeDirect(fmt.Sprintf(subProto, subj, queue, sid))
		if adjustedMax > 0 {
			maxStr := strconv.Itoa(int(adjustedMax))
			nc.bw.writeDirect(fmt.Sprintf(unsubProto, sid, maxStr))
		}
	}
}
-
-// This will clear any pending flush calls and release pending calls.
-// Lock is assumed to be held by the caller.
-func (nc *Conn) clearPendingFlushCalls() {
- // Clear any queued pongs, e.g. pending flush calls.
- for _, ch := range nc.pongs {
- if ch != nil {
- close(ch)
- }
- }
- nc.pongs = nil
-}
-
-// This will clear any pending Request calls.
-// Lock is assumed to be held by the caller.
-func (nc *Conn) clearPendingRequestCalls() {
- if nc.respMap == nil {
- return
- }
- for key, ch := range nc.respMap {
- if ch != nil {
- close(ch)
- delete(nc.respMap, key)
- }
- }
-}
-
// Low level close call that will do correct cleanup and set
// desired status. Also controls whether user defined callbacks
// will be triggered. The lock should not be held entering this
// function. This function will handle the locking manually.
func (nc *Conn) close(status Status, doCBs bool, err error) {
	nc.mu.Lock()
	// Already closed: just record the desired status and return.
	if nc.isClosed() {
		nc.status = status
		nc.mu.Unlock()
		return
	}
	nc.status = CLOSED

	// Kick the Go routines so they fall out.
	nc.kickFlusher()

	// If the reconnect timer is waiting between a reconnect attempt,
	// this will kick it out.
	if nc.rqch != nil {
		close(nc.rqch)
		nc.rqch = nil
	}

	// Clear any queued pongs, e.g. pending flush calls.
	nc.clearPendingFlushCalls()

	// Clear any queued and blocking Requests.
	nc.clearPendingRequestCalls()

	// Stop ping timer if set.
	nc.stopPingTimer()
	nc.ptmr = nil

	// Need to close and set TCP conn to nil if reconnect loop has stopped,
	// otherwise we would incorrectly invoke Disconnect handler (if set)
	// down below.
	if nc.ar && nc.conn != nil {
		nc.conn.Close()
		nc.conn = nil
	} else if nc.conn != nil {
		// Go ahead and make sure we have flushed the outbound
		nc.bw.flush()
		defer nc.conn.Close()
	}

	// Close sync subscriber channels and release any
	// pending NextMsg() calls.
	nc.subsMu.Lock()
	for _, s := range nc.subs {
		s.mu.Lock()

		// Release callers on NextMsg for SyncSubscription only
		if s.mch != nil && s.typ == SyncSubscription {
			close(s.mch)
		}
		s.mch = nil
		// Mark as invalid, for signaling to waitForMsgs
		s.closed = true
		// Mark connection closed in subscription
		s.connClosed = true
		// If we have an async subscription, signals it to exit
		if s.typ == AsyncSubscription && s.pCond != nil {
			s.pCond.Signal()
		}

		s.mu.Unlock()
	}
	nc.subs = nil
	nc.subsMu.Unlock()

	nc.changeConnStatus(status)

	// Perform appropriate callback if needed for a disconnect.
	if doCBs {
		// nc.conn is nil here when the reconnect loop stopped (see above),
		// which suppresses the disconnect callbacks.
		if nc.conn != nil {
			if disconnectedErrCB := nc.Opts.DisconnectedErrCB; disconnectedErrCB != nil {
				nc.ach.push(func() { disconnectedErrCB(nc, err) })
			} else if disconnectedCB := nc.Opts.DisconnectedCB; disconnectedCB != nil {
				nc.ach.push(func() { disconnectedCB(nc) })
			}
		}
		if nc.Opts.ClosedCB != nil {
			nc.ach.push(func() { nc.Opts.ClosedCB(nc) })
		}
	}
	// If this is terminal, then we have to notify the asyncCB handler that
	// it can exit once all async callbacks have been dispatched.
	if status == CLOSED {
		nc.ach.close()
	}
	nc.mu.Unlock()
}
-
// Close will close the connection to the server. This call will release
// all blocking calls, such as Flush() and NextMsg()
func (nc *Conn) Close() {
	if nc != nil {
		// This will be a no-op if the connection was not websocket.
		// We do this here as opposed to inside close() because we want
		// to do this only for the final user-driven close of the client.
		// Otherwise, we would need to change close() to pass a boolean
		// indicating that this is the case.
		nc.wsClose()
		nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil)
	}
}
-
// IsClosed tests if a Conn has been closed.
// Safe for concurrent use; takes the connection read lock.
func (nc *Conn) IsClosed() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isClosed()
}

// IsReconnecting tests if a Conn is reconnecting.
// Safe for concurrent use; takes the connection read lock.
func (nc *Conn) IsReconnecting() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isReconnecting()
}

// IsConnected tests if a Conn is connected.
// Safe for concurrent use; takes the connection read lock.
func (nc *Conn) IsConnected() bool {
	nc.mu.RLock()
	defer nc.mu.RUnlock()
	return nc.isConnected()
}
-
// drainConnection will run in a separate Go routine and will
// flush all publishes and drain all active subscriptions.
func (nc *Conn) drainConnection() {
	// Snapshot subs list.
	nc.mu.Lock()

	// Check again here if we are in a state to not process.
	if nc.isClosed() {
		nc.mu.Unlock()
		return
	}
	if nc.isConnecting() || nc.isReconnecting() {
		nc.mu.Unlock()
		// Move to closed state.
		nc.Close()
		return
	}

	subs := make([]*Subscription, 0, len(nc.subs))
	for _, s := range nc.subs {
		if s == nc.respMux {
			// Skip since might be in use while messages
			// are being processed (can miss responses).
			continue
		}
		subs = append(subs, s)
	}
	errCB := nc.Opts.AsyncErrorCB
	drainWait := nc.Opts.DrainTimeout
	respMux := nc.respMux
	nc.mu.Unlock()

	// for pushing errors with context.
	pushErr := func(err error) {
		nc.mu.Lock()
		nc.err = err
		if errCB != nil {
			nc.ach.push(func() { errCB(nc, nil, err) })
		}
		nc.mu.Unlock()
	}

	// Do subs first, skip request handler if present.
	for _, s := range subs {
		if err := s.Drain(); err != nil {
			// We will notify about these but continue.
			pushErr(err)
		}
	}

	// Wait for the subscriptions to drop to zero.
	// If the request/response mux is still registered, it accounts for one
	// remaining subscription.
	timeout := time.Now().Add(drainWait)
	var min int
	if respMux != nil {
		min = 1
	} else {
		min = 0
	}
	for time.Now().Before(timeout) {
		if nc.NumSubscriptions() == min {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}

	// In case there was a request/response handler
	// then need to call drain at the end.
	if respMux != nil {
		if err := respMux.Drain(); err != nil {
			// We will notify about these but continue.
			pushErr(err)
		}
		for time.Now().Before(timeout) {
			if nc.NumSubscriptions() == 0 {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}

	// Check if we timed out.
	if nc.NumSubscriptions() != 0 {
		pushErr(ErrDrainTimeout)
	}

	// Flip State
	nc.mu.Lock()
	nc.changeConnStatus(DRAINING_PUBS)
	nc.mu.Unlock()

	// Do publish drain via Flush() call.
	err := nc.FlushTimeout(5 * time.Second)
	if err != nil {
		pushErr(err)
	}

	// Move to closed state.
	nc.Close()
}
-
// Drain will put a connection into a drain state. All subscriptions will
// immediately be put into a drain state. Upon completion, the publishers
// will be drained and can not publish any additional messages. Upon draining
// of the publishers, the connection will be closed. Use the ClosedCB()
// option to know when the connection has moved from draining to closed.
//
// See note in Subscription.Drain for JetStream subscriptions.
func (nc *Conn) Drain() error {
	nc.mu.Lock()
	if nc.isClosed() {
		nc.mu.Unlock()
		return ErrConnectionClosed
	}
	if nc.isConnecting() || nc.isReconnecting() {
		nc.mu.Unlock()
		nc.Close()
		return ErrConnectionReconnecting
	}
	// Already draining: nothing to do.
	if nc.isDraining() {
		nc.mu.Unlock()
		return nil
	}
	nc.changeConnStatus(DRAINING_SUBS)
	// The actual drain happens asynchronously; see drainConnection.
	go nc.drainConnection()
	nc.mu.Unlock()

	return nil
}
-
-// IsDraining tests if a Conn is in the draining state.
-func (nc *Conn) IsDraining() bool {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.isDraining()
-}
-
-// caller must lock
-func (nc *Conn) getServers(implicitOnly bool) []string {
- poolSize := len(nc.srvPool)
- var servers = make([]string, 0)
- for i := 0; i < poolSize; i++ {
- if implicitOnly && !nc.srvPool[i].isImplicit {
- continue
- }
- url := nc.srvPool[i].url
- servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host))
- }
- return servers
-}
-
-// Servers returns the list of known server urls, including additional
-// servers discovered after a connection has been established. If
-// authentication is enabled, use UserInfo or Token when connecting with
-// these urls.
-func (nc *Conn) Servers() []string {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.getServers(false)
-}
-
-// DiscoveredServers returns only the server urls that have been discovered
-// after a connection has been established. If authentication is enabled,
-// use UserInfo or Token when connecting with these urls.
-func (nc *Conn) DiscoveredServers() []string {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.getServers(true)
-}
-
-// Status returns the current state of the connection.
-func (nc *Conn) Status() Status {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.status
-}
-
-// Test if Conn has been closed Lock is assumed held.
-func (nc *Conn) isClosed() bool {
- return nc.status == CLOSED
-}
-
-// Test if Conn is in the process of connecting
-func (nc *Conn) isConnecting() bool {
- return nc.status == CONNECTING
-}
-
-// Test if Conn is being reconnected.
-func (nc *Conn) isReconnecting() bool {
- return nc.status == RECONNECTING
-}
-
-// Test if Conn is connected or connecting.
-func (nc *Conn) isConnected() bool {
- return nc.status == CONNECTED || nc.isDraining()
-}
-
-// Test if Conn is in the draining state.
-func (nc *Conn) isDraining() bool {
- return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS
-}
-
-// Test if Conn is in the draining state for pubs.
-func (nc *Conn) isDrainingPubs() bool {
- return nc.status == DRAINING_PUBS
-}
-
-// Stats will return a race safe copy of the Statistics section for the connection.
-func (nc *Conn) Stats() Statistics {
- // Stats are updated either under connection's mu or with atomic operations
- // for inbound stats in processMsg().
- nc.mu.Lock()
- stats := Statistics{
- InMsgs: atomic.LoadUint64(&nc.InMsgs),
- InBytes: atomic.LoadUint64(&nc.InBytes),
- OutMsgs: nc.OutMsgs,
- OutBytes: nc.OutBytes,
- Reconnects: nc.Reconnects,
- }
- nc.mu.Unlock()
- return stats
-}
-
-// MaxPayload returns the size limit that a message payload can have.
-// This is set by the server configuration and delivered to the client
-// upon connect.
-func (nc *Conn) MaxPayload() int64 {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.info.MaxPayload
-}
-
-// HeadersSupported will return if the server supports headers
-func (nc *Conn) HeadersSupported() bool {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.info.Headers
-}
-
-// AuthRequired will return if the connected server requires authorization.
-func (nc *Conn) AuthRequired() bool {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.info.AuthRequired
-}
-
-// TLSRequired will return if the connected server requires TLS connections.
-func (nc *Conn) TLSRequired() bool {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- return nc.info.TLSRequired
-}
-
-// Barrier schedules the given function `f` to all registered asynchronous
-// subscriptions.
-// Only the last subscription to see this barrier will invoke the function.
-// If no subscription is registered at the time of this call, `f()` is invoked
-// right away.
-// ErrConnectionClosed is returned if the connection is closed prior to
-// the call.
-func (nc *Conn) Barrier(f func()) error {
- nc.mu.Lock()
- if nc.isClosed() {
- nc.mu.Unlock()
- return ErrConnectionClosed
- }
- nc.subsMu.Lock()
- // Need to figure out how many non chan subscriptions there are
- numSubs := 0
- for _, sub := range nc.subs {
- if sub.typ == AsyncSubscription {
- numSubs++
- }
- }
- if numSubs == 0 {
- nc.subsMu.Unlock()
- nc.mu.Unlock()
- f()
- return nil
- }
- barrier := &barrierInfo{refs: int64(numSubs), f: f}
- for _, sub := range nc.subs {
- sub.mu.Lock()
- if sub.mch == nil {
- msg := &Msg{barrier: barrier}
- // Push onto the async pList
- if sub.pTail != nil {
- sub.pTail.next = msg
- } else {
- sub.pHead = msg
- sub.pCond.Signal()
- }
- sub.pTail = msg
- }
- sub.mu.Unlock()
- }
- nc.subsMu.Unlock()
- nc.mu.Unlock()
- return nil
-}
-
-// GetClientIP returns the client IP as known by the server.
-// Supported as of server version 2.1.6.
-func (nc *Conn) GetClientIP() (net.IP, error) {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- if nc.isClosed() {
- return nil, ErrConnectionClosed
- }
- if nc.info.ClientIP == "" {
- return nil, ErrClientIPNotSupported
- }
- ip := net.ParseIP(nc.info.ClientIP)
- return ip, nil
-}
-
-// GetClientID returns the client ID assigned by the server to which
-// the client is currently connected to. Note that the value may change if
-// the client reconnects.
-// This function returns ErrClientIDNotSupported if the server is of a
-// version prior to 1.2.0.
-func (nc *Conn) GetClientID() (uint64, error) {
- nc.mu.RLock()
- defer nc.mu.RUnlock()
- if nc.isClosed() {
- return 0, ErrConnectionClosed
- }
- if nc.info.CID == 0 {
- return 0, ErrClientIDNotSupported
- }
- return nc.info.CID, nil
-}
-
-// StatusChanged returns a channel on which given list of connection status changes will be reported.
-// If no statuses are provided, defaults will be used: CONNECTED, RECONNECTING, DISCONNECTED, CLOSED.
-func (nc *Conn) StatusChanged(statuses ...Status) chan Status {
- if len(statuses) == 0 {
- statuses = []Status{CONNECTED, RECONNECTING, DISCONNECTED, CLOSED}
- }
- ch := make(chan Status, 10)
- for _, s := range statuses {
- nc.registerStatusChangeListener(s, ch)
- }
- return ch
-}
-
-// registerStatusChangeListener registers a channel waiting for a specific status change event.
-// Status change events are non-blocking - if no receiver is waiting for the status change,
-// it will not be sent on the channel. Closed channels are ignored.
-func (nc *Conn) registerStatusChangeListener(status Status, ch chan Status) {
- nc.mu.Lock()
- defer nc.mu.Unlock()
- if nc.statListeners == nil {
- nc.statListeners = make(map[Status][]chan Status)
- }
- if _, ok := nc.statListeners[status]; !ok {
- nc.statListeners[status] = make([]chan Status, 0)
- }
- nc.statListeners[status] = append(nc.statListeners[status], ch)
-}
-
-// sendStatusEvent sends connection status event to all channels.
-// If channel is closed, or there is no listener, sendStatusEvent
-// will not block. Lock should be held entering.
-func (nc *Conn) sendStatusEvent(s Status) {
-Loop:
- for i := 0; i < len(nc.statListeners[s]); i++ {
- // make sure channel is not closed
- select {
- case <-nc.statListeners[s][i]:
- // if chan is closed, remove it
- nc.statListeners[s][i] = nc.statListeners[s][len(nc.statListeners[s])-1]
- nc.statListeners[s] = nc.statListeners[s][:len(nc.statListeners[s])-1]
- i--
- continue Loop
- default:
- }
- // only send event if someone's listening
- select {
- case nc.statListeners[s][i] <- s:
- default:
- }
- }
-}
-
-// changeConnStatus changes connections status and sends events
-// to all listeners. Lock should be held entering.
-func (nc *Conn) changeConnStatus(status Status) {
- if nc == nil {
- return
- }
- nc.sendStatusEvent(status)
- nc.status = status
-}
-
-// NkeyOptionFromSeed will load an nkey pair from a seed file.
-// It will return the NKey Option and will handle
-// signing of nonce challenges from the server. It will take
-// care to not hold keys in memory and to wipe memory.
-func NkeyOptionFromSeed(seedFile string) (Option, error) {
- kp, err := nkeyPairFromSeedFile(seedFile)
- if err != nil {
- return nil, err
- }
- // Wipe our key on exit.
- defer kp.Wipe()
-
- pub, err := kp.PublicKey()
- if err != nil {
- return nil, err
- }
- if !nkeys.IsValidPublicUserKey(pub) {
- return nil, fmt.Errorf("nats: Not a valid nkey user seed")
- }
- sigCB := func(nonce []byte) ([]byte, error) {
- return sigHandler(nonce, seedFile)
- }
- return Nkey(string(pub), sigCB), nil
-}
-
-// Just wipe slice with 'x', for clearing contents of creds or nkey seed file.
-func wipeSlice(buf []byte) {
- for i := range buf {
- buf[i] = 'x'
- }
-}
-
-func userFromFile(userFile string) (string, error) {
- path, err := expandPath(userFile)
- if err != nil {
- return _EMPTY_, fmt.Errorf("nats: %w", err)
- }
-
- contents, err := os.ReadFile(path)
- if err != nil {
- return _EMPTY_, fmt.Errorf("nats: %w", err)
- }
- defer wipeSlice(contents)
- return nkeys.ParseDecoratedJWT(contents)
-}
-
-func homeDir() (string, error) {
- if runtime.GOOS == "windows" {
- homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
- userProfile := os.Getenv("USERPROFILE")
-
- var home string
- if homeDrive == "" || homePath == "" {
- if userProfile == "" {
- return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
- }
- home = userProfile
- } else {
- home = filepath.Join(homeDrive, homePath)
- }
-
- return home, nil
- }
-
- home := os.Getenv("HOME")
- if home == "" {
- return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME")
- }
- return home, nil
-}
-
-func expandPath(p string) (string, error) {
- p = os.ExpandEnv(p)
-
- if !strings.HasPrefix(p, "~") {
- return p, nil
- }
-
- home, err := homeDir()
- if err != nil {
- return _EMPTY_, err
- }
-
- return filepath.Join(home, p[1:]), nil
-}
-
-func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) {
- contents, err := os.ReadFile(seedFile)
- if err != nil {
- return nil, fmt.Errorf("nats: %w", err)
- }
- defer wipeSlice(contents)
- return nkeys.ParseDecoratedNKey(contents)
-}
-
-// Sign authentication challenges from the server.
-// Do not keep private seed in memory.
-func sigHandler(nonce []byte, seedFile string) ([]byte, error) {
- kp, err := nkeyPairFromSeedFile(seedFile)
- if err != nil {
- return nil, fmt.Errorf("unable to extract key pair from file %q: %w", seedFile, err)
- }
- // Wipe our key on exit.
- defer kp.Wipe()
-
- sig, _ := kp.Sign(nonce)
- return sig, nil
-}
-
-type timeoutWriter struct {
- timeout time.Duration
- conn net.Conn
- err error
-}
-
-// Write implements the io.Writer interface.
-func (tw *timeoutWriter) Write(p []byte) (int, error) {
- if tw.err != nil {
- return 0, tw.err
- }
-
- var n int
- tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout))
- n, tw.err = tw.conn.Write(p)
- tw.conn.SetWriteDeadline(time.Time{})
- return n, tw.err
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/netchan.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/netchan.go
deleted file mode 100644
index 6b13690..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/netchan.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2013-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "errors"
- "reflect"
-)
-
-// This allows the functionality for network channels by binding send and receive Go chans
-// to subjects and optionally queue groups.
-// Data will be encoded and decoded via the EncodedConn and its associated encoders.
-
-// BindSendChan binds a channel for send operations to NATS.
-func (c *EncodedConn) BindSendChan(subject string, channel any) error {
- chVal := reflect.ValueOf(channel)
- if chVal.Kind() != reflect.Chan {
- return ErrChanArg
- }
- go chPublish(c, chVal, subject)
- return nil
-}
-
-// Publish all values that arrive on the channel until it is closed or we
-// encounter an error.
-func chPublish(c *EncodedConn, chVal reflect.Value, subject string) {
- for {
- val, ok := chVal.Recv()
- if !ok {
- // Channel has most likely been closed.
- return
- }
- if e := c.Publish(subject, val.Interface()); e != nil {
- // Do this under lock.
- c.Conn.mu.Lock()
- defer c.Conn.mu.Unlock()
-
- if c.Conn.Opts.AsyncErrorCB != nil {
- // FIXME(dlc) - Not sure this is the right thing to do.
- // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback
- if c.Conn.isClosed() {
- go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e)
- } else {
- c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) })
- }
- }
- return
- }
- }
-}
-
-// BindRecvChan binds a channel for receive operations from NATS.
-func (c *EncodedConn) BindRecvChan(subject string, channel any) (*Subscription, error) {
- return c.bindRecvChan(subject, _EMPTY_, channel)
-}
-
-// BindRecvQueueChan binds a channel for queue-based receive operations from NATS.
-func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel any) (*Subscription, error) {
- return c.bindRecvChan(subject, queue, channel)
-}
-
-// Internal function to bind receive operations for a channel.
-func (c *EncodedConn) bindRecvChan(subject, queue string, channel any) (*Subscription, error) {
- chVal := reflect.ValueOf(channel)
- if chVal.Kind() != reflect.Chan {
- return nil, ErrChanArg
- }
- argType := chVal.Type().Elem()
-
- cb := func(m *Msg) {
- var oPtr reflect.Value
- if argType.Kind() != reflect.Ptr {
- oPtr = reflect.New(argType)
- } else {
- oPtr = reflect.New(argType.Elem())
- }
- if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
- c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error())
- if c.Conn.Opts.AsyncErrorCB != nil {
- c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) })
- }
- return
- }
- if argType.Kind() != reflect.Ptr {
- oPtr = reflect.Indirect(oPtr)
- }
- // This is a bit hacky, but in this instance we may be trying to send to a closed channel.
- // and the user does not know when it is safe to close the channel.
- defer func() {
- // If we have panicked, recover and close the subscription.
- if r := recover(); r != nil {
- m.Sub.Unsubscribe()
- }
- }()
- // Actually do the send to the channel.
- chVal.Send(oPtr)
- }
-
- return c.Conn.subscribe(subject, queue, cb, nil, false, nil)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/object.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/object.go
deleted file mode 100644
index 75ceaa8..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/object.go
+++ /dev/null
@@ -1,1428 +0,0 @@
-// Copyright 2021-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "bytes"
- "context"
- "crypto/sha256"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "hash"
- "io"
- "net"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/nats-io/nats.go/internal/parser"
- "github.com/nats-io/nuid"
-)
-
-// ObjectStoreManager creates, loads and deletes Object Stores
-type ObjectStoreManager interface {
- // ObjectStore will look up and bind to an existing object store instance.
- ObjectStore(bucket string) (ObjectStore, error)
- // CreateObjectStore will create an object store.
- CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error)
- // DeleteObjectStore will delete the underlying stream for the named object.
- DeleteObjectStore(bucket string) error
- // ObjectStoreNames is used to retrieve a list of bucket names
- ObjectStoreNames(opts ...ObjectOpt) <-chan string
- // ObjectStores is used to retrieve a list of bucket statuses
- ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus
-}
-
-// ObjectStore is a blob store capable of storing large objects efficiently in
-// JetStream streams
-type ObjectStore interface {
- // Put will place the contents from the reader into a new object.
- Put(obj *ObjectMeta, reader io.Reader, opts ...ObjectOpt) (*ObjectInfo, error)
- // Get will pull the named object from the object store.
- Get(name string, opts ...GetObjectOpt) (ObjectResult, error)
-
- // PutBytes is convenience function to put a byte slice into this object store.
- PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error)
- // GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
- GetBytes(name string, opts ...GetObjectOpt) ([]byte, error)
-
- // PutString is convenience function to put a string into this object store.
- PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error)
- // GetString is a convenience function to pull an object from this object store and return it as a string.
- GetString(name string, opts ...GetObjectOpt) (string, error)
-
- // PutFile is convenience function to put a file into this object store.
- PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error)
- // GetFile is a convenience function to pull an object from this object store and place it in a file.
- GetFile(name, file string, opts ...GetObjectOpt) error
-
- // GetInfo will retrieve the current information for the object.
- GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error)
- // UpdateMeta will update the metadata for the object.
- UpdateMeta(name string, meta *ObjectMeta) error
-
- // Delete will delete the named object.
- Delete(name string) error
-
- // AddLink will add a link to another object.
- AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error)
-
- // AddBucketLink will add a link to another object store.
- AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error)
-
- // Seal will seal the object store, no further modifications will be allowed.
- Seal() error
-
- // Watch for changes in the underlying store and receive meta information updates.
- Watch(opts ...WatchOpt) (ObjectWatcher, error)
-
- // List will list all the objects in this store.
- List(opts ...ListObjectsOpt) ([]*ObjectInfo, error)
-
- // Status retrieves run-time status about the backing store of the bucket.
- Status() (ObjectStoreStatus, error)
-}
-
-type ObjectOpt interface {
- configureObject(opts *objOpts) error
-}
-
-type objOpts struct {
- ctx context.Context
-}
-
-// For nats.Context() support.
-func (ctx ContextOpt) configureObject(opts *objOpts) error {
- opts.ctx = ctx
- return nil
-}
-
-// ObjectWatcher is what is returned when doing a watch.
-type ObjectWatcher interface {
- // Updates returns a channel to read any updates to entries.
- Updates() <-chan *ObjectInfo
- // Stop will stop this watcher.
- Stop() error
-}
-
-var (
- ErrObjectConfigRequired = errors.New("nats: object-store config required")
- ErrBadObjectMeta = errors.New("nats: object-store meta information invalid")
- ErrObjectNotFound = errors.New("nats: object not found")
- ErrInvalidStoreName = errors.New("nats: invalid object-store name")
- ErrDigestMismatch = errors.New("nats: received a corrupt object, digests do not match")
- ErrInvalidDigestFormat = errors.New("nats: object digest hash has invalid format")
- ErrNoObjectsFound = errors.New("nats: no objects found")
- ErrObjectAlreadyExists = errors.New("nats: an object already exists with that name")
- ErrNameRequired = errors.New("nats: name is required")
- ErrNeeds262 = errors.New("nats: object-store requires at least server version 2.6.2")
- ErrLinkNotAllowed = errors.New("nats: link cannot be set when putting the object in bucket")
- ErrObjectRequired = errors.New("nats: object required")
- ErrNoLinkToDeleted = errors.New("nats: not allowed to link to a deleted object")
- ErrNoLinkToLink = errors.New("nats: not allowed to link to another link")
- ErrCantGetBucket = errors.New("nats: invalid Get, object is a link to a bucket")
- ErrBucketRequired = errors.New("nats: bucket required")
- ErrBucketMalformed = errors.New("nats: bucket malformed")
- ErrUpdateMetaDeleted = errors.New("nats: cannot update meta for a deleted object")
-)
-
-// ObjectStoreConfig is the config for the object store.
-type ObjectStoreConfig struct {
- Bucket string `json:"bucket"`
- Description string `json:"description,omitempty"`
- TTL time.Duration `json:"max_age,omitempty"`
- MaxBytes int64 `json:"max_bytes,omitempty"`
- Storage StorageType `json:"storage,omitempty"`
- Replicas int `json:"num_replicas,omitempty"`
- Placement *Placement `json:"placement,omitempty"`
-
- // Bucket-specific metadata
- // NOTE: Metadata requires nats-server v2.10.0+
- Metadata map[string]string `json:"metadata,omitempty"`
- // Enable underlying stream compression.
- // NOTE: Compression is supported for nats-server 2.10.0+
- Compression bool `json:"compression,omitempty"`
-}
-
-type ObjectStoreStatus interface {
- // Bucket is the name of the bucket
- Bucket() string
- // Description is the description supplied when creating the bucket
- Description() string
- // TTL indicates how long objects are kept in the bucket
- TTL() time.Duration
- // Storage indicates the underlying JetStream storage technology used to store data
- Storage() StorageType
- // Replicas indicates how many storage replicas are kept for the data in the bucket
- Replicas() int
- // Sealed indicates the stream is sealed and cannot be modified in any way
- Sealed() bool
- // Size is the combined size of all data in the bucket including metadata, in bytes
- Size() uint64
- // BackingStore provides details about the underlying storage
- BackingStore() string
- // Metadata is the user supplied metadata for the bucket
- Metadata() map[string]string
- // IsCompressed indicates if the data is compressed on disk
- IsCompressed() bool
-}
-
-// ObjectMetaOptions
-type ObjectMetaOptions struct {
- Link *ObjectLink `json:"link,omitempty"`
- ChunkSize uint32 `json:"max_chunk_size,omitempty"`
-}
-
-// ObjectMeta is high level information about an object.
-type ObjectMeta struct {
- Name string `json:"name"`
- Description string `json:"description,omitempty"`
- Headers Header `json:"headers,omitempty"`
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Optional options.
- Opts *ObjectMetaOptions `json:"options,omitempty"`
-}
-
-// ObjectInfo is meta plus instance information.
-type ObjectInfo struct {
- ObjectMeta
- Bucket string `json:"bucket"`
- NUID string `json:"nuid"`
- Size uint64 `json:"size"`
- ModTime time.Time `json:"mtime"`
- Chunks uint32 `json:"chunks"`
- Digest string `json:"digest,omitempty"`
- Deleted bool `json:"deleted,omitempty"`
-}
-
-// ObjectLink is used to embed links to other buckets and objects.
-type ObjectLink struct {
- // Bucket is the name of the other object store.
- Bucket string `json:"bucket"`
- // Name can be used to link to a single object.
- // If empty means this is a link to the whole store, like a directory.
- Name string `json:"name,omitempty"`
-}
-
-// ObjectResult will return the underlying stream info and also be an io.ReadCloser.
-type ObjectResult interface {
- io.ReadCloser
- Info() (*ObjectInfo, error)
- Error() error
-}
-
-const (
- objNameTmpl = "OBJ_%s" // OBJ_ // stream name
- objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject
- objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject
- objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject
- objMetaPreTmpl = "$O.%s.M.%s" // $O..M. // meta message subject
- objNoPending = "0"
- objDefaultChunkSize = uint32(128 * 1024) // 128k
- objDigestType = "SHA-256="
- objDigestTmpl = objDigestType + "%s"
-)
-
-type obs struct {
- name string
- stream string
- js *js
-}
-
-// CreateObjectStore will create an object store.
-func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) {
- if !js.nc.serverMinVersion(2, 6, 2) {
- return nil, ErrNeeds262
- }
- if cfg == nil {
- return nil, ErrObjectConfigRequired
- }
- if !validBucketRe.MatchString(cfg.Bucket) {
- return nil, ErrInvalidStoreName
- }
-
- name := cfg.Bucket
- chunks := fmt.Sprintf(objAllChunksPreTmpl, name)
- meta := fmt.Sprintf(objAllMetaPreTmpl, name)
-
- // We will set explicitly some values so that we can do comparison
- // if we get an "already in use" error and need to check if it is same.
- // See kv
- replicas := cfg.Replicas
- if replicas == 0 {
- replicas = 1
- }
- maxBytes := cfg.MaxBytes
- if maxBytes == 0 {
- maxBytes = -1
- }
- var compression StoreCompression
- if cfg.Compression {
- compression = S2Compression
- }
- scfg := &StreamConfig{
- Name: fmt.Sprintf(objNameTmpl, name),
- Description: cfg.Description,
- Subjects: []string{chunks, meta},
- MaxAge: cfg.TTL,
- MaxBytes: maxBytes,
- Storage: cfg.Storage,
- Replicas: replicas,
- Placement: cfg.Placement,
- Discard: DiscardNew,
- AllowRollup: true,
- AllowDirect: true,
- Metadata: cfg.Metadata,
- Compression: compression,
- }
-
- // Create our stream.
- _, err := js.AddStream(scfg)
- if err != nil {
- return nil, err
- }
-
- return &obs{name: name, stream: scfg.Name, js: js}, nil
-}
-
-// ObjectStore will look up and bind to an existing object store instance.
-func (js *js) ObjectStore(bucket string) (ObjectStore, error) {
- if !validBucketRe.MatchString(bucket) {
- return nil, ErrInvalidStoreName
- }
- if !js.nc.serverMinVersion(2, 6, 2) {
- return nil, ErrNeeds262
- }
-
- stream := fmt.Sprintf(objNameTmpl, bucket)
- si, err := js.StreamInfo(stream)
- if err != nil {
- return nil, err
- }
- return &obs{name: bucket, stream: si.Config.Name, js: js}, nil
-}
-
-// DeleteObjectStore will delete the underlying stream for the named object.
-func (js *js) DeleteObjectStore(bucket string) error {
- stream := fmt.Sprintf(objNameTmpl, bucket)
- return js.DeleteStream(stream)
-}
-
-func encodeName(name string) string {
- return base64.URLEncoding.EncodeToString([]byte(name))
-}
-
-// Put will place the contents from the reader into this object-store.
-func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) {
- if meta == nil || meta.Name == "" {
- return nil, ErrBadObjectMeta
- }
-
- if meta.Opts == nil {
- meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize}
- } else if meta.Opts.Link != nil {
- return nil, ErrLinkNotAllowed
- } else if meta.Opts.ChunkSize == 0 {
- meta.Opts.ChunkSize = objDefaultChunkSize
- }
-
- var o objOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureObject(&o); err != nil {
- return nil, err
- }
- }
- }
- ctx := o.ctx
-
- // Create the new nuid so chunks go on a new subject if the name is re-used
- newnuid := nuid.Next()
-
- // These will be used in more than one place
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid)
-
- // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem
- // Chunks on the old nuid can be cleaned up at the end
- einfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name
- if err != nil && err != ErrObjectNotFound {
- return nil, err
- }
-
- // For async error handling
- var perr error
- var mu sync.Mutex
- setErr := func(err error) {
- mu.Lock()
- defer mu.Unlock()
- perr = err
- }
- getErr := func() error {
- mu.Lock()
- defer mu.Unlock()
- return perr
- }
-
- // Create our own JS context to handle errors etc.
- jetStream, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) }))
- if err != nil {
- return nil, err
- }
-
- defer jetStream.(*js).cleanupReplySub()
-
- purgePartial := func() error {
- // wait until all pubs are complete or up to default timeout before attempting purge
- select {
- case <-jetStream.PublishAsyncComplete():
- case <-time.After(obs.js.opts.wait):
- }
- if err := obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}); err != nil {
- return fmt.Errorf("could not cleanup bucket after erroneous put operation: %w", err)
- }
- return nil
- }
-
- m, h := NewMsg(chunkSubj), sha256.New()
- chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0)
-
- // set up the info object. The chunk upload sets the size and digest
- info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: *meta}
-
- for r != nil {
- if ctx != nil {
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- err = ctx.Err()
- } else {
- err = ErrTimeout
- }
- default:
- }
- if err != nil {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(err, purgeErr)
- }
- return nil, err
- }
- }
-
- // Actual read.
- // TODO(dlc) - Deadline?
- n, readErr := r.Read(chunk)
-
- // Handle all non EOF errors
- if readErr != nil && readErr != io.EOF {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(readErr, purgeErr)
- }
- return nil, readErr
- }
-
- // Add chunk only if we received data
- if n > 0 {
- // Chunk processing.
- m.Data = chunk[:n]
- h.Write(m.Data)
-
- // Send msg itself.
- if _, err := jetStream.PublishMsgAsync(m); err != nil {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(err, purgeErr)
- }
- return nil, err
- }
- if err := getErr(); err != nil {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(err, purgeErr)
- }
- return nil, err
- }
- // Update totals.
- sent++
- total += uint64(n)
- }
-
- // EOF Processing.
- if readErr == io.EOF {
- // Place meta info.
- info.Size, info.Chunks = uint64(total), uint32(sent)
- info.Digest = GetObjectDigestValue(h)
- break
- }
- }
-
- // Prepare the meta message
- metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name))
- mm := NewMsg(metaSubj)
- mm.Header.Set(MsgRollup, MsgRollupSubject)
- mm.Data, err = json.Marshal(info)
- if err != nil {
- if r != nil {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(err, purgeErr)
- }
- }
- return nil, err
- }
-
- // Publish the meta message.
- _, err = jetStream.PublishMsgAsync(mm)
- if err != nil {
- if r != nil {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(err, purgeErr)
- }
- }
- return nil, err
- }
-
- // Wait for all to be processed.
- select {
- case <-jetStream.PublishAsyncComplete():
- if err := getErr(); err != nil {
- if r != nil {
- if purgeErr := purgePartial(); purgeErr != nil {
- return nil, errors.Join(err, purgeErr)
- }
- }
- return nil, err
- }
- case <-time.After(obs.js.opts.wait):
- return nil, ErrTimeout
- }
-
- info.ModTime = time.Now().UTC() // This time is not actually the correct time
-
- // Delete any original chunks.
- if einfo != nil && !einfo.Deleted {
- echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID)
- if err := obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: echunkSubj}); err != nil {
- return info, err
- }
- }
-
- // TODO would it be okay to do this to return the info with the correct time?
- // With the understanding that it is an extra call to the server.
- // Otherwise the time the user gets back is the client time, not the server time.
- // return obs.GetInfo(info.Name)
-
- return info, nil
-}
-
-// GetObjectDigestValue calculates the base64 value of hashed data
-func GetObjectDigestValue(data hash.Hash) string {
- sha := data.Sum(nil)
- return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:]))
-}
-
-// DecodeObjectDigest decodes base64 hash
-func DecodeObjectDigest(data string) ([]byte, error) {
- digest := strings.SplitN(data, "=", 2)
- if len(digest) != 2 {
- return nil, ErrInvalidDigestFormat
- }
- return base64.URLEncoding.DecodeString(digest[1])
-}
-
-// ObjectResult impl.
-type objResult struct {
- sync.Mutex
- info *ObjectInfo
- r io.ReadCloser
- err error
- ctx context.Context
- digest hash.Hash
- readTimeout time.Duration
-}
-
-func (info *ObjectInfo) isLink() bool {
- return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil
-}
-
-type GetObjectOpt interface {
- configureGetObject(opts *getObjectOpts) error
-}
-type getObjectOpts struct {
- ctx context.Context
- // Include deleted object in the result.
- showDeleted bool
-}
-
-type getObjectFn func(opts *getObjectOpts) error
-
-func (opt getObjectFn) configureGetObject(opts *getObjectOpts) error {
- return opt(opts)
-}
-
-// GetObjectShowDeleted makes Get() return object if it was marked as deleted.
-func GetObjectShowDeleted() GetObjectOpt {
- return getObjectFn(func(opts *getObjectOpts) error {
- opts.showDeleted = true
- return nil
- })
-}
-
-// For nats.Context() support.
-func (ctx ContextOpt) configureGetObject(opts *getObjectOpts) error {
- opts.ctx = ctx
- return nil
-}
-
-// Get will pull the object from the underlying stream.
-func (obs *obs) Get(name string, opts ...GetObjectOpt) (ObjectResult, error) {
- var o getObjectOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureGetObject(&o); err != nil {
- return nil, err
- }
- }
- }
- ctx := o.ctx
- infoOpts := make([]GetObjectInfoOpt, 0)
- if ctx != nil {
- infoOpts = append(infoOpts, Context(ctx))
- }
- if o.showDeleted {
- infoOpts = append(infoOpts, GetObjectInfoShowDeleted())
- }
-
- // Grab meta info.
- info, err := obs.GetInfo(name, infoOpts...)
- if err != nil {
- return nil, err
- }
- if info.NUID == _EMPTY_ {
- return nil, ErrBadObjectMeta
- }
-
- // Check for object links. If single objects we do a pass through.
- if info.isLink() {
- if info.ObjectMeta.Opts.Link.Name == _EMPTY_ {
- return nil, ErrCantGetBucket
- }
-
- // is the link in the same bucket?
- lbuck := info.ObjectMeta.Opts.Link.Bucket
- if lbuck == obs.name {
- return obs.Get(info.ObjectMeta.Opts.Link.Name)
- }
-
- // different bucket
- lobs, err := obs.js.ObjectStore(lbuck)
- if err != nil {
- return nil, err
- }
- return lobs.Get(info.ObjectMeta.Opts.Link.Name)
- }
-
- result := &objResult{info: info, ctx: ctx, readTimeout: obs.js.opts.wait}
- if info.Size == 0 {
- return result, nil
- }
-
- pr, pw := net.Pipe()
- result.r = pr
-
- gotErr := func(m *Msg, err error) {
- pw.Close()
- m.Sub.Unsubscribe()
- result.setErr(err)
- }
-
- // For calculating sum256
- result.digest = sha256.New()
-
- processChunk := func(m *Msg) {
- var err error
- if ctx != nil {
- select {
- case <-ctx.Done():
- if errors.Is(ctx.Err(), context.Canceled) {
- err = ctx.Err()
- } else {
- err = ErrTimeout
- }
- default:
- }
- if err != nil {
- gotErr(m, err)
- return
- }
- }
-
- tokens, err := parser.GetMetadataFields(m.Reply)
- if err != nil {
- gotErr(m, err)
- return
- }
-
- // Write to our pipe.
- for b := m.Data; len(b) > 0; {
- n, err := pw.Write(b)
- if err != nil {
- gotErr(m, err)
- return
- }
- b = b[n:]
- }
- // Update sha256
- result.digest.Write(m.Data)
-
- // Check if we are done.
- if tokens[parser.AckNumPendingTokenPos] == objNoPending {
- pw.Close()
- m.Sub.Unsubscribe()
- }
- }
-
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
- streamName := fmt.Sprintf(objNameTmpl, obs.name)
- subscribeOpts := []SubOpt{
- OrderedConsumer(),
- BindStream(streamName),
- }
- _, err = obs.js.Subscribe(chunkSubj, processChunk, subscribeOpts...)
- if err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-// Delete will delete the object.
-func (obs *obs) Delete(name string) error {
- // Grab meta info.
- info, err := obs.GetInfo(name, GetObjectInfoShowDeleted())
- if err != nil {
- return err
- }
- if info.NUID == _EMPTY_ {
- return ErrBadObjectMeta
- }
-
- // Place a rollup delete marker and publish the info
- info.Deleted = true
- info.Size, info.Chunks, info.Digest = 0, 0, _EMPTY_
-
- if err = publishMeta(info, obs.js); err != nil {
- return err
- }
-
- // Purge chunks for the object.
- chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID)
- return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj})
-}
-
-func publishMeta(info *ObjectInfo, js JetStreamContext) error {
- // marshal the object into json, don't store an actual time
- info.ModTime = time.Time{}
- data, err := json.Marshal(info)
- if err != nil {
- return err
- }
-
- // Prepare and publish the message.
- mm := NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name)))
- mm.Header.Set(MsgRollup, MsgRollupSubject)
- mm.Data = data
- if _, err := js.PublishMsg(mm); err != nil {
- return err
- }
-
- // set the ModTime in case it's returned to the user, even though it's not the correct time.
- info.ModTime = time.Now().UTC()
- return nil
-}
-
-// AddLink will add a link to another object if it's not deleted and not another link
-// name is the name of this link object
-// obj is what is being linked too
-func (obs *obs) AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) {
- if name == "" {
- return nil, ErrNameRequired
- }
-
- // TODO Handle stale info
-
- if obj == nil || obj.Name == "" {
- return nil, ErrObjectRequired
- }
- if obj.Deleted {
- return nil, ErrNoLinkToDeleted
- }
- if obj.isLink() {
- return nil, ErrNoLinkToLink
- }
-
- // If object with link's name is found, error.
- // If link with link's name is found, that's okay to overwrite.
- // If there was an error that was not ErrObjectNotFound, error.
- einfo, err := obs.GetInfo(name, GetObjectInfoShowDeleted())
- if einfo != nil {
- if !einfo.isLink() {
- return nil, ErrObjectAlreadyExists
- }
- } else if err != ErrObjectNotFound {
- return nil, err
- }
-
- // create the meta for the link
- meta := &ObjectMeta{
- Name: name,
- Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}},
- }
- info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta}
-
- // put the link object
- if err = publishMeta(info, obs.js); err != nil {
- return nil, err
- }
-
- return info, nil
-}
-
-// AddBucketLink will add a link to another object store.
-func (ob *obs) AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) {
- if name == "" {
- return nil, ErrNameRequired
- }
- if bucket == nil {
- return nil, ErrBucketRequired
- }
- bos, ok := bucket.(*obs)
- if !ok {
- return nil, ErrBucketMalformed
- }
-
- // If object with link's name is found, error.
- // If link with link's name is found, that's okay to overwrite.
- // If there was an error that was not ErrObjectNotFound, error.
- einfo, err := ob.GetInfo(name, GetObjectInfoShowDeleted())
- if einfo != nil {
- if !einfo.isLink() {
- return nil, ErrObjectAlreadyExists
- }
- } else if err != ErrObjectNotFound {
- return nil, err
- }
-
- // create the meta for the link
- meta := &ObjectMeta{
- Name: name,
- Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}},
- }
- info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta}
-
- // put the link object
- err = publishMeta(info, ob.js)
- if err != nil {
- return nil, err
- }
-
- return info, nil
-}
-
-// PutBytes is convenience function to put a byte slice into this object store.
-func (obs *obs) PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) {
- return obs.Put(&ObjectMeta{Name: name}, bytes.NewReader(data), opts...)
-}
-
-// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice.
-func (obs *obs) GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) {
- result, err := obs.Get(name, opts...)
- if err != nil {
- return nil, err
- }
- defer result.Close()
-
- var b bytes.Buffer
- if _, err := b.ReadFrom(result); err != nil {
- return nil, err
- }
- return b.Bytes(), nil
-}
-
-// PutString is convenience function to put a string into this object store.
-func (obs *obs) PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) {
- return obs.Put(&ObjectMeta{Name: name}, strings.NewReader(data), opts...)
-}
-
-// GetString is a convenience function to pull an object from this object store and return it as a string.
-func (obs *obs) GetString(name string, opts ...GetObjectOpt) (string, error) {
- result, err := obs.Get(name, opts...)
- if err != nil {
- return _EMPTY_, err
- }
- defer result.Close()
-
- var b bytes.Buffer
- if _, err := b.ReadFrom(result); err != nil {
- return _EMPTY_, err
- }
- return b.String(), nil
-}
-
-// PutFile is convenience function to put a file into an object store.
-func (obs *obs) PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) {
- f, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- return obs.Put(&ObjectMeta{Name: file}, f, opts...)
-}
-
-// GetFile is a convenience function to pull and object and place in a file.
-func (obs *obs) GetFile(name, file string, opts ...GetObjectOpt) error {
- // Expect file to be new.
- f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600)
- if err != nil {
- return err
- }
- defer f.Close()
-
- result, err := obs.Get(name, opts...)
- if err != nil {
- os.Remove(f.Name())
- return err
- }
- defer result.Close()
-
- // Stream copy to the file.
- _, err = io.Copy(f, result)
- return err
-}
-
-type GetObjectInfoOpt interface {
- configureGetInfo(opts *getObjectInfoOpts) error
-}
-type getObjectInfoOpts struct {
- ctx context.Context
- // Include deleted object in the result.
- showDeleted bool
-}
-
-type getObjectInfoFn func(opts *getObjectInfoOpts) error
-
-func (opt getObjectInfoFn) configureGetInfo(opts *getObjectInfoOpts) error {
- return opt(opts)
-}
-
-// GetObjectInfoShowDeleted makes GetInfo() return object if it was marked as deleted.
-func GetObjectInfoShowDeleted() GetObjectInfoOpt {
- return getObjectInfoFn(func(opts *getObjectInfoOpts) error {
- opts.showDeleted = true
- return nil
- })
-}
-
-// For nats.Context() support.
-func (ctx ContextOpt) configureGetInfo(opts *getObjectInfoOpts) error {
- opts.ctx = ctx
- return nil
-}
-
-// GetInfo will retrieve the current information for the object.
-func (obs *obs) GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) {
- // Grab last meta value we have.
- if name == "" {
- return nil, ErrNameRequired
- }
- var o getObjectInfoOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureGetInfo(&o); err != nil {
- return nil, err
- }
- }
- }
-
- metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call
- stream := fmt.Sprintf(objNameTmpl, obs.name)
-
- m, err := obs.js.GetLastMsg(stream, metaSubj)
- if err != nil {
- if errors.Is(err, ErrMsgNotFound) {
- err = ErrObjectNotFound
- }
- return nil, err
- }
- var info ObjectInfo
- if err := json.Unmarshal(m.Data, &info); err != nil {
- return nil, ErrBadObjectMeta
- }
- if !o.showDeleted && info.Deleted {
- return nil, ErrObjectNotFound
- }
- info.ModTime = m.Time
- return &info, nil
-}
-
-// UpdateMeta will update the meta for the object.
-func (obs *obs) UpdateMeta(name string, meta *ObjectMeta) error {
- if meta == nil {
- return ErrBadObjectMeta
- }
-
- // Grab the current meta.
- info, err := obs.GetInfo(name)
- if err != nil {
- if errors.Is(err, ErrObjectNotFound) {
- return ErrUpdateMetaDeleted
- }
- return err
- }
-
- // If the new name is different from the old, and it exists, error
- // If there was an error that was not ErrObjectNotFound, error.
- if name != meta.Name {
- existingInfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted())
- if err != nil && !errors.Is(err, ErrObjectNotFound) {
- return err
- }
- if err == nil && !existingInfo.Deleted {
- return ErrObjectAlreadyExists
- }
- }
-
- // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize)
- // These should only be updated internally when appropriate.
- info.Name = meta.Name
- info.Description = meta.Description
- info.Headers = meta.Headers
- info.Metadata = meta.Metadata
-
- // Prepare the meta message
- if err = publishMeta(info, obs.js); err != nil {
- return err
- }
-
- // did the name of this object change? We just stored the meta under the new name
- // so delete the meta from the old name via purge stream for subject
- if name != meta.Name {
- metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name))
- return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: metaSubj})
- }
-
- return nil
-}
-
-// Seal will seal the object store, no further modifications will be allowed.
-func (obs *obs) Seal() error {
- stream := fmt.Sprintf(objNameTmpl, obs.name)
- si, err := obs.js.StreamInfo(stream)
- if err != nil {
- return err
- }
- // Seal the stream from being able to take on more messages.
- cfg := si.Config
- cfg.Sealed = true
- _, err = obs.js.UpdateStream(&cfg)
- return err
-}
-
-// Implementation for Watch
-type objWatcher struct {
- updates chan *ObjectInfo
- sub *Subscription
-}
-
-// Updates returns the interior channel.
-func (w *objWatcher) Updates() <-chan *ObjectInfo {
- if w == nil {
- return nil
- }
- return w.updates
-}
-
-// Stop will unsubscribe from the watcher.
-func (w *objWatcher) Stop() error {
- if w == nil {
- return nil
- }
- return w.sub.Unsubscribe()
-}
-
-// Watch for changes in the underlying store and receive meta information updates.
-func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) {
- var o watchOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureWatcher(&o); err != nil {
- return nil, err
- }
- }
- }
-
- var initDoneMarker bool
-
- w := &objWatcher{updates: make(chan *ObjectInfo, 32)}
-
- update := func(m *Msg) {
- var info ObjectInfo
- if err := json.Unmarshal(m.Data, &info); err != nil {
- return // TODO(dlc) - Communicate this upwards?
- }
- meta, err := m.Metadata()
- if err != nil {
- return
- }
-
- if !o.ignoreDeletes || !info.Deleted {
- info.ModTime = meta.Timestamp
- w.updates <- &info
- }
-
- // if UpdatesOnly is set, no not send nil to the channel
- // as it would always be triggered after initializing the watcher
- if !initDoneMarker && meta.NumPending == 0 {
- initDoneMarker = true
- w.updates <- nil
- }
- }
-
- allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name)
- _, err := obs.js.GetLastMsg(obs.stream, allMeta)
- // if there are no messages on the stream and we are not watching
- // updates only, send nil to the channel to indicate that the initial
- // watch is done
- if !o.updatesOnly {
- if errors.Is(err, ErrMsgNotFound) {
- initDoneMarker = true
- w.updates <- nil
- }
- } else {
- // if UpdatesOnly was used, mark initialization as complete
- initDoneMarker = true
- }
-
- // Used ordered consumer to deliver results.
- streamName := fmt.Sprintf(objNameTmpl, obs.name)
- subOpts := []SubOpt{OrderedConsumer(), BindStream(streamName)}
- if !o.includeHistory {
- subOpts = append(subOpts, DeliverLastPerSubject())
- }
- if o.updatesOnly {
- subOpts = append(subOpts, DeliverNew())
- }
- sub, err := obs.js.Subscribe(allMeta, update, subOpts...)
- if err != nil {
- return nil, err
- }
- w.sub = sub
- return w, nil
-}
-
-type ListObjectsOpt interface {
- configureListObjects(opts *listObjectOpts) error
-}
-type listObjectOpts struct {
- ctx context.Context
- // Include deleted objects in the result channel.
- showDeleted bool
-}
-
-type listObjectsFn func(opts *listObjectOpts) error
-
-func (opt listObjectsFn) configureListObjects(opts *listObjectOpts) error {
- return opt(opts)
-}
-
-// ListObjectsShowDeleted makes ListObjects() return deleted objects.
-func ListObjectsShowDeleted() ListObjectsOpt {
- return listObjectsFn(func(opts *listObjectOpts) error {
- opts.showDeleted = true
- return nil
- })
-}
-
-// For nats.Context() support.
-func (ctx ContextOpt) configureListObjects(opts *listObjectOpts) error {
- opts.ctx = ctx
- return nil
-}
-
-// List will list all the objects in this store.
-func (obs *obs) List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) {
- var o listObjectOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureListObjects(&o); err != nil {
- return nil, err
- }
- }
- }
- watchOpts := make([]WatchOpt, 0)
- if !o.showDeleted {
- watchOpts = append(watchOpts, IgnoreDeletes())
- }
- watcher, err := obs.Watch(watchOpts...)
- if err != nil {
- return nil, err
- }
- defer watcher.Stop()
- if o.ctx == nil {
- o.ctx = context.Background()
- }
-
- var objs []*ObjectInfo
- updates := watcher.Updates()
-Updates:
- for {
- select {
- case entry := <-updates:
- if entry == nil {
- break Updates
- }
- objs = append(objs, entry)
- case <-o.ctx.Done():
- return nil, o.ctx.Err()
- }
- }
- if len(objs) == 0 {
- return nil, ErrNoObjectsFound
- }
- return objs, nil
-}
-
-// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus
-type ObjectBucketStatus struct {
- nfo *StreamInfo
- bucket string
-}
-
-// Bucket is the name of the bucket
-func (s *ObjectBucketStatus) Bucket() string { return s.bucket }
-
-// Description is the description supplied when creating the bucket
-func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description }
-
-// TTL indicates how long objects are kept in the bucket
-func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge }
-
-// Storage indicates the underlying JetStream storage technology used to store data
-func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage }
-
-// Replicas indicates how many storage replicas are kept for the data in the bucket
-func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas }
-
-// Sealed indicates the stream is sealed and cannot be modified in any way
-func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed }
-
-// Size is the combined size of all data in the bucket including metadata, in bytes
-func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes }
-
-// BackingStore indicates what technology is used for storage of the bucket
-func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" }
-
-// Metadata is the metadata supplied when creating the bucket
-func (s *ObjectBucketStatus) Metadata() map[string]string { return s.nfo.Config.Metadata }
-
-// StreamInfo is the stream info retrieved to create the status
-func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo }
-
-// IsCompressed indicates if the data is compressed on disk
-func (s *ObjectBucketStatus) IsCompressed() bool { return s.nfo.Config.Compression != NoCompression }
-
-// Status retrieves run-time status about a bucket
-func (obs *obs) Status() (ObjectStoreStatus, error) {
- nfo, err := obs.js.StreamInfo(obs.stream)
- if err != nil {
- return nil, err
- }
-
- status := &ObjectBucketStatus{
- nfo: nfo,
- bucket: obs.name,
- }
-
- return status, nil
-}
-
-// Read impl.
-func (o *objResult) Read(p []byte) (n int, err error) {
- o.Lock()
- defer o.Unlock()
- readDeadline := time.Now().Add(o.readTimeout)
- if ctx := o.ctx; ctx != nil {
- if deadline, ok := ctx.Deadline(); ok {
- readDeadline = deadline
- }
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- o.err = ctx.Err()
- } else {
- o.err = ErrTimeout
- }
- default:
- }
- }
- if o.err != nil {
- return 0, o.err
- }
- if o.r == nil {
- return 0, io.EOF
- }
-
- r := o.r.(net.Conn)
- r.SetReadDeadline(readDeadline)
- n, err = r.Read(p)
- if err, ok := err.(net.Error); ok && err.Timeout() {
- if ctx := o.ctx; ctx != nil {
- select {
- case <-ctx.Done():
- if ctx.Err() == context.Canceled {
- return 0, ctx.Err()
- } else {
- return 0, ErrTimeout
- }
- default:
- err = nil
- }
- }
- }
- if err == io.EOF {
- // Make sure the digest matches.
- sha := o.digest.Sum(nil)
- rsha, decodeErr := DecodeObjectDigest(o.info.Digest)
- if decodeErr != nil {
- o.err = decodeErr
- return 0, o.err
- }
- if !bytes.Equal(sha[:], rsha) {
- o.err = ErrDigestMismatch
- return 0, o.err
- }
- }
- return n, err
-}
-
-// Close impl.
-func (o *objResult) Close() error {
- o.Lock()
- defer o.Unlock()
- if o.r == nil {
- return nil
- }
- return o.r.Close()
-}
-
-func (o *objResult) setErr(err error) {
- o.Lock()
- defer o.Unlock()
- o.err = err
-}
-
-func (o *objResult) Info() (*ObjectInfo, error) {
- o.Lock()
- defer o.Unlock()
- return o.info, o.err
-}
-
-func (o *objResult) Error() error {
- o.Lock()
- defer o.Unlock()
- return o.err
-}
-
-// ObjectStoreNames is used to retrieve a list of bucket names
-func (js *js) ObjectStoreNames(opts ...ObjectOpt) <-chan string {
- var o objOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureObject(&o); err != nil {
- return nil
- }
- }
- }
- ch := make(chan string)
- var cancel context.CancelFunc
- if o.ctx == nil {
- o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait)
- }
- l := &streamLister{js: js}
- l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*")
- l.js.opts.ctx = o.ctx
- go func() {
- if cancel != nil {
- defer cancel()
- }
- defer close(ch)
- for l.Next() {
- for _, info := range l.Page() {
- if !strings.HasPrefix(info.Config.Name, "OBJ_") {
- continue
- }
- select {
- case ch <- info.Config.Name:
- case <-o.ctx.Done():
- return
- }
- }
- }
- }()
-
- return ch
-}
-
-// ObjectStores is used to retrieve a list of bucket statuses
-func (js *js) ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus {
- var o objOpts
- for _, opt := range opts {
- if opt != nil {
- if err := opt.configureObject(&o); err != nil {
- return nil
- }
- }
- }
- ch := make(chan ObjectStoreStatus)
- var cancel context.CancelFunc
- if o.ctx == nil {
- o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait)
- }
- l := &streamLister{js: js}
- l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*")
- l.js.opts.ctx = o.ctx
- go func() {
- if cancel != nil {
- defer cancel()
- }
- defer close(ch)
- for l.Next() {
- for _, info := range l.Page() {
- if !strings.HasPrefix(info.Config.Name, "OBJ_") {
- continue
- }
- select {
- case ch <- &ObjectBucketStatus{
- nfo: info,
- bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"),
- }:
- case <-o.ctx.Done():
- return
- }
- }
- }
- }()
-
- return ch
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/parser.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/parser.go
deleted file mode 100644
index 70204e6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/parser.go
+++ /dev/null
@@ -1,554 +0,0 @@
-// Copyright 2012-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "fmt"
-)
-
-type msgArg struct {
- subject []byte
- reply []byte
- sid int64
- hdr int
- size int
-}
-
-const MAX_CONTROL_LINE_SIZE = 4096
-
-type parseState struct {
- state int
- as int
- drop int
- hdr int
- ma msgArg
- argBuf []byte
- msgBuf []byte
- msgCopied bool
- scratch [MAX_CONTROL_LINE_SIZE]byte
-}
-
-const (
- OP_START = iota
- OP_PLUS
- OP_PLUS_O
- OP_PLUS_OK
- OP_MINUS
- OP_MINUS_E
- OP_MINUS_ER
- OP_MINUS_ERR
- OP_MINUS_ERR_SPC
- MINUS_ERR_ARG
- OP_M
- OP_MS
- OP_MSG
- OP_MSG_SPC
- MSG_ARG
- MSG_PAYLOAD
- MSG_END
- OP_H
- OP_P
- OP_PI
- OP_PIN
- OP_PING
- OP_PO
- OP_PON
- OP_PONG
- OP_I
- OP_IN
- OP_INF
- OP_INFO
- OP_INFO_SPC
- INFO_ARG
-)
-
-// parse is the fast protocol parser engine.
-func (nc *Conn) parse(buf []byte) error {
- var i int
- var b byte
-
- // Move to loop instead of range syntax to allow jumping of i
- for i = 0; i < len(buf); i++ {
- b = buf[i]
-
- switch nc.ps.state {
- case OP_START:
- switch b {
- case 'M', 'm':
- nc.ps.state = OP_M
- nc.ps.hdr = -1
- nc.ps.ma.hdr = -1
- case 'H', 'h':
- nc.ps.state = OP_H
- nc.ps.hdr = 0
- nc.ps.ma.hdr = 0
- case 'P', 'p':
- nc.ps.state = OP_P
- case '+':
- nc.ps.state = OP_PLUS
- case '-':
- nc.ps.state = OP_MINUS
- case 'I', 'i':
- nc.ps.state = OP_I
- default:
- goto parseErr
- }
- case OP_H:
- switch b {
- case 'M', 'm':
- nc.ps.state = OP_M
- default:
- goto parseErr
- }
- case OP_M:
- switch b {
- case 'S', 's':
- nc.ps.state = OP_MS
- default:
- goto parseErr
- }
- case OP_MS:
- switch b {
- case 'G', 'g':
- nc.ps.state = OP_MSG
- default:
- goto parseErr
- }
- case OP_MSG:
- switch b {
- case ' ', '\t':
- nc.ps.state = OP_MSG_SPC
- default:
- goto parseErr
- }
- case OP_MSG_SPC:
- switch b {
- case ' ', '\t':
- continue
- default:
- nc.ps.state = MSG_ARG
- nc.ps.as = i
- }
- case MSG_ARG:
- switch b {
- case '\r':
- nc.ps.drop = 1
- case '\n':
- var arg []byte
- if nc.ps.argBuf != nil {
- arg = nc.ps.argBuf
- } else {
- arg = buf[nc.ps.as : i-nc.ps.drop]
- }
- if err := nc.processMsgArgs(arg); err != nil {
- return err
- }
- nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD
-
- // jump ahead with the index. If this overruns
- // what is left we fall out and process a split buffer.
- i = nc.ps.as + nc.ps.ma.size - 1
- default:
- if nc.ps.argBuf != nil {
- nc.ps.argBuf = append(nc.ps.argBuf, b)
- }
- }
- case MSG_PAYLOAD:
- if nc.ps.msgBuf != nil {
- if len(nc.ps.msgBuf) >= nc.ps.ma.size {
- nc.processMsg(nc.ps.msgBuf)
- nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END
- } else {
- // copy as much as we can to the buffer and skip ahead.
- toCopy := nc.ps.ma.size - len(nc.ps.msgBuf)
- avail := len(buf) - i
-
- if avail < toCopy {
- toCopy = avail
- }
-
- if toCopy > 0 {
- start := len(nc.ps.msgBuf)
- // This is needed for copy to work.
- nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy]
- copy(nc.ps.msgBuf[start:], buf[i:i+toCopy])
- // Update our index
- i = (i + toCopy) - 1
- } else {
- nc.ps.msgBuf = append(nc.ps.msgBuf, b)
- }
- }
- } else if i-nc.ps.as >= nc.ps.ma.size {
- nc.processMsg(buf[nc.ps.as:i])
- nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END
- }
- case MSG_END:
- switch b {
- case '\n':
- nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
- default:
- continue
- }
- case OP_PLUS:
- switch b {
- case 'O', 'o':
- nc.ps.state = OP_PLUS_O
- default:
- goto parseErr
- }
- case OP_PLUS_O:
- switch b {
- case 'K', 'k':
- nc.ps.state = OP_PLUS_OK
- default:
- goto parseErr
- }
- case OP_PLUS_OK:
- switch b {
- case '\n':
- nc.processOK()
- nc.ps.drop, nc.ps.state = 0, OP_START
- }
- case OP_MINUS:
- switch b {
- case 'E', 'e':
- nc.ps.state = OP_MINUS_E
- default:
- goto parseErr
- }
- case OP_MINUS_E:
- switch b {
- case 'R', 'r':
- nc.ps.state = OP_MINUS_ER
- default:
- goto parseErr
- }
- case OP_MINUS_ER:
- switch b {
- case 'R', 'r':
- nc.ps.state = OP_MINUS_ERR
- default:
- goto parseErr
- }
- case OP_MINUS_ERR:
- switch b {
- case ' ', '\t':
- nc.ps.state = OP_MINUS_ERR_SPC
- default:
- goto parseErr
- }
- case OP_MINUS_ERR_SPC:
- switch b {
- case ' ', '\t':
- continue
- default:
- nc.ps.state = MINUS_ERR_ARG
- nc.ps.as = i
- }
- case MINUS_ERR_ARG:
- switch b {
- case '\r':
- nc.ps.drop = 1
- case '\n':
- var arg []byte
- if nc.ps.argBuf != nil {
- arg = nc.ps.argBuf
- nc.ps.argBuf = nil
- } else {
- arg = buf[nc.ps.as : i-nc.ps.drop]
- }
- nc.processErr(string(arg))
- nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
- default:
- if nc.ps.argBuf != nil {
- nc.ps.argBuf = append(nc.ps.argBuf, b)
- }
- }
- case OP_P:
- switch b {
- case 'I', 'i':
- nc.ps.state = OP_PI
- case 'O', 'o':
- nc.ps.state = OP_PO
- default:
- goto parseErr
- }
- case OP_PO:
- switch b {
- case 'N', 'n':
- nc.ps.state = OP_PON
- default:
- goto parseErr
- }
- case OP_PON:
- switch b {
- case 'G', 'g':
- nc.ps.state = OP_PONG
- default:
- goto parseErr
- }
- case OP_PONG:
- switch b {
- case '\n':
- nc.processPong()
- nc.ps.drop, nc.ps.state = 0, OP_START
- }
- case OP_PI:
- switch b {
- case 'N', 'n':
- nc.ps.state = OP_PIN
- default:
- goto parseErr
- }
- case OP_PIN:
- switch b {
- case 'G', 'g':
- nc.ps.state = OP_PING
- default:
- goto parseErr
- }
- case OP_PING:
- switch b {
- case '\n':
- nc.processPing()
- nc.ps.drop, nc.ps.state = 0, OP_START
- }
- case OP_I:
- switch b {
- case 'N', 'n':
- nc.ps.state = OP_IN
- default:
- goto parseErr
- }
- case OP_IN:
- switch b {
- case 'F', 'f':
- nc.ps.state = OP_INF
- default:
- goto parseErr
- }
- case OP_INF:
- switch b {
- case 'O', 'o':
- nc.ps.state = OP_INFO
- default:
- goto parseErr
- }
- case OP_INFO:
- switch b {
- case ' ', '\t':
- nc.ps.state = OP_INFO_SPC
- default:
- goto parseErr
- }
- case OP_INFO_SPC:
- switch b {
- case ' ', '\t':
- continue
- default:
- nc.ps.state = INFO_ARG
- nc.ps.as = i
- }
- case INFO_ARG:
- switch b {
- case '\r':
- nc.ps.drop = 1
- case '\n':
- var arg []byte
- if nc.ps.argBuf != nil {
- arg = nc.ps.argBuf
- nc.ps.argBuf = nil
- } else {
- arg = buf[nc.ps.as : i-nc.ps.drop]
- }
- nc.processAsyncInfo(arg)
- nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
- default:
- if nc.ps.argBuf != nil {
- nc.ps.argBuf = append(nc.ps.argBuf, b)
- }
- }
- default:
- goto parseErr
- }
- }
- // Check for split buffer scenarios
- if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil {
- nc.ps.argBuf = nc.ps.scratch[:0]
- nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...)
- // FIXME, check max len
- }
- // Check for split msg
- if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil {
- // We need to clone the msgArg if it is still referencing the
- // read buffer and we are not able to process the msg.
- if nc.ps.argBuf == nil {
- nc.cloneMsgArg()
- }
-
- // If we will overflow the scratch buffer, just create a
- // new buffer to hold the split message.
- if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) {
- lrem := len(buf[nc.ps.as:])
-
- nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size)
- copy(nc.ps.msgBuf, buf[nc.ps.as:])
- nc.ps.msgCopied = true
- } else {
- nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)]
- nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...)
- }
- }
-
- return nil
-
-parseErr:
- return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:])
-}
-
-// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
-// we need to hold onto it into the next read.
-func (nc *Conn) cloneMsgArg() {
- nc.ps.argBuf = nc.ps.scratch[:0]
- nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...)
- nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...)
- nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)]
- if nc.ps.ma.reply != nil {
- nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):]
- }
-}
-
-const argsLenMax = 4
-
-func (nc *Conn) processMsgArgs(arg []byte) error {
- // Use separate function for header based messages.
- if nc.ps.hdr >= 0 {
- return nc.processHeaderMsgArgs(arg)
- }
-
- // Unroll splitArgs to avoid runtime/heap issues
- a := [argsLenMax][]byte{}
- args := a[:0]
- start := -1
- for i, b := range arg {
- switch b {
- case ' ', '\t', '\r', '\n':
- if start >= 0 {
- args = append(args, arg[start:i])
- start = -1
- }
- default:
- if start < 0 {
- start = i
- }
- }
- }
- if start >= 0 {
- args = append(args, arg[start:])
- }
-
- switch len(args) {
- case 3:
- nc.ps.ma.subject = args[0]
- nc.ps.ma.sid = parseInt64(args[1])
- nc.ps.ma.reply = nil
- nc.ps.ma.size = int(parseInt64(args[2]))
- case 4:
- nc.ps.ma.subject = args[0]
- nc.ps.ma.sid = parseInt64(args[1])
- nc.ps.ma.reply = args[2]
- nc.ps.ma.size = int(parseInt64(args[3]))
- default:
- return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg)
- }
- if nc.ps.ma.sid < 0 {
- return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg)
- }
- if nc.ps.ma.size < 0 {
- return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg)
- }
- return nil
-}
-
-// processHeaderMsgArgs is for a header based message.
-func (nc *Conn) processHeaderMsgArgs(arg []byte) error {
- // Unroll splitArgs to avoid runtime/heap issues
- a := [argsLenMax][]byte{}
- args := a[:0]
- start := -1
- for i, b := range arg {
- switch b {
- case ' ', '\t', '\r', '\n':
- if start >= 0 {
- args = append(args, arg[start:i])
- start = -1
- }
- default:
- if start < 0 {
- start = i
- }
- }
- }
- if start >= 0 {
- args = append(args, arg[start:])
- }
-
- switch len(args) {
- case 4:
- nc.ps.ma.subject = args[0]
- nc.ps.ma.sid = parseInt64(args[1])
- nc.ps.ma.reply = nil
- nc.ps.ma.hdr = int(parseInt64(args[2]))
- nc.ps.ma.size = int(parseInt64(args[3]))
- case 5:
- nc.ps.ma.subject = args[0]
- nc.ps.ma.sid = parseInt64(args[1])
- nc.ps.ma.reply = args[2]
- nc.ps.ma.hdr = int(parseInt64(args[3]))
- nc.ps.ma.size = int(parseInt64(args[4]))
- default:
- return fmt.Errorf("nats: processHeaderMsgArgs Parse Error: '%s'", arg)
- }
- if nc.ps.ma.sid < 0 {
- return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Sid: '%s'", arg)
- }
- if nc.ps.ma.hdr < 0 || nc.ps.ma.hdr > nc.ps.ma.size {
- return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Header Size: '%s'", arg)
- }
- if nc.ps.ma.size < 0 {
- return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Size: '%s'", arg)
- }
- return nil
-}
-
-// ASCII numbers 0-9
-const (
- ascii_0 = 48
- ascii_9 = 57
-)
-
-// parseInt64 expects decimal positive numbers. We
-// return -1 to signal error
-func parseInt64(d []byte) (n int64) {
- if len(d) == 0 {
- return -1
- }
- for _, dec := range d {
- if dec < ascii_0 || dec > ascii_9 {
- return -1
- }
- n = n*10 + (int64(dec) - ascii_0)
- }
- return n
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/rand.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/rand.go
deleted file mode 100644
index 0cdee0a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/rand.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.20
-// +build !go1.20
-
-// A Go client for the NATS messaging system (https://nats.io).
-package nats
-
-import (
- "math/rand"
- "time"
-)
-
-func init() {
- // This is not needed since Go 1.20 because now rand.Seed always happens
- // by default (uses runtime.fastrand64 instead as source).
- rand.Seed(time.Now().UnixNano())
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/testing_internal.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/testing_internal.go
deleted file mode 100644
index 1839702..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/testing_internal.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build internal_testing
-// +build internal_testing
-
-// Functions in this file are only available when building nats.go with the
-// internal_testing build tag. They are used by the nats.go test suite.
-package nats
-
-// AddMsgFilter adds a message filter for the given subject
-// to the connection. The filter will be called for each
-// message received on the subject. If the filter returns
-// nil, the message will be dropped.
-func (nc *Conn) AddMsgFilter(subject string, filter msgFilter) {
- nc.subsMu.Lock()
- defer nc.subsMu.Unlock()
-
- if nc.filters == nil {
- nc.filters = make(map[string]msgFilter)
- }
- nc.filters[subject] = filter
-}
-
-// RemoveMsgFilter removes a message filter for the given subject.
-func (nc *Conn) RemoveMsgFilter(subject string) {
- nc.subsMu.Lock()
- defer nc.subsMu.Unlock()
-
- if nc.filters != nil {
- delete(nc.filters, subject)
- if len(nc.filters) == 0 {
- nc.filters = nil
- }
- }
-}
-
-// IsJSControlMessage returns true if the message is a JetStream control message.
-func IsJSControlMessage(msg *Msg) (bool, int) {
- return isJSControlMessage(msg)
-}
-
-// CloseTCPConn closes the underlying TCP connection.
-// It can be used to simulate a disconnect.
-func (nc *Conn) CloseTCPConn() {
- nc.mu.Lock()
- defer nc.mu.Unlock()
- nc.conn.Close()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/timer.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/timer.go
deleted file mode 100644
index 6edeb4c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/timer.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017-2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "sync"
- "time"
-)
-
-// global pool of *time.Timer's. can be used by multiple goroutines concurrently.
-var globalTimerPool timerPool
-
-// timerPool provides GC-able pooling of *time.Timer's.
-// can be used by multiple goroutines concurrently.
-type timerPool struct {
- p sync.Pool
-}
-
-// Get returns a timer that completes after the given duration.
-func (tp *timerPool) Get(d time.Duration) *time.Timer {
- if t, ok := tp.p.Get().(*time.Timer); ok && t != nil {
- t.Reset(d)
- return t
- }
-
- return time.NewTimer(d)
-}
-
-// Put pools the given timer.
-//
-// There is no need to call t.Stop() before calling Put.
-//
-// Put will try to stop the timer before pooling. If the
-// given timer already expired, Put will read the unreceived
-// value if there is one.
-func (tp *timerPool) Put(t *time.Timer) {
- if !t.Stop() {
- select {
- case <-t.C:
- default:
- }
- }
-
- tp.p.Put(t)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/util/tls.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/util/tls.go
deleted file mode 100644
index af9f51f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/util/tls.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2017-2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.8
-// +build go1.8
-
-package util
-
-import "crypto/tls"
-
-// CloneTLSConfig returns a copy of c.
-func CloneTLSConfig(c *tls.Config) *tls.Config {
- if c == nil {
- return &tls.Config{}
- }
-
- return c.Clone()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/util/tls_go17.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/util/tls_go17.go
deleted file mode 100644
index 44d46b4..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/util/tls_go17.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2016-2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.7 && !go1.8
-// +build go1.7,!go1.8
-
-package util
-
-import (
- "crypto/tls"
-)
-
-// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
-// This is temporary, until this is provided by the language.
-// https://go-review.googlesource.com/#/c/28075/
-func CloneTLSConfig(c *tls.Config) *tls.Config {
- return &tls.Config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/ws.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/ws.go
deleted file mode 100644
index 2c2d421..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nats.go/ws.go
+++ /dev/null
@@ -1,780 +0,0 @@
-// Copyright 2021-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nats
-
-import (
- "bufio"
- "bytes"
- "crypto/rand"
- "crypto/sha1"
- "encoding/base64"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- mrand "math/rand"
- "net/http"
- "net/url"
- "strings"
- "time"
- "unicode/utf8"
-
- "github.com/klauspost/compress/flate"
-)
-
-type wsOpCode int
-
-const (
- // From https://tools.ietf.org/html/rfc6455#section-5.2
- wsTextMessage = wsOpCode(1)
- wsBinaryMessage = wsOpCode(2)
- wsCloseMessage = wsOpCode(8)
- wsPingMessage = wsOpCode(9)
- wsPongMessage = wsOpCode(10)
-
- wsFinalBit = 1 << 7
- wsRsv1Bit = 1 << 6 // Used for compression, from https://tools.ietf.org/html/rfc7692#section-6
- wsRsv2Bit = 1 << 5
- wsRsv3Bit = 1 << 4
-
- wsMaskBit = 1 << 7
-
- wsContinuationFrame = 0
- wsMaxFrameHeaderSize = 14
- wsMaxControlPayloadSize = 125
- wsCloseSatusSize = 2
-
- // From https://tools.ietf.org/html/rfc6455#section-11.7
- wsCloseStatusNormalClosure = 1000
- wsCloseStatusNoStatusReceived = 1005
- wsCloseStatusAbnormalClosure = 1006
- wsCloseStatusInvalidPayloadData = 1007
-
- wsScheme = "ws"
- wsSchemeTLS = "wss"
-
- wsPMCExtension = "permessage-deflate" // per-message compression
- wsPMCSrvNoCtx = "server_no_context_takeover"
- wsPMCCliNoCtx = "client_no_context_takeover"
- wsPMCReqHeaderValue = wsPMCExtension + "; " + wsPMCSrvNoCtx + "; " + wsPMCCliNoCtx
-)
-
-// From https://tools.ietf.org/html/rfc6455#section-1.3
-var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
-
-var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff}
-
-type websocketReader struct {
- r io.Reader
- pending [][]byte
- ib []byte
- ff bool
- fc bool
- nl bool
- dc *wsDecompressor
- nc *Conn
-}
-
-type wsDecompressor struct {
- flate io.ReadCloser
- bufs [][]byte
- off int
-}
-
-type websocketWriter struct {
- w io.Writer
- compress bool
- compressor *flate.Writer
- ctrlFrames [][]byte // pending frames that should be sent at the next Write()
- cm []byte // close message that needs to be sent when everything else has been sent
- cmDone bool // a close message has been added or sent (never going back to false)
- noMoreSend bool // if true, even if there is a Write() call, we should not send anything
-}
-
-func (d *wsDecompressor) Read(dst []byte) (int, error) {
- if len(dst) == 0 {
- return 0, nil
- }
- if len(d.bufs) == 0 {
- return 0, io.EOF
- }
- copied := 0
- rem := len(dst)
- for buf := d.bufs[0]; buf != nil && rem > 0; {
- n := len(buf[d.off:])
- if n > rem {
- n = rem
- }
- copy(dst[copied:], buf[d.off:d.off+n])
- copied += n
- rem -= n
- d.off += n
- buf = d.nextBuf()
- }
- return copied, nil
-}
-
-func (d *wsDecompressor) nextBuf() []byte {
- // We still have remaining data in the first buffer
- if d.off != len(d.bufs[0]) {
- return d.bufs[0]
- }
- // We read the full first buffer. Reset offset.
- d.off = 0
- // We were at the last buffer, so we are done.
- if len(d.bufs) == 1 {
- d.bufs = nil
- return nil
- }
- // Here we move to the next buffer.
- d.bufs = d.bufs[1:]
- return d.bufs[0]
-}
-
-func (d *wsDecompressor) ReadByte() (byte, error) {
- if len(d.bufs) == 0 {
- return 0, io.EOF
- }
- b := d.bufs[0][d.off]
- d.off++
- d.nextBuf()
- return b, nil
-}
-
-func (d *wsDecompressor) addBuf(b []byte) {
- d.bufs = append(d.bufs, b)
-}
-
-func (d *wsDecompressor) decompress() ([]byte, error) {
- d.off = 0
- // As per https://tools.ietf.org/html/rfc7692#section-7.2.2
- // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader
- // does not report unexpected EOF.
- d.bufs = append(d.bufs, compressFinalBlock)
- // Create or reset the decompressor with his object (wsDecompressor)
- // that provides Read() and ReadByte() APIs that will consume from
- // the compressed buffers (d.bufs).
- if d.flate == nil {
- d.flate = flate.NewReader(d)
- } else {
- d.flate.(flate.Resetter).Reset(d, nil)
- }
- b, err := io.ReadAll(d.flate)
- // Now reset the compressed buffers list
- d.bufs = nil
- return b, err
-}
-
-func wsNewReader(r io.Reader) *websocketReader {
- return &websocketReader{r: r, ff: true}
-}
-
-// From now on, reads will be from the readLoop and we will need to
-// acquire the connection lock should we have to send/write a control
-// message from handleControlFrame.
-//
-// Note: this runs under the connection lock.
-func (r *websocketReader) doneWithConnect() {
- r.nl = true
-}
-
-func (r *websocketReader) Read(p []byte) (int, error) {
- var err error
- var buf []byte
-
- if l := len(r.ib); l > 0 {
- buf = r.ib
- r.ib = nil
- } else {
- if len(r.pending) > 0 {
- return r.drainPending(p), nil
- }
-
- // Get some data from the underlying reader.
- n, err := r.r.Read(p)
- if err != nil {
- return 0, err
- }
- buf = p[:n]
- }
-
- // Now parse this and decode frames. We will possibly read more to
- // ensure that we get a full frame.
- var (
- tmpBuf []byte
- pos int
- max = len(buf)
- rem = 0
- )
- for pos < max {
- b0 := buf[pos]
- frameType := wsOpCode(b0 & 0xF)
- final := b0&wsFinalBit != 0
- compressed := b0&wsRsv1Bit != 0
- pos++
-
- tmpBuf, pos, err = wsGet(r.r, buf, pos, 1)
- if err != nil {
- return 0, err
- }
- b1 := tmpBuf[0]
-
- // Store size in case it is < 125
- rem = int(b1 & 0x7F)
-
- switch frameType {
- case wsPingMessage, wsPongMessage, wsCloseMessage:
- if rem > wsMaxControlPayloadSize {
- return 0, fmt.Errorf(
- fmt.Sprintf("control frame length bigger than maximum allowed of %v bytes",
- wsMaxControlPayloadSize))
- }
- if compressed {
- return 0, errors.New("control frame should not be compressed")
- }
- if !final {
- return 0, errors.New("control frame does not have final bit set")
- }
- case wsTextMessage, wsBinaryMessage:
- if !r.ff {
- return 0, errors.New("new message started before final frame for previous message was received")
- }
- r.ff = final
- r.fc = compressed
- case wsContinuationFrame:
- // Compressed bit must be only set in the first frame
- if r.ff || compressed {
- return 0, errors.New("invalid continuation frame")
- }
- r.ff = final
- default:
- return 0, fmt.Errorf("unknown opcode %v", frameType)
- }
-
- // If the encoded size is <= 125, then `rem` is simply the remainder size of the
- // frame. If it is 126, then the actual size is encoded as a uint16. For larger
- // frames, `rem` will initially be 127 and the actual size is encoded as a uint64.
- switch rem {
- case 126:
- tmpBuf, pos, err = wsGet(r.r, buf, pos, 2)
- if err != nil {
- return 0, err
- }
- rem = int(binary.BigEndian.Uint16(tmpBuf))
- case 127:
- tmpBuf, pos, err = wsGet(r.r, buf, pos, 8)
- if err != nil {
- return 0, err
- }
- rem = int(binary.BigEndian.Uint64(tmpBuf))
- }
-
- // Handle control messages in place...
- if wsIsControlFrame(frameType) {
- pos, err = r.handleControlFrame(frameType, buf, pos, rem)
- if err != nil {
- return 0, err
- }
- rem = 0
- continue
- }
-
- var b []byte
- // This ensures that we get the full payload for this frame.
- b, pos, err = wsGet(r.r, buf, pos, rem)
- if err != nil {
- return 0, err
- }
- // We read the full frame.
- rem = 0
- addToPending := true
- if r.fc {
- // Don't add to pending if we are not dealing with the final frame.
- addToPending = r.ff
- // Add the compressed payload buffer to the list.
- r.addCBuf(b)
- // Decompress only when this is the final frame.
- if r.ff {
- b, err = r.dc.decompress()
- if err != nil {
- return 0, err
- }
- r.fc = false
- }
- }
- // Add to the pending list if dealing with uncompressed frames or
- // after we have received the full compressed message and decompressed it.
- if addToPending {
- r.pending = append(r.pending, b)
- }
- }
- // In case of compression, there may be nothing to drain
- if len(r.pending) > 0 {
- return r.drainPending(p), nil
- }
- return 0, nil
-}
-
-func (r *websocketReader) addCBuf(b []byte) {
- if r.dc == nil {
- r.dc = &wsDecompressor{}
- }
- // Add a copy of the incoming buffer to the list of compressed buffers.
- r.dc.addBuf(append([]byte(nil), b...))
-}
-
-func (r *websocketReader) drainPending(p []byte) int {
- var n int
- var max = len(p)
-
- for i, buf := range r.pending {
- if n+len(buf) <= max {
- copy(p[n:], buf)
- n += len(buf)
- } else {
- // Is there room left?
- if n < max {
- // Write the partial and update this slice.
- rem := max - n
- copy(p[n:], buf[:rem])
- n += rem
- r.pending[i] = buf[rem:]
- }
- // These are the remaining slices that will need to be used at
- // the next Read() call.
- r.pending = r.pending[i:]
- return n
- }
- }
- r.pending = r.pending[:0]
- return n
-}
-
-func wsGet(r io.Reader, buf []byte, pos, needed int) ([]byte, int, error) {
- avail := len(buf) - pos
- if avail >= needed {
- return buf[pos : pos+needed], pos + needed, nil
- }
- b := make([]byte, needed)
- start := copy(b, buf[pos:])
- for start != needed {
- n, err := r.Read(b[start:cap(b)])
- start += n
- if err != nil {
- return b, start, err
- }
- }
- return b, pos + avail, nil
-}
-
-func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos, rem int) (int, error) {
- var payload []byte
- var err error
-
- if rem > 0 {
- payload, pos, err = wsGet(r.r, buf, pos, rem)
- if err != nil {
- return pos, err
- }
- }
- switch frameType {
- case wsCloseMessage:
- status := wsCloseStatusNoStatusReceived
- var body string
- lp := len(payload)
- // If there is a payload, the status is represented as a 2-byte
- // unsigned integer (in network byte order). Then, there may be an
- // optional body.
- hasStatus, hasBody := lp >= wsCloseSatusSize, lp > wsCloseSatusSize
- if hasStatus {
- // Decode the status
- status = int(binary.BigEndian.Uint16(payload[:wsCloseSatusSize]))
- // Now if there is a body, capture it and make sure this is a valid UTF-8.
- if hasBody {
- body = string(payload[wsCloseSatusSize:])
- if !utf8.ValidString(body) {
- // https://tools.ietf.org/html/rfc6455#section-5.5.1
- // If body is present, it must be a valid utf8
- status = wsCloseStatusInvalidPayloadData
- body = "invalid utf8 body in close frame"
- }
- }
- }
- r.nc.wsEnqueueCloseMsg(r.nl, status, body)
- // Return io.EOF so that readLoop will close the connection as client closed
- // after processing pending buffers.
- return pos, io.EOF
- case wsPingMessage:
- r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload)
- case wsPongMessage:
- // Nothing to do..
- }
- return pos, nil
-}
-
-func (w *websocketWriter) Write(p []byte) (int, error) {
- if w.noMoreSend {
- return 0, nil
- }
- var total int
- var n int
- var err error
- // If there are control frames, they can be sent now. Actually spec says
- // that they should be sent ASAP, so we will send before any application data.
- if len(w.ctrlFrames) > 0 {
- n, err = w.writeCtrlFrames()
- if err != nil {
- return n, err
- }
- total += n
- }
- // Do the following only if there is something to send.
- // We will end with checking for need to send close message.
- if len(p) > 0 {
- if w.compress {
- buf := &bytes.Buffer{}
- if w.compressor == nil {
- w.compressor, _ = flate.NewWriter(buf, flate.BestSpeed)
- } else {
- w.compressor.Reset(buf)
- }
- if n, err = w.compressor.Write(p); err != nil {
- return n, err
- }
- if err = w.compressor.Flush(); err != nil {
- return n, err
- }
- b := buf.Bytes()
- p = b[:len(b)-4]
- }
- fh, key := wsCreateFrameHeader(w.compress, wsBinaryMessage, len(p))
- wsMaskBuf(key, p)
- n, err = w.w.Write(fh)
- total += n
- if err == nil {
- n, err = w.w.Write(p)
- total += n
- }
- }
- if err == nil && w.cm != nil {
- n, err = w.writeCloseMsg()
- total += n
- }
- return total, err
-}
-
-func (w *websocketWriter) writeCtrlFrames() (int, error) {
- var (
- n int
- total int
- i int
- err error
- )
- for ; i < len(w.ctrlFrames); i++ {
- buf := w.ctrlFrames[i]
- n, err = w.w.Write(buf)
- total += n
- if err != nil {
- break
- }
- }
- if i != len(w.ctrlFrames) {
- w.ctrlFrames = w.ctrlFrames[i+1:]
- } else {
- w.ctrlFrames = w.ctrlFrames[:0]
- }
- return total, err
-}
-
-func (w *websocketWriter) writeCloseMsg() (int, error) {
- n, err := w.w.Write(w.cm)
- w.cm, w.noMoreSend = nil, true
- return n, err
-}
-
-func wsMaskBuf(key, buf []byte) {
- for i := 0; i < len(buf); i++ {
- buf[i] ^= key[i&3]
- }
-}
-
-// Create the frame header.
-// Encodes the frame type and optional compression flag, and the size of the payload.
-func wsCreateFrameHeader(compressed bool, frameType wsOpCode, l int) ([]byte, []byte) {
- fh := make([]byte, wsMaxFrameHeaderSize)
- n, key := wsFillFrameHeader(fh, compressed, frameType, l)
- return fh[:n], key
-}
-
-func wsFillFrameHeader(fh []byte, compressed bool, frameType wsOpCode, l int) (int, []byte) {
- var n int
- b := byte(frameType)
- b |= wsFinalBit
- if compressed {
- b |= wsRsv1Bit
- }
- b1 := byte(wsMaskBit)
- switch {
- case l <= 125:
- n = 2
- fh[0] = b
- fh[1] = b1 | byte(l)
- case l < 65536:
- n = 4
- fh[0] = b
- fh[1] = b1 | 126
- binary.BigEndian.PutUint16(fh[2:], uint16(l))
- default:
- n = 10
- fh[0] = b
- fh[1] = b1 | 127
- binary.BigEndian.PutUint64(fh[2:], uint64(l))
- }
- var key []byte
- var keyBuf [4]byte
- if _, err := io.ReadFull(rand.Reader, keyBuf[:4]); err != nil {
- kv := mrand.Int31()
- binary.LittleEndian.PutUint32(keyBuf[:4], uint32(kv))
- }
- copy(fh[n:], keyBuf[:4])
- key = fh[n : n+4]
- n += 4
- return n, key
-}
-
-func (nc *Conn) wsInitHandshake(u *url.URL) error {
- compress := nc.Opts.Compression
- tlsRequired := u.Scheme == wsSchemeTLS || nc.Opts.Secure || nc.Opts.TLSConfig != nil || nc.Opts.TLSCertCB != nil || nc.Opts.RootCAsCB != nil
- // Do TLS here as needed.
- if tlsRequired {
- if err := nc.makeTLSConn(); err != nil {
- return err
- }
- } else {
- nc.bindToNewConn()
- }
-
- var err error
-
- // For http request, we need the passed URL to contain either http or https scheme.
- scheme := "http"
- if tlsRequired {
- scheme = "https"
- }
- ustr := fmt.Sprintf("%s://%s", scheme, u.Host)
-
- if nc.Opts.ProxyPath != "" {
- proxyPath := nc.Opts.ProxyPath
- if !strings.HasPrefix(proxyPath, "/") {
- proxyPath = "/" + proxyPath
- }
- ustr += proxyPath
- }
-
- u, err = url.Parse(ustr)
- if err != nil {
- return err
- }
- req := &http.Request{
- Method: "GET",
- URL: u,
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- Header: make(http.Header),
- Host: u.Host,
- }
- wsKey, err := wsMakeChallengeKey()
- if err != nil {
- return err
- }
-
- req.Header["Upgrade"] = []string{"websocket"}
- req.Header["Connection"] = []string{"Upgrade"}
- req.Header["Sec-WebSocket-Key"] = []string{wsKey}
- req.Header["Sec-WebSocket-Version"] = []string{"13"}
- if compress {
- req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue)
- }
- if err := req.Write(nc.conn); err != nil {
- return err
- }
-
- var resp *http.Response
-
- br := bufio.NewReaderSize(nc.conn, 4096)
- nc.conn.SetReadDeadline(time.Now().Add(nc.Opts.Timeout))
- resp, err = http.ReadResponse(br, req)
- if err == nil &&
- (resp.StatusCode != 101 ||
- !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
- !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
- resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) {
-
- err = fmt.Errorf("invalid websocket connection")
- }
- // Check compression extension...
- if err == nil && compress {
- // Check that not only permessage-deflate extension is present, but that
- // we also have server and client no context take over.
- srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header)
-
- // If server does not support compression, then simply disable it in our side.
- if !srvCompress {
- compress = false
- } else if !noCtxTakeover {
- err = fmt.Errorf("compression negotiation error")
- }
- }
- if resp != nil {
- resp.Body.Close()
- }
- nc.conn.SetReadDeadline(time.Time{})
- if err != nil {
- return err
- }
-
- wsr := wsNewReader(nc.br.r)
- wsr.nc = nc
- // We have to slurp whatever is in the bufio reader and copy to br.r
- if n := br.Buffered(); n != 0 {
- wsr.ib, _ = br.Peek(n)
- }
- nc.br.r = wsr
- nc.bw.w = &websocketWriter{w: nc.bw.w, compress: compress}
- nc.ws = true
- return nil
-}
-
-func (nc *Conn) wsClose() {
- nc.mu.Lock()
- defer nc.mu.Unlock()
- if !nc.ws {
- return
- }
- nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_)
-}
-
-func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) {
- // In some low-level unit tests it will happen...
- if nc == nil {
- return
- }
- if needsLock {
- nc.mu.Lock()
- defer nc.mu.Unlock()
- }
- nc.wsEnqueueCloseMsgLocked(status, payload)
-}
-
-func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) {
- wr, ok := nc.bw.w.(*websocketWriter)
- if !ok || wr.cmDone {
- return
- }
- statusAndPayloadLen := 2 + len(payload)
- frame := make([]byte, 2+4+statusAndPayloadLen)
- n, key := wsFillFrameHeader(frame, false, wsCloseMessage, statusAndPayloadLen)
- // Set the status
- binary.BigEndian.PutUint16(frame[n:], uint16(status))
- // If there is a payload, copy
- if len(payload) > 0 {
- copy(frame[n+2:], payload)
- }
- // Mask status + payload
- wsMaskBuf(key, frame[n:n+statusAndPayloadLen])
- wr.cm = frame
- wr.cmDone = true
- nc.bw.flush()
- if c := wr.compressor; c != nil {
- c.Close()
- }
-}
-
-func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) {
- // In some low-level unit tests it will happen...
- if nc == nil {
- return
- }
- if needsLock {
- nc.mu.Lock()
- defer nc.mu.Unlock()
- }
- wr, ok := nc.bw.w.(*websocketWriter)
- if !ok {
- return
- }
- fh, key := wsCreateFrameHeader(false, frameType, len(payload))
- wr.ctrlFrames = append(wr.ctrlFrames, fh)
- if len(payload) > 0 {
- wsMaskBuf(key, payload)
- wr.ctrlFrames = append(wr.ctrlFrames, payload)
- }
- nc.bw.flush()
-}
-
-func wsPMCExtensionSupport(header http.Header) (bool, bool) {
- for _, extensionList := range header["Sec-Websocket-Extensions"] {
- extensions := strings.Split(extensionList, ",")
- for _, extension := range extensions {
- extension = strings.Trim(extension, " \t")
- params := strings.Split(extension, ";")
- for i, p := range params {
- p = strings.Trim(p, " \t")
- if strings.EqualFold(p, wsPMCExtension) {
- var snc bool
- var cnc bool
- for j := i + 1; j < len(params); j++ {
- p = params[j]
- p = strings.Trim(p, " \t")
- if strings.EqualFold(p, wsPMCSrvNoCtx) {
- snc = true
- } else if strings.EqualFold(p, wsPMCCliNoCtx) {
- cnc = true
- }
- if snc && cnc {
- return true, true
- }
- }
- return true, false
- }
- }
- }
- }
- return false, false
-}
-
-func wsMakeChallengeKey() (string, error) {
- p := make([]byte, 16)
- if _, err := io.ReadFull(rand.Reader, p); err != nil {
- return "", err
- }
- return base64.StdEncoding.EncodeToString(p), nil
-}
-
-func wsAcceptKey(key string) string {
- h := sha1.New()
- h.Write([]byte(key))
- h.Write(wsGUID)
- return base64.StdEncoding.EncodeToString(h.Sum(nil))
-}
-
-// Returns true if the op code corresponds to a control frame.
-func wsIsControlFrame(frameType wsOpCode) bool {
- return frameType >= wsCloseMessage
-}
-
-func isWebsocketScheme(u *url.URL) bool {
- return u.Scheme == wsScheme || u.Scheme == wsSchemeTLS
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/.gitignore b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/.gitignore
deleted file mode 100644
index d23676d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/.gitignore
+++ /dev/null
@@ -1,16 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-build/
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-.idea/
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/.goreleaser.yml b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/.goreleaser.yml
deleted file mode 100644
index e5c4f15..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/.goreleaser.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-project_name: nkeys
-release:
- github:
- owner: nats-io
- name: nkeys
- name_template: '{{.Tag}}'
- draft: true
-builds:
- - id: nk
- main: ./nk/main.go
- ldflags: "-X main.Version={{.Tag}}_{{.Commit}}"
- binary: nk
- goos:
- - darwin
- - linux
- - windows
- - freebsd
- goarch:
- - amd64
- - arm
- - arm64
- - 386
- - mips64le
- - s390x
- goarm:
- - 6
- - 7
- ignore:
- - goos: darwin
- goarch: 386
- - goos: freebsd
- goarch: arm
- - goos: freebsd
- goarch: arm64
- - goos: freebsd
- goarch: 386
-
-dist: build
-
-archives:
- - name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm
- }}v{{ .Arm }}{{ end }}'
- wrap_in_directory: true
- format: zip
- files:
- - README.md
- - LICENSE
-
-checksum:
- name_template: '{{ .ProjectName }}-v{{ .Version }}-checksums.txt'
-
-snapshot:
- name_template: 'dev'
-
-nfpms:
- - file_name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Arch }}{{ if .Arm
- }}v{{ .Arm }}{{ end }}'
- maintainer: nats.io
- description: NKeys utility cli program
- vendor: nats-io
- bindir: /usr/local/bin
- formats:
- - deb
\ No newline at end of file
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/GOVERNANCE.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/GOVERNANCE.md
deleted file mode 100644
index 744d3bc..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/GOVERNANCE.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# NATS NKEYS Governance
-
-NATS NKEYS is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md).
\ No newline at end of file
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/LICENSE b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/MAINTAINERS.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/MAINTAINERS.md
deleted file mode 100644
index 2321465..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/MAINTAINERS.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Maintainers
-
-Maintainership is on a per project basis.
-
-### Maintainers
- - Derek Collison [@derekcollison](https://github.com/derekcollison)
- - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic)
- - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs)
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/README.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/README.md
deleted file mode 100644
index 17e3a8e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# NKEYS
-
-[](https://www.apache.org/licenses/LICENSE-2.0)
-[](https://goreportcard.com/report/github.com/nats-io/nkeys)
-[](https://github.com/nats-io/nkeys/actions/workflows/release.yaml/badge.svg)
-[](https://godoc.org/github.com/nats-io/nkeys)
-[](https://coveralls.io/github/nats-io/nkeys?branch=main)
-
-A public-key signature system based on [Ed25519](https://ed25519.cr.yp.to/) for the NATS ecosystem.
-
-## About
-
-The NATS ecosystem will be moving to [Ed25519](https://ed25519.cr.yp.to/) keys for identity, authentication and authorization for entities such as Accounts, Users, Servers and Clusters.
-
-Ed25519 is fast and resistant to side channel attacks. Generation of a seed key is all that is needed to be stored and kept safe, as the seed can generate both the public and private keys.
-
-The NATS system will utilize Ed25519 keys, meaning that NATS systems will never store or even have access to any private keys. Authentication will utilize a random challenge response mechanism.
-
-Dealing with 32 byte and 64 byte raw keys can be challenging. NKEYS is designed to formulate keys in a much friendlier fashion and references work done in cryptocurrencies, specifically [Stellar](https://www.stellar.org/). Bitcoin and others used a form of Base58 (or Base58Check) to encode raw keys. Stellar utilized a more traditional Base32 with a CRC16 and a version or prefix byte. NKEYS utilizes a similar format where the prefix will be 1 byte for public and private keys and will be 2 bytes for seeds. The base32 encoding of these prefixes will yield friendly human readable prefixes, e.g. '**N**' = server, '**C**' = cluster, '**O**' = operator, '**A**' = account, and '**U**' = user. '**P**' is used for private keys. For seeds, the first encoded prefix is '**S**', and the second character will be the type for the public key, e.g. "**SU**" is a seed for a user key pair, "**SA**" is a seed for an account key pair.
-
-## Installation
-
-Use the `go` command:
-
- $ go get github.com/nats-io/nkeys
-
-## nk - Command Line Utility
-
-Located under the nk [directory](https://github.com/nats-io/nkeys/tree/master/nk).
-
-## Basic API Usage
-```go
-
-// Create a new User KeyPair
-user, _ := nkeys.CreateUser()
-
-// Sign some data with a full key pair user.
-data := []byte("Hello World")
-sig, _ := user.Sign(data)
-
-// Verify the signature.
-err = user.Verify(data, sig)
-
-// Access the seed, the only thing that needs to be stored and kept safe.
-// seed = "SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY"
-seed, _ := user.Seed()
-
-// Access the public key which can be shared.
-// publicKey = "UD466L6EBCM3YY5HEGHJANNTN4LSKTSUXTH7RILHCKEQMQHTBNLHJJXT"
-publicKey, _ := user.PublicKey()
-
-// Create a full User who can sign and verify from a private seed.
-user, _ = nkeys.FromSeed(seed)
-
-// Create a User who can only verify signatures via a public key.
-user, _ = nkeys.FromPublicKey(publicKey)
-
-// Create a User KeyPair with our own random data.
-var rawSeed [32]byte
-_, err := io.ReadFull(rand.Reader, rawSeed[:]) // Or some other random source.
-user2, _ := nkeys.FromRawSeed(PrefixByteUser, rawSeed)
-
-```
-
-## License
-
-Unless otherwise noted, the NATS source files are distributed
-under the Apache Version 2.0 license found in the LICENSE file.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/TODO.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/TODO.md
deleted file mode 100644
index 2649c9e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/TODO.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# General
-
-- [ ] Child key derivation
-- [ ] Hardware support, e.g. YubiHSM
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/crc16.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/crc16.go
deleted file mode 100644
index fbe38fb..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/crc16.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nkeys
-
-// An implementation of crc16 according to CCITT standards for XMODEM.
-
-var crc16tab = [256]uint16{
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
- 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
- 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
- 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
- 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
- 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
- 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
- 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
- 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
- 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
- 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
- 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
- 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
- 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
- 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
- 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
- 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
- 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
- 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
- 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
- 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
- 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
- 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
-}
-
-// crc16 returns the 2-byte crc for the data provided.
-func crc16(data []byte) uint16 {
- var crc uint16
- for _, b := range data {
- crc = ((crc << 8) & 0xffff) ^ crc16tab[((crc>>8)^uint16(b))&0x00FF]
- }
- return crc
-}
-
-// validate will check the calculated crc16 checksum for data against the expected.
-func validate(data []byte, expected uint16) error {
- if crc16(data) != expected {
- return ErrInvalidChecksum
- }
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/creds_utils.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/creds_utils.go
deleted file mode 100644
index ecd9463..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/creds_utils.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package nkeys
-
-import (
- "bytes"
- "regexp"
- "strings"
-)
-
-var userConfigRE = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}\r?\n))`)
-
-// ParseDecoratedJWT takes a creds file and returns the JWT portion.
-func ParseDecoratedJWT(contents []byte) (string, error) {
- items := userConfigRE.FindAllSubmatch(contents, -1)
- if len(items) == 0 {
- return string(contents), nil
- }
- // First result should be the user JWT.
- // We copy here so that if the file contained a seed file too we wipe appropriately.
- raw := items[0][1]
- tmp := make([]byte, len(raw))
- copy(tmp, raw)
- return strings.TrimSpace(string(tmp)), nil
-}
-
-// ParseDecoratedNKey takes a creds file, finds the NKey portion and creates a
-// key pair from it.
-func ParseDecoratedNKey(contents []byte) (KeyPair, error) {
- var seed []byte
-
- items := userConfigRE.FindAllSubmatch(contents, -1)
- if len(items) > 1 {
- seed = items[1][1]
- } else {
- lines := bytes.Split(contents, []byte("\n"))
- for _, line := range lines {
- if bytes.HasPrefix(bytes.TrimSpace(line), []byte("SO")) ||
- bytes.HasPrefix(bytes.TrimSpace(line), []byte("SA")) ||
- bytes.HasPrefix(bytes.TrimSpace(line), []byte("SU")) {
- seed = line
- break
- }
- }
- }
- if seed == nil {
- return nil, ErrNoSeedFound
- }
- if !bytes.HasPrefix(seed, []byte("SO")) &&
- !bytes.HasPrefix(seed, []byte("SA")) &&
- !bytes.HasPrefix(seed, []byte("SU")) {
- return nil, ErrInvalidNkeySeed
- }
- kp, err := FromSeed(seed)
- if err != nil {
- return nil, err
- }
- return kp, nil
-}
-
-// ParseDecoratedUserNKey takes a creds file, finds the NKey portion and creates a
-// key pair from it. Similar to ParseDecoratedNKey but fails for non-user keys.
-func ParseDecoratedUserNKey(contents []byte) (KeyPair, error) {
- nk, err := ParseDecoratedNKey(contents)
- if err != nil {
- return nil, err
- }
- seed, err := nk.Seed()
- if err != nil {
- return nil, err
- }
- if !bytes.HasPrefix(seed, []byte("SU")) {
- return nil, ErrInvalidUserSeed
- }
- kp, err := FromSeed(seed)
- if err != nil {
- return nil, err
- }
- return kp, nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/dependencies.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/dependencies.md
deleted file mode 100644
index 370184a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/dependencies.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# External Dependencies
-
-This file lists the dependencies used in this repository.
-
-| Dependency | License |
-|-|-|
-| Go | BSD 3-Clause "New" or "Revised" License |
-| golang.org/x/crypto v0.3.0 | BSD 3-Clause "New" or "Revised" License |
-| golang.org/x/net v0.2.0 | BSD 3-Clause "New" or "Revised" License |
-| golang.org/x/sys v0.2.0 | BSD 3-Clause "New" or "Revised" License |
-| golang.org/x/term v0.2.0 | BSD 3-Clause "New" or "Revised" License |
-| golang.org/x/text v0.4.0 | BSD 3-Clause "New" or "Revised" License |
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/errors.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/errors.go
deleted file mode 100644
index a30bb96..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/errors.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nkeys
-
-// Errors
-const (
- ErrInvalidPrefixByte = nkeysError("nkeys: invalid prefix byte")
- ErrInvalidKey = nkeysError("nkeys: invalid key")
- ErrInvalidPublicKey = nkeysError("nkeys: invalid public key")
- ErrInvalidPrivateKey = nkeysError("nkeys: invalid private key")
- ErrInvalidSeedLen = nkeysError("nkeys: invalid seed length")
- ErrInvalidSeed = nkeysError("nkeys: invalid seed")
- ErrInvalidEncoding = nkeysError("nkeys: invalid encoded key")
- ErrInvalidSignature = nkeysError("nkeys: signature verification failed")
- ErrCannotSign = nkeysError("nkeys: can not sign, no private key available")
- ErrPublicKeyOnly = nkeysError("nkeys: no seed or private key available")
- ErrIncompatibleKey = nkeysError("nkeys: incompatible key")
- ErrInvalidChecksum = nkeysError("nkeys: invalid checksum")
- ErrNoSeedFound = nkeysError("nkeys: no nkey seed found")
- ErrInvalidNkeySeed = nkeysError("nkeys: doesn't contain a seed nkey")
- ErrInvalidUserSeed = nkeysError("nkeys: doesn't contain an user seed nkey")
- ErrInvalidRecipient = nkeysError("nkeys: not a valid recipient public curve key")
- ErrInvalidSender = nkeysError("nkeys: not a valid sender public curve key")
- ErrInvalidCurveKey = nkeysError("nkeys: not a valid curve key")
- ErrInvalidCurveSeed = nkeysError("nkeys: not a valid curve seed")
- ErrInvalidEncrypted = nkeysError("nkeys: encrypted input is not valid")
- ErrInvalidEncVersion = nkeysError("nkeys: encrypted input wrong version")
- ErrCouldNotDecrypt = nkeysError("nkeys: could not decrypt input")
- ErrInvalidCurveKeyOperation = nkeysError("nkeys: curve key is not valid for sign/verify")
- ErrInvalidNKeyOperation = nkeysError("nkeys: only curve key can seal/open")
- ErrCannotOpen = nkeysError("nkeys: cannot open no private curve key available")
- ErrCannotSeal = nkeysError("nkeys: cannot seal no private curve key available")
-)
-
-type nkeysError string
-
-func (e nkeysError) Error() string {
- return string(e)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/keypair.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/keypair.go
deleted file mode 100644
index 9d05518..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/keypair.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2018-2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nkeys
-
-import (
- "bytes"
- "crypto/rand"
- "io"
-
- "golang.org/x/crypto/ed25519"
-)
-
-// kp is the internal struct for a kepypair using seed.
-type kp struct {
- seed []byte
-}
-
-// All seeds are 32 bytes long.
-const seedLen = 32
-
-// CreatePair will create a KeyPair based on the rand entropy and a type/prefix byte.
-func CreatePair(prefix PrefixByte) (KeyPair, error) {
- return CreatePairWithRand(prefix, rand.Reader)
-}
-
-// CreatePair will create a KeyPair based on the rand reader and a type/prefix byte. rand can be nil.
-func CreatePairWithRand(prefix PrefixByte, rr io.Reader) (KeyPair, error) {
- if prefix == PrefixByteCurve {
- return CreateCurveKeysWithRand(rr)
- }
- if rr == nil {
- rr = rand.Reader
- }
- var rawSeed [seedLen]byte
-
- _, err := io.ReadFull(rr, rawSeed[:])
- if err != nil {
- return nil, err
- }
-
- seed, err := EncodeSeed(prefix, rawSeed[:])
- if err != nil {
- return nil, err
- }
- return &kp{seed}, nil
-}
-
-// rawSeed will return the raw, decoded 64 byte seed.
-func (pair *kp) rawSeed() ([]byte, error) {
- _, raw, err := DecodeSeed(pair.seed)
- return raw, err
-}
-
-// keys will return a 32 byte public key and a 64 byte private key utilizing the seed.
-func (pair *kp) keys() (ed25519.PublicKey, ed25519.PrivateKey, error) {
- raw, err := pair.rawSeed()
- if err != nil {
- return nil, nil, err
- }
- return ed25519.GenerateKey(bytes.NewReader(raw))
-}
-
-// Wipe will randomize the contents of the seed key
-func (pair *kp) Wipe() {
- io.ReadFull(rand.Reader, pair.seed)
- pair.seed = nil
-}
-
-// Seed will return the encoded seed.
-func (pair *kp) Seed() ([]byte, error) {
- return pair.seed, nil
-}
-
-// PublicKey will return the encoded public key associated with the KeyPair.
-// All KeyPairs have a public key.
-func (pair *kp) PublicKey() (string, error) {
- public, raw, err := DecodeSeed(pair.seed)
- if err != nil {
- return "", err
- }
- pub, _, err := ed25519.GenerateKey(bytes.NewReader(raw))
- if err != nil {
- return "", err
- }
- pk, err := Encode(public, pub)
- if err != nil {
- return "", err
- }
- return string(pk), nil
-}
-
-// PrivateKey will return the encoded private key for KeyPair.
-func (pair *kp) PrivateKey() ([]byte, error) {
- _, priv, err := pair.keys()
- if err != nil {
- return nil, err
- }
- return Encode(PrefixBytePrivate, priv)
-}
-
-// Sign will sign the input with KeyPair's private key.
-func (pair *kp) Sign(input []byte) ([]byte, error) {
- _, priv, err := pair.keys()
- if err != nil {
- return nil, err
- }
- return ed25519.Sign(priv, input), nil
-}
-
-// Verify will verify the input against a signature utilizing the public key.
-func (pair *kp) Verify(input []byte, sig []byte) error {
- pub, _, err := pair.keys()
- if err != nil {
- return err
- }
- if !ed25519.Verify(pub, input, sig) {
- return ErrInvalidSignature
- }
- return nil
-}
-
-// Seal is only supported on CurveKeyPair
-func (pair *kp) Seal(input []byte, recipient string) ([]byte, error) {
- return nil, ErrInvalidNKeyOperation
-}
-
-// SealWithRand is only supported on CurveKeyPair
-func (pair *kp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) {
- return nil, ErrInvalidNKeyOperation
-}
-
-// Open is only supported on CurveKey
-func (pair *kp) Open(input []byte, sender string) ([]byte, error) {
- return nil, ErrInvalidNKeyOperation
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/nkeys.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/nkeys.go
deleted file mode 100644
index 6f1ba20..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/nkeys.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2018-2019 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package nkeys is an Ed25519 based public-key signature system that simplifies keys and seeds
-// and performs signing and verification.
-// It also supports encryption via x25519 keys and is compatible with https://pkg.go.dev/golang.org/x/crypto/nacl/box.
-package nkeys
-
-import "io"
-
-// Version is our current version
-const Version = "0.4.7"
-
-// KeyPair provides the central interface to nkeys.
-type KeyPair interface {
- Seed() ([]byte, error)
- PublicKey() (string, error)
- PrivateKey() ([]byte, error)
- // Sign is only supported on Non CurveKeyPairs
- Sign(input []byte) ([]byte, error)
- // Verify is only supported on Non CurveKeyPairs
- Verify(input []byte, sig []byte) error
- Wipe()
- // Seal is only supported on CurveKeyPair
- Seal(input []byte, recipient string) ([]byte, error)
- // SealWithRand is only supported on CurveKeyPair
- SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error)
- // Open is only supported on CurveKey
- Open(input []byte, sender string) ([]byte, error)
-}
-
-// CreateUser will create a User typed KeyPair.
-func CreateUser() (KeyPair, error) {
- return CreatePair(PrefixByteUser)
-}
-
-// CreateAccount will create an Account typed KeyPair.
-func CreateAccount() (KeyPair, error) {
- return CreatePair(PrefixByteAccount)
-}
-
-// CreateServer will create a Server typed KeyPair.
-func CreateServer() (KeyPair, error) {
- return CreatePair(PrefixByteServer)
-}
-
-// CreateCluster will create a Cluster typed KeyPair.
-func CreateCluster() (KeyPair, error) {
- return CreatePair(PrefixByteCluster)
-}
-
-// CreateOperator will create an Operator typed KeyPair.
-func CreateOperator() (KeyPair, error) {
- return CreatePair(PrefixByteOperator)
-}
-
-// FromPublicKey will create a KeyPair capable of verifying signatures.
-func FromPublicKey(public string) (KeyPair, error) {
- raw, err := decode([]byte(public))
- if err != nil {
- return nil, err
- }
- pre := PrefixByte(raw[0])
- if err := checkValidPublicPrefixByte(pre); err != nil {
- return nil, ErrInvalidPublicKey
- }
- return &pub{pre, raw[1:]}, nil
-}
-
-// FromSeed will create a KeyPair capable of signing and verifying signatures.
-func FromSeed(seed []byte) (KeyPair, error) {
- prefix, _, err := DecodeSeed(seed)
- if err != nil {
- return nil, err
- }
- if prefix == PrefixByteCurve {
- return FromCurveSeed(seed)
- }
- copy := append([]byte{}, seed...)
- return &kp{copy}, nil
-}
-
-// FromRawSeed will create a KeyPair from the raw 32 byte seed for a given type.
-func FromRawSeed(prefix PrefixByte, rawSeed []byte) (KeyPair, error) {
- seed, err := EncodeSeed(prefix, rawSeed)
- if err != nil {
- return nil, err
- }
- return &kp{seed}, nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/public.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/public.go
deleted file mode 100644
index c3cd21e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/public.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nkeys
-
-import (
- "crypto/rand"
- "io"
-
- "golang.org/x/crypto/ed25519"
-)
-
-// A KeyPair from a public key capable of verifying only.
-type pub struct {
- pre PrefixByte
- pub ed25519.PublicKey
-}
-
-// PublicKey will return the encoded public key associated with the KeyPair.
-// All KeyPairs have a public key.
-func (p *pub) PublicKey() (string, error) {
- pk, err := Encode(p.pre, p.pub)
- if err != nil {
- return "", err
- }
- return string(pk), nil
-}
-
-// Seed will return an error since this is not available for public key only KeyPairs.
-func (p *pub) Seed() ([]byte, error) {
- return nil, ErrPublicKeyOnly
-}
-
-// PrivateKey will return an error since this is not available for public key only KeyPairs.
-func (p *pub) PrivateKey() ([]byte, error) {
- return nil, ErrPublicKeyOnly
-}
-
-// Sign will return an error since this is not available for public key only KeyPairs.
-func (p *pub) Sign(input []byte) ([]byte, error) {
- return nil, ErrCannotSign
-}
-
-// Verify will verify the input against a signature utilizing the public key.
-func (p *pub) Verify(input []byte, sig []byte) error {
- if !ed25519.Verify(p.pub, input, sig) {
- return ErrInvalidSignature
- }
- return nil
-}
-
-// Wipe will randomize the public key and erase the pre byte.
-func (p *pub) Wipe() {
- p.pre = '0'
- io.ReadFull(rand.Reader, p.pub)
-}
-
-func (p *pub) Seal(input []byte, recipient string) ([]byte, error) {
- if p.pre == PrefixByteCurve {
- return nil, ErrCannotSeal
- }
- return nil, ErrInvalidNKeyOperation
-}
-func (p *pub) SealWithRand(input []byte, _recipient string, rr io.Reader) ([]byte, error) {
- if p.pre == PrefixByteCurve {
- return nil, ErrCannotSeal
- }
- return nil, ErrInvalidNKeyOperation
-}
-
-func (p *pub) Open(input []byte, sender string) ([]byte, error) {
- if p.pre == PrefixByteCurve {
- return nil, ErrCannotOpen
- }
- return nil, ErrInvalidNKeyOperation
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/strkey.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/strkey.go
deleted file mode 100644
index 8ae3311..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/strkey.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2018-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nkeys
-
-import (
- "bytes"
- "encoding/base32"
- "encoding/binary"
-)
-
-// PrefixByte is a lead byte representing the type.
-type PrefixByte byte
-
-const (
- // PrefixByteSeed is the version byte used for encoded NATS Seeds
- PrefixByteSeed PrefixByte = 18 << 3 // Base32-encodes to 'S...'
-
- // PrefixBytePrivate is the version byte used for encoded NATS Private keys
- PrefixBytePrivate PrefixByte = 15 << 3 // Base32-encodes to 'P...'
-
- // PrefixByteServer is the version byte used for encoded NATS Servers
- PrefixByteServer PrefixByte = 13 << 3 // Base32-encodes to 'N...'
-
- // PrefixByteCluster is the version byte used for encoded NATS Clusters
- PrefixByteCluster PrefixByte = 2 << 3 // Base32-encodes to 'C...'
-
- // PrefixByteOperator is the version byte used for encoded NATS Operators
- PrefixByteOperator PrefixByte = 14 << 3 // Base32-encodes to 'O...'
-
- // PrefixByteAccount is the version byte used for encoded NATS Accounts
- PrefixByteAccount PrefixByte = 0 // Base32-encodes to 'A...'
-
- // PrefixByteUser is the version byte used for encoded NATS Users
- PrefixByteUser PrefixByte = 20 << 3 // Base32-encodes to 'U...'
-
- // PrefixByteCurve is the version byte used for encoded CurveKeys (X25519)
- PrefixByteCurve PrefixByte = 23 << 3 // Base32-encodes to 'X...'
-
- // PrefixByteUnknown is for unknown prefixes.
- PrefixByteUnknown PrefixByte = 25 << 3 // Base32-encodes to 'Z...'
-)
-
-// Set our encoding to not include padding '=='
-var b32Enc = base32.StdEncoding.WithPadding(base32.NoPadding)
-
-// Encode will encode a raw key or seed with the prefix and crc16 and then base32 encoded.
-func Encode(prefix PrefixByte, src []byte) ([]byte, error) {
- if err := checkValidPrefixByte(prefix); err != nil {
- return nil, err
- }
-
- var raw bytes.Buffer
-
- // write prefix byte
- if err := raw.WriteByte(byte(prefix)); err != nil {
- return nil, err
- }
-
- // write payload
- if _, err := raw.Write(src); err != nil {
- return nil, err
- }
-
- // Calculate and write crc16 checksum
- err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes()))
- if err != nil {
- return nil, err
- }
-
- data := raw.Bytes()
- buf := make([]byte, b32Enc.EncodedLen(len(data)))
- b32Enc.Encode(buf, data)
- return buf[:], nil
-}
-
-// EncodeSeed will encode a raw key with the prefix and then seed prefix and crc16 and then base32 encoded.
-// `src` must be 32 bytes long (ed25519.SeedSize).
-func EncodeSeed(public PrefixByte, src []byte) ([]byte, error) {
- if err := checkValidPublicPrefixByte(public); err != nil {
- return nil, err
- }
-
- if len(src) != seedLen {
- return nil, ErrInvalidSeedLen
- }
-
- // In order to make this human printable for both bytes, we need to do a little
- // bit manipulation to setup for base32 encoding which takes 5 bits at a time.
- b1 := byte(PrefixByteSeed) | (byte(public) >> 5)
- b2 := (byte(public) & 31) << 3 // 31 = 00011111
-
- var raw bytes.Buffer
-
- raw.WriteByte(b1)
- raw.WriteByte(b2)
-
- // write payload
- if _, err := raw.Write(src); err != nil {
- return nil, err
- }
-
- // Calculate and write crc16 checksum
- err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes()))
- if err != nil {
- return nil, err
- }
-
- data := raw.Bytes()
- buf := make([]byte, b32Enc.EncodedLen(len(data)))
- b32Enc.Encode(buf, data)
- return buf, nil
-}
-
-// IsValidEncoding will tell you if the encoding is a valid key.
-func IsValidEncoding(src []byte) bool {
- _, err := decode(src)
- return err == nil
-}
-
-// decode will decode the base32 and check crc16 and the prefix for validity.
-func decode(src []byte) ([]byte, error) {
- raw := make([]byte, b32Enc.DecodedLen(len(src)))
- n, err := b32Enc.Decode(raw, src)
- if err != nil {
- return nil, err
- }
- raw = raw[:n]
-
- if n < 4 {
- return nil, ErrInvalidEncoding
- }
-
- crc := binary.LittleEndian.Uint16(raw[n-2:])
-
- // ensure checksum is valid
- if err := validate(raw[0:n-2], crc); err != nil {
- return nil, err
- }
-
- return raw[:n-2], nil
-}
-
-// Decode will decode the base32 string and check crc16 and enforce the prefix is what is expected.
-func Decode(expectedPrefix PrefixByte, src []byte) ([]byte, error) {
- if err := checkValidPrefixByte(expectedPrefix); err != nil {
- return nil, err
- }
- raw, err := decode(src)
- if err != nil {
- return nil, err
- }
- b1 := raw[0] & 248 // 248 = 11111000
- if prefix := PrefixByte(b1); prefix != expectedPrefix {
- return nil, ErrInvalidPrefixByte
- }
- return raw[1:], nil
-}
-
-// DecodeSeed will decode the base32 string and check crc16 and enforce the prefix is a seed
-// and the subsequent type is a valid type.
-func DecodeSeed(src []byte) (PrefixByte, []byte, error) {
- raw, err := decode(src)
- if err != nil {
- return PrefixByteSeed, nil, err
- }
- // Need to do the reverse here to get back to internal representation.
- b1 := raw[0] & 248 // 248 = 11111000
- b2 := (raw[0]&7)<<5 | ((raw[1] & 248) >> 3) // 7 = 00000111
-
- if PrefixByte(b1) != PrefixByteSeed {
- return PrefixByteSeed, nil, ErrInvalidSeed
- }
- if checkValidPublicPrefixByte(PrefixByte(b2)) != nil {
- return PrefixByteSeed, nil, ErrInvalidSeed
- }
- return PrefixByte(b2), raw[2:], nil
-}
-
-// Prefix returns PrefixBytes of its input
-func Prefix(src string) PrefixByte {
- b, err := decode([]byte(src))
- if err != nil {
- return PrefixByteUnknown
- }
- prefix := PrefixByte(b[0])
- err = checkValidPrefixByte(prefix)
- if err == nil {
- return prefix
- }
- // Might be a seed.
- b1 := b[0] & 248
- if PrefixByte(b1) == PrefixByteSeed {
- return PrefixByteSeed
- }
- return PrefixByteUnknown
-}
-
-// IsValidPublicKey will decode and verify that the string is a valid encoded public key.
-func IsValidPublicKey(src string) bool {
- b, err := decode([]byte(src))
- if err != nil {
- return false
- }
- if prefix := PrefixByte(b[0]); checkValidPublicPrefixByte(prefix) != nil {
- return false
- }
- return true
-}
-
-// IsValidPublicUserKey will decode and verify the string is a valid encoded Public User Key.
-func IsValidPublicUserKey(src string) bool {
- _, err := Decode(PrefixByteUser, []byte(src))
- return err == nil
-}
-
-// IsValidPublicAccountKey will decode and verify the string is a valid encoded Public Account Key.
-func IsValidPublicAccountKey(src string) bool {
- _, err := Decode(PrefixByteAccount, []byte(src))
- return err == nil
-}
-
-// IsValidPublicServerKey will decode and verify the string is a valid encoded Public Server Key.
-func IsValidPublicServerKey(src string) bool {
- _, err := Decode(PrefixByteServer, []byte(src))
- return err == nil
-}
-
-// IsValidPublicClusterKey will decode and verify the string is a valid encoded Public Cluster Key.
-func IsValidPublicClusterKey(src string) bool {
- _, err := Decode(PrefixByteCluster, []byte(src))
- return err == nil
-}
-
-// IsValidPublicOperatorKey will decode and verify the string is a valid encoded Public Operator Key.
-func IsValidPublicOperatorKey(src string) bool {
- _, err := Decode(PrefixByteOperator, []byte(src))
- return err == nil
-}
-
-// IsValidPublicCurveKey will decode and verify the string is a valid encoded Public Curve Key.
-func IsValidPublicCurveKey(src string) bool {
- _, err := Decode(PrefixByteCurve, []byte(src))
- return err == nil
-}
-
-// checkValidPrefixByte returns an error if the provided value
-// is not one of the defined valid prefix byte constants.
-func checkValidPrefixByte(prefix PrefixByte) error {
- switch prefix {
- case PrefixByteOperator, PrefixByteServer, PrefixByteCluster,
- PrefixByteAccount, PrefixByteUser, PrefixByteSeed, PrefixBytePrivate, PrefixByteCurve:
- return nil
- }
- return ErrInvalidPrefixByte
-}
-
-// checkValidPublicPrefixByte returns an error if the provided value
-// is not one of the public defined valid prefix byte constants.
-func checkValidPublicPrefixByte(prefix PrefixByte) error {
- switch prefix {
- case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, PrefixByteAccount, PrefixByteUser, PrefixByteCurve:
- return nil
- }
- return ErrInvalidPrefixByte
-}
-
-func (p PrefixByte) String() string {
- switch p {
- case PrefixByteOperator:
- return "operator"
- case PrefixByteServer:
- return "server"
- case PrefixByteCluster:
- return "cluster"
- case PrefixByteAccount:
- return "account"
- case PrefixByteUser:
- return "user"
- case PrefixByteSeed:
- return "seed"
- case PrefixBytePrivate:
- return "private"
- case PrefixByteCurve:
- return "x25519"
- }
- return "unknown"
-}
-
-// CompatibleKeyPair returns an error if the KeyPair doesn't match expected PrefixByte(s)
-func CompatibleKeyPair(kp KeyPair, expected ...PrefixByte) error {
- pk, err := kp.PublicKey()
- if err != nil {
- return err
- }
- pkType := Prefix(pk)
- for _, k := range expected {
- if pkType == k {
- return nil
- }
- }
-
- return ErrIncompatibleKey
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/xkeys.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/xkeys.go
deleted file mode 100644
index 78f8b99..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nkeys/xkeys.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2022-2023 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nkeys
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/binary"
- "io"
-
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/nacl/box"
-)
-
-// This package will support safe use of X25519 keys for asymmetric encryption.
-// We will be compatible with nacl.Box, but generate random nonces automatically.
-// We may add more advanced options in the future for group recipients and better
-// end to end algorithms.
-
-const (
- curveKeyLen = 32
- curveDecodeLen = 35
- curveNonceLen = 24
-)
-
-type ckp struct {
- seed [curveKeyLen]byte // Private raw key.
-}
-
-// CreateCurveKeys will create a Curve typed KeyPair.
-func CreateCurveKeys() (KeyPair, error) {
- return CreateCurveKeysWithRand(rand.Reader)
-}
-
-// CreateCurveKeysWithRand will create a Curve typed KeyPair
-// with specified rand source.
-func CreateCurveKeysWithRand(rr io.Reader) (KeyPair, error) {
- var kp ckp
- _, err := io.ReadFull(rr, kp.seed[:])
- if err != nil {
- return nil, err
- }
- return &kp, nil
-}
-
-// Will create a curve key pair from seed.
-func FromCurveSeed(seed []byte) (KeyPair, error) {
- pb, raw, err := DecodeSeed(seed)
- if err != nil {
- return nil, err
- }
- if pb != PrefixByteCurve || len(raw) != curveKeyLen {
- return nil, ErrInvalidCurveSeed
- }
- var kp ckp
- copy(kp.seed[:], raw)
- return &kp, nil
-}
-
-// Seed will return the encoded seed.
-func (pair *ckp) Seed() ([]byte, error) {
- return EncodeSeed(PrefixByteCurve, pair.seed[:])
-}
-
-// PublicKey will return the encoded public key.
-func (pair *ckp) PublicKey() (string, error) {
- var pub [curveKeyLen]byte
- curve25519.ScalarBaseMult(&pub, &pair.seed)
- key, err := Encode(PrefixByteCurve, pub[:])
- return string(key), err
-}
-
-// PrivateKey will return the encoded private key.
-func (pair *ckp) PrivateKey() ([]byte, error) {
- return Encode(PrefixBytePrivate, pair.seed[:])
-}
-
-func decodePubCurveKey(src string, dest []byte) error {
- var raw [curveDecodeLen]byte // should always be 35
- n, err := b32Enc.Decode(raw[:], []byte(src))
- if err != nil {
- return err
- }
- if n != curveDecodeLen {
- return ErrInvalidCurveKey
- }
- // Make sure it is what we expected.
- if prefix := PrefixByte(raw[0]); prefix != PrefixByteCurve {
- return ErrInvalidPublicKey
- }
- var crc uint16
- end := n - 2
- sum := raw[end:n]
- checksum := bytes.NewReader(sum)
- if err := binary.Read(checksum, binary.LittleEndian, &crc); err != nil {
- return err
- }
-
- // ensure checksum is valid
- if err := validate(raw[:end], crc); err != nil {
- return err
- }
-
- // Copy over, ignore prefix byte.
- copy(dest, raw[1:end])
- return nil
-}
-
-// Only version for now, but could add in X3DH in the future, etc.
-const XKeyVersionV1 = "xkv1"
-const vlen = len(XKeyVersionV1)
-
-// Seal is compatible with nacl.Box.Seal() and can be used in similar situations for small messages.
-// We generate the nonce from crypto rand by default.
-func (pair *ckp) Seal(input []byte, recipient string) ([]byte, error) {
- return pair.SealWithRand(input, recipient, rand.Reader)
-}
-
-func (pair *ckp) SealWithRand(input []byte, recipient string, rr io.Reader) ([]byte, error) {
- var (
- rpub [curveKeyLen]byte
- nonce [curveNonceLen]byte
- out [vlen + curveNonceLen]byte
- err error
- )
-
- if err = decodePubCurveKey(recipient, rpub[:]); err != nil {
- return nil, ErrInvalidRecipient
- }
- if _, err := io.ReadFull(rr, nonce[:]); err != nil {
- return nil, err
- }
- copy(out[:vlen], []byte(XKeyVersionV1))
- copy(out[vlen:], nonce[:])
- return box.Seal(out[:], input, &nonce, &rpub, &pair.seed), nil
-}
-
-func (pair *ckp) Open(input []byte, sender string) ([]byte, error) {
- if len(input) <= vlen+curveNonceLen {
- return nil, ErrInvalidEncrypted
- }
- var (
- spub [curveKeyLen]byte
- nonce [curveNonceLen]byte
- err error
- )
- if !bytes.Equal(input[:vlen], []byte(XKeyVersionV1)) {
- return nil, ErrInvalidEncVersion
- }
- copy(nonce[:], input[vlen:vlen+curveNonceLen])
-
- if err = decodePubCurveKey(sender, spub[:]); err != nil {
- return nil, ErrInvalidSender
- }
-
- decrypted, ok := box.Open(nil, input[vlen+curveNonceLen:], &nonce, &spub, &pair.seed)
- if !ok {
- return nil, ErrCouldNotDecrypt
- }
- return decrypted, nil
-}
-
-// Wipe will randomize the contents of the secret key
-func (pair *ckp) Wipe() {
- io.ReadFull(rand.Reader, pair.seed[:])
-}
-
-func (pair *ckp) Sign(_ []byte) ([]byte, error) {
- return nil, ErrInvalidCurveKeyOperation
-}
-
-func (pair *ckp) Verify(_ []byte, _ []byte) error {
- return ErrInvalidCurveKeyOperation
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/.gitignore b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/.gitignore
deleted file mode 100644
index daf913b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/.travis.yml b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/.travis.yml
deleted file mode 100644
index 52be726..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-sudo: false
-go:
-- 1.9.x
-- 1.10.x
-
-install:
-- go get -t ./...
-- go get github.com/mattn/goveralls
-
-script:
-- go fmt ./...
-- go vet ./...
-- go test -v
-- go test -v --race
-- go test -v -covermode=count -coverprofile=coverage.out
-- $HOME/gopath/bin/goveralls -coverprofile coverage.out -service travis-ci
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/GOVERNANCE.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/GOVERNANCE.md
deleted file mode 100644
index 01aee70..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/GOVERNANCE.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# NATS NUID Governance
-
-NATS NUID is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md).
\ No newline at end of file
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/LICENSE b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/MAINTAINERS.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/MAINTAINERS.md
deleted file mode 100644
index 6d0ed3e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/MAINTAINERS.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Maintainers
-
-Maintainership is on a per project basis.
-
-### Core-maintainers
- - Derek Collison [@derekcollison](https://github.com/derekcollison)
\ No newline at end of file
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/README.md b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/README.md
deleted file mode 100644
index 16e5394..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# NUID
-
-[](https://www.apache.org/licenses/LICENSE-2.0)
-[](http://goreportcard.com/report/nats-io/nuid)
-[](http://travis-ci.org/nats-io/nuid)
-[](https://github.com/nats-io/nuid/releases/tag/v1.0.1)
-[](http://godoc.org/github.com/nats-io/nuid)
-[](https://coveralls.io/github/nats-io/nuid?branch=master)
-
-A highly performant unique identifier generator.
-
-## Installation
-
-Use the `go` command:
-
- $ go get github.com/nats-io/nuid
-
-## Basic Usage
-```go
-
-// Utilize the global locked instance
-nuid := nuid.Next()
-
-// Create an instance, these are not locked.
-n := nuid.New()
-nuid = n.Next()
-
-// Generate a new crypto/rand seeded prefix.
-// Generally not needed, happens automatically.
-n.RandomizePrefix()
-```
-
-## Performance
-NUID needs to be very fast to generate and be truly unique, all while being entropy pool friendly.
-NUID uses 12 bytes of crypto generated data (entropy draining), and 10 bytes of pseudo-random
-sequential data that increments with a pseudo-random increment.
-
-Total length of a NUID string is 22 bytes of base 62 ascii text, so 62^22 or
-2707803647802660400290261537185326956544 possibilities.
-
-NUID can generate identifiers as fast as 60ns, or ~16 million per second. There is an associated
-benchmark you can use to test performance on your own hardware.
-
-## License
-
-Unless otherwise noted, the NATS source files are distributed
-under the Apache Version 2.0 license found in the LICENSE file.
diff --git a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/nuid.go b/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/nuid.go
deleted file mode 100644
index 8134c76..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/github.com/nats-io/nuid/nuid.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2016-2019 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly.
-package nuid
-
-import (
- "crypto/rand"
- "fmt"
- "math"
- "math/big"
- "sync"
- "time"
-
- prand "math/rand"
-)
-
-// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly.
-// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data
-// that is started at a pseudo random number and increments with a pseudo-random increment.
-// Total is 22 bytes of base 62 ascii text :)
-
-// Version of the library
-const Version = "1.0.1"
-
-const (
- digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
- base = 62
- preLen = 12
- seqLen = 10
- maxSeq = int64(839299365868340224) // base^seqLen == 62^10
- minInc = int64(33)
- maxInc = int64(333)
- totalLen = preLen + seqLen
-)
-
-type NUID struct {
- pre []byte
- seq int64
- inc int64
-}
-
-type lockedNUID struct {
- sync.Mutex
- *NUID
-}
-
-// Global NUID
-var globalNUID *lockedNUID
-
-// Seed sequential random with crypto or math/random and current time
-// and generate crypto prefix.
-func init() {
- r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
- if err != nil {
- prand.Seed(time.Now().UnixNano())
- } else {
- prand.Seed(r.Int64())
- }
- globalNUID = &lockedNUID{NUID: New()}
- globalNUID.RandomizePrefix()
-}
-
-// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment.
-func New() *NUID {
- n := &NUID{
- seq: prand.Int63n(maxSeq),
- inc: minInc + prand.Int63n(maxInc-minInc),
- pre: make([]byte, preLen),
- }
- n.RandomizePrefix()
- return n
-}
-
-// Generate the next NUID string from the global locked NUID instance.
-func Next() string {
- globalNUID.Lock()
- nuid := globalNUID.Next()
- globalNUID.Unlock()
- return nuid
-}
-
-// Generate the next NUID string.
-func (n *NUID) Next() string {
- // Increment and capture.
- n.seq += n.inc
- if n.seq >= maxSeq {
- n.RandomizePrefix()
- n.resetSequential()
- }
- seq := n.seq
-
- // Copy prefix
- var b [totalLen]byte
- bs := b[:preLen]
- copy(bs, n.pre)
-
- // copy in the seq in base62.
- for i, l := len(b), seq; i > preLen; l /= base {
- i -= 1
- b[i] = digits[l%base]
- }
- return string(b[:])
-}
-
-// Resets the sequential portion of the NUID.
-func (n *NUID) resetSequential() {
- n.seq = prand.Int63n(maxSeq)
- n.inc = minInc + prand.Int63n(maxInc-minInc)
-}
-
-// Generate a new prefix from crypto/rand.
-// This call *can* drain entropy and will be called automatically when we exhaust the sequential range.
-// Will panic if it gets an error from rand.Int()
-func (n *NUID) RandomizePrefix() {
- var cb [preLen]byte
- cbs := cb[:]
- if nb, err := rand.Read(cbs); nb != preLen || err != nil {
- panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err))
- }
-
- for i := 0; i < preLen; i++ {
- n.pre[i] = digits[int(cbs[i])%base]
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/LICENSE b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/PATENTS b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/PATENTS
deleted file mode 100644
index 7330990..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b.go
deleted file mode 100644
index d2e98d4..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
-// and the extendable output function (XOF) BLAKE2Xb.
-//
-// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and
-// produces digests of any size between 1 and 64 bytes.
-// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
-// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
-//
-// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
-// If you need a secret-key MAC (message authentication code), use the New512
-// function with a non-nil key.
-//
-// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
-// can produce hash values between 0 and 4 GiB.
-package blake2b
-
-import (
- "encoding/binary"
- "errors"
- "hash"
-)
-
-const (
- // The blocksize of BLAKE2b in bytes.
- BlockSize = 128
- // The hash size of BLAKE2b-512 in bytes.
- Size = 64
- // The hash size of BLAKE2b-384 in bytes.
- Size384 = 48
- // The hash size of BLAKE2b-256 in bytes.
- Size256 = 32
-)
-
-var (
- useAVX2 bool
- useAVX bool
- useSSE4 bool
-)
-
-var (
- errKeySize = errors.New("blake2b: invalid key size")
- errHashSize = errors.New("blake2b: invalid hash size")
-)
-
-var iv = [8]uint64{
- 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
- 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
-}
-
-// Sum512 returns the BLAKE2b-512 checksum of the data.
-func Sum512(data []byte) [Size]byte {
- var sum [Size]byte
- checkSum(&sum, Size, data)
- return sum
-}
-
-// Sum384 returns the BLAKE2b-384 checksum of the data.
-func Sum384(data []byte) [Size384]byte {
- var sum [Size]byte
- var sum384 [Size384]byte
- checkSum(&sum, Size384, data)
- copy(sum384[:], sum[:Size384])
- return sum384
-}
-
-// Sum256 returns the BLAKE2b-256 checksum of the data.
-func Sum256(data []byte) [Size256]byte {
- var sum [Size]byte
- var sum256 [Size256]byte
- checkSum(&sum, Size256, data)
- copy(sum256[:], sum[:Size256])
- return sum256
-}
-
-// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
-// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
-
-// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
-// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
-
-// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
-// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
-
-// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
-// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long.
-// The hash size can be a value between 1 and 64 but it is highly recommended to use
-// values equal or greater than:
-// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).
-// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long).
-// When the key is nil, the returned hash.Hash implements BinaryMarshaler
-// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash.
-func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) }
-
-func newDigest(hashSize int, key []byte) (*digest, error) {
- if hashSize < 1 || hashSize > Size {
- return nil, errHashSize
- }
- if len(key) > Size {
- return nil, errKeySize
- }
- d := &digest{
- size: hashSize,
- keyLen: len(key),
- }
- copy(d.key[:], key)
- d.Reset()
- return d, nil
-}
-
-func checkSum(sum *[Size]byte, hashSize int, data []byte) {
- h := iv
- h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
- var c [2]uint64
-
- if length := len(data); length > BlockSize {
- n := length &^ (BlockSize - 1)
- if length == n {
- n -= BlockSize
- }
- hashBlocks(&h, &c, 0, data[:n])
- data = data[n:]
- }
-
- var block [BlockSize]byte
- offset := copy(block[:], data)
- remaining := uint64(BlockSize - offset)
- if c[0] < remaining {
- c[1]--
- }
- c[0] -= remaining
-
- hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
-
- for i, v := range h[:(hashSize+7)/8] {
- binary.LittleEndian.PutUint64(sum[8*i:], v)
- }
-}
-
-type digest struct {
- h [8]uint64
- c [2]uint64
- size int
- block [BlockSize]byte
- offset int
-
- key [BlockSize]byte
- keyLen int
-}
-
-const (
- magic = "b2b"
- marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1
-)
-
-func (d *digest) MarshalBinary() ([]byte, error) {
- if d.keyLen != 0 {
- return nil, errors.New("crypto/blake2b: cannot marshal MACs")
- }
- b := make([]byte, 0, marshaledSize)
- b = append(b, magic...)
- for i := 0; i < 8; i++ {
- b = appendUint64(b, d.h[i])
- }
- b = appendUint64(b, d.c[0])
- b = appendUint64(b, d.c[1])
- // Maximum value for size is 64
- b = append(b, byte(d.size))
- b = append(b, d.block[:]...)
- b = append(b, byte(d.offset))
- return b, nil
-}
-
-func (d *digest) UnmarshalBinary(b []byte) error {
- if len(b) < len(magic) || string(b[:len(magic)]) != magic {
- return errors.New("crypto/blake2b: invalid hash state identifier")
- }
- if len(b) != marshaledSize {
- return errors.New("crypto/blake2b: invalid hash state size")
- }
- b = b[len(magic):]
- for i := 0; i < 8; i++ {
- b, d.h[i] = consumeUint64(b)
- }
- b, d.c[0] = consumeUint64(b)
- b, d.c[1] = consumeUint64(b)
- d.size = int(b[0])
- b = b[1:]
- copy(d.block[:], b[:BlockSize])
- b = b[BlockSize:]
- d.offset = int(b[0])
- return nil
-}
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Size() int { return d.size }
-
-func (d *digest) Reset() {
- d.h = iv
- d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
- d.offset, d.c[0], d.c[1] = 0, 0, 0
- if d.keyLen > 0 {
- d.block = d.key
- d.offset = BlockSize
- }
-}
-
-func (d *digest) Write(p []byte) (n int, err error) {
- n = len(p)
-
- if d.offset > 0 {
- remaining := BlockSize - d.offset
- if n <= remaining {
- d.offset += copy(d.block[d.offset:], p)
- return
- }
- copy(d.block[d.offset:], p[:remaining])
- hashBlocks(&d.h, &d.c, 0, d.block[:])
- d.offset = 0
- p = p[remaining:]
- }
-
- if length := len(p); length > BlockSize {
- nn := length &^ (BlockSize - 1)
- if length == nn {
- nn -= BlockSize
- }
- hashBlocks(&d.h, &d.c, 0, p[:nn])
- p = p[nn:]
- }
-
- if len(p) > 0 {
- d.offset += copy(d.block[:], p)
- }
-
- return
-}
-
-func (d *digest) Sum(sum []byte) []byte {
- var hash [Size]byte
- d.finalize(&hash)
- return append(sum, hash[:d.size]...)
-}
-
-func (d *digest) finalize(hash *[Size]byte) {
- var block [BlockSize]byte
- copy(block[:], d.block[:d.offset])
- remaining := uint64(BlockSize - d.offset)
-
- c := d.c
- if c[0] < remaining {
- c[1]--
- }
- c[0] -= remaining
-
- h := d.h
- hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
-
- for i, v := range h {
- binary.LittleEndian.PutUint64(hash[8*i:], v)
- }
-}
-
-func appendUint64(b []byte, x uint64) []byte {
- var a [8]byte
- binary.BigEndian.PutUint64(a[:], x)
- return append(b, a[:]...)
-}
-
-func appendUint32(b []byte, x uint32) []byte {
- var a [4]byte
- binary.BigEndian.PutUint32(a[:], x)
- return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
- x := binary.BigEndian.Uint64(b)
- return b[8:], x
-}
-
-func consumeUint32(b []byte) ([]byte, uint32) {
- x := binary.BigEndian.Uint32(b)
- return b[4:], x
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
deleted file mode 100644
index 199c21d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-package blake2b
-
-import "golang.org/x/sys/cpu"
-
-func init() {
- useAVX2 = cpu.X86.HasAVX2
- useAVX = cpu.X86.HasAVX
- useSSE4 = cpu.X86.HasSSE41
-}
-
-//go:noescape
-func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-
-//go:noescape
-func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-
-//go:noescape
-func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-
-func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
- switch {
- case useAVX2:
- hashBlocksAVX2(h, c, flag, blocks)
- case useAVX:
- hashBlocksAVX(h, c, flag, blocks)
- case useSSE4:
- hashBlocksSSE4(h, c, flag, blocks)
- default:
- hashBlocksGeneric(h, c, flag, blocks)
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
deleted file mode 100644
index 9ae8206..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
+++ /dev/null
@@ -1,744 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-#include "textflag.h"
-
-DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
-
-#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
-#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
-#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
-#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
-#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
-
-#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
- VPADDQ m0, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFD $-79, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPSHUFB c40, Y1, Y1; \
- VPADDQ m1, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFB c48, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPADDQ Y1, Y1, t; \
- VPSRLQ $63, Y1, Y1; \
- VPXOR t, Y1, Y1; \
- VPERMQ_0x39_Y1_Y1; \
- VPERMQ_0x4E_Y2_Y2; \
- VPERMQ_0x93_Y3_Y3; \
- VPADDQ m2, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFD $-79, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPSHUFB c40, Y1, Y1; \
- VPADDQ m3, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFB c48, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPADDQ Y1, Y1, t; \
- VPSRLQ $63, Y1, Y1; \
- VPXOR t, Y1, Y1; \
- VPERMQ_0x39_Y3_Y3; \
- VPERMQ_0x4E_Y2_Y2; \
- VPERMQ_0x93_Y1_Y1
-
-#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
-#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
-#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
-#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
-#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
-
-#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
-#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
-#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
-#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
-#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
-
-#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
-#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
-#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
-#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
-#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
-
-#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
-
-#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
-#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
-
-// load msg: Y12 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
- VMOVQ_SI_X12(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X12(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y12, Y12
-
-// load msg: Y13 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
- VMOVQ_SI_X13(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X13(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y13, Y13
-
-// load msg: Y14 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
- VMOVQ_SI_X14(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X14(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y14, Y14
-
-// load msg: Y15 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
- VMOVQ_SI_X15(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X15(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
- VMOVQ_SI_X12_0; \
- VMOVQ_SI_X11(4*8); \
- VPINSRQ_1_SI_X12(2*8); \
- VPINSRQ_1_SI_X11(6*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
- LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
- LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
-
-#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
- LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
- LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
- VMOVQ_SI_X11(11*8); \
- VPSHUFD $0x4E, 0*8(SI), X14; \
- VPINSRQ_1_SI_X11(5*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
-
-#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
- VMOVQ_SI_X11(5*8); \
- VMOVDQU 11*8(SI), X12; \
- VPINSRQ_1_SI_X11(15*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- VMOVQ_SI_X13(8*8); \
- VMOVQ_SI_X11(2*8); \
- VPINSRQ_1_SI_X13_0; \
- VPINSRQ_1_SI_X11(13*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
- LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
-
-#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
- LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
- LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
- LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
- VMOVQ_SI_X15(6*8); \
- VMOVQ_SI_X11_0; \
- VPINSRQ_1_SI_X15(10*8); \
- VPINSRQ_1_SI_X11(8*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
- LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
- VMOVQ_SI_X13_0; \
- VMOVQ_SI_X11(4*8); \
- VPINSRQ_1_SI_X13(7*8); \
- VPINSRQ_1_SI_X11(15*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
- LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
-
-#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X11_0; \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X11(8*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
- LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
- LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
-
-#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
- LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
- LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
- VMOVQ_SI_X14_0; \
- VPSHUFD $0x4E, 8*8(SI), X11; \
- VPINSRQ_1_SI_X14(6*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
-
-#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
- LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
- LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
- LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
- VMOVQ_SI_X15_0; \
- VMOVQ_SI_X11(6*8); \
- VPINSRQ_1_SI_X15(4*8); \
- VPINSRQ_1_SI_X11(10*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
- VMOVQ_SI_X12(6*8); \
- VMOVQ_SI_X11(11*8); \
- VPINSRQ_1_SI_X12(14*8); \
- VPINSRQ_1_SI_X11_0; \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
- VMOVQ_SI_X11(1*8); \
- VMOVDQU 12*8(SI), X14; \
- VPINSRQ_1_SI_X11(10*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- VMOVQ_SI_X15(2*8); \
- VMOVDQU 4*8(SI), X11; \
- VPINSRQ_1_SI_X15(7*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
- LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
- VMOVQ_SI_X13(2*8); \
- VPSHUFD $0x4E, 5*8(SI), X11; \
- VPINSRQ_1_SI_X13(4*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
- VMOVQ_SI_X15(11*8); \
- VMOVQ_SI_X11(12*8); \
- VPINSRQ_1_SI_X15(14*8); \
- VPINSRQ_1_SI_X11_0; \
- VINSERTI128 $1, X11, Y15, Y15
-
-// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, DX
- ADDQ $31, DX
- ANDQ $~31, DX
-
- MOVQ CX, 16(DX)
- XORQ CX, CX
- MOVQ CX, 24(DX)
-
- VMOVDQU ·AVX2_c40<>(SB), Y4
- VMOVDQU ·AVX2_c48<>(SB), Y5
-
- VMOVDQU 0(AX), Y8
- VMOVDQU 32(AX), Y9
- VMOVDQU ·AVX2_iv0<>(SB), Y6
- VMOVDQU ·AVX2_iv1<>(SB), Y7
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
- MOVQ R9, 8(DX)
-
-loop:
- ADDQ $128, R8
- MOVQ R8, 0(DX)
- CMPQ R8, $128
- JGE noinc
- INCQ R9
- MOVQ R9, 8(DX)
-
-noinc:
- VMOVDQA Y8, Y0
- VMOVDQA Y9, Y1
- VMOVDQA Y6, Y2
- VPXOR 0(DX), Y7, Y3
-
- LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
- VMOVDQA Y12, 32(DX)
- VMOVDQA Y13, 64(DX)
- VMOVDQA Y14, 96(DX)
- VMOVDQA Y15, 128(DX)
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
- VMOVDQA Y12, 160(DX)
- VMOVDQA Y13, 192(DX)
- VMOVDQA Y14, 224(DX)
- VMOVDQA Y15, 256(DX)
-
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-
- ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5)
- ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5)
-
- VPXOR Y0, Y8, Y8
- VPXOR Y1, Y9, Y9
- VPXOR Y2, Y8, Y8
- VPXOR Y3, Y9, Y9
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
-
- VMOVDQU Y8, 0(AX)
- VMOVDQU Y9, 32(AX)
- VZEROUPPER
-
- RET
-
-#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
-#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
-#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
-#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
-#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
-
-#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
-#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
-#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
-#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
-#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
-#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
-#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
-#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
-
-#define SHUFFLE_AVX() \
- VMOVDQA X6, X13; \
- VMOVDQA X2, X14; \
- VMOVDQA X4, X6; \
- VPUNPCKLQDQ_X13_X13_X15; \
- VMOVDQA X5, X4; \
- VMOVDQA X6, X5; \
- VPUNPCKHQDQ_X15_X7_X6; \
- VPUNPCKLQDQ_X7_X7_X15; \
- VPUNPCKHQDQ_X15_X13_X7; \
- VPUNPCKLQDQ_X3_X3_X15; \
- VPUNPCKHQDQ_X15_X2_X2; \
- VPUNPCKLQDQ_X14_X14_X15; \
- VPUNPCKHQDQ_X15_X3_X3; \
-
-#define SHUFFLE_AVX_INV() \
- VMOVDQA X2, X13; \
- VMOVDQA X4, X14; \
- VPUNPCKLQDQ_X2_X2_X15; \
- VMOVDQA X5, X4; \
- VPUNPCKHQDQ_X15_X3_X2; \
- VMOVDQA X14, X5; \
- VPUNPCKLQDQ_X3_X3_X15; \
- VMOVDQA X6, X14; \
- VPUNPCKHQDQ_X15_X13_X3; \
- VPUNPCKLQDQ_X7_X7_X15; \
- VPUNPCKHQDQ_X15_X6_X6; \
- VPUNPCKLQDQ_X14_X14_X15; \
- VPUNPCKHQDQ_X15_X7_X7; \
-
-#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
- VPADDQ m0, v0, v0; \
- VPADDQ v2, v0, v0; \
- VPADDQ m1, v1, v1; \
- VPADDQ v3, v1, v1; \
- VPXOR v0, v6, v6; \
- VPXOR v1, v7, v7; \
- VPSHUFD $-79, v6, v6; \
- VPSHUFD $-79, v7, v7; \
- VPADDQ v6, v4, v4; \
- VPADDQ v7, v5, v5; \
- VPXOR v4, v2, v2; \
- VPXOR v5, v3, v3; \
- VPSHUFB c40, v2, v2; \
- VPSHUFB c40, v3, v3; \
- VPADDQ m2, v0, v0; \
- VPADDQ v2, v0, v0; \
- VPADDQ m3, v1, v1; \
- VPADDQ v3, v1, v1; \
- VPXOR v0, v6, v6; \
- VPXOR v1, v7, v7; \
- VPSHUFB c48, v6, v6; \
- VPSHUFB c48, v7, v7; \
- VPADDQ v6, v4, v4; \
- VPADDQ v7, v5, v5; \
- VPXOR v4, v2, v2; \
- VPXOR v5, v3, v3; \
- VPADDQ v2, v2, t0; \
- VPSRLQ $63, v2, v2; \
- VPXOR t0, v2, v2; \
- VPADDQ v3, v3, t0; \
- VPSRLQ $63, v3, v3; \
- VPXOR t0, v3, v3
-
-// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
-// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
-#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
- VMOVQ_SI_X12(i0*8); \
- VMOVQ_SI_X13(i2*8); \
- VMOVQ_SI_X14(i4*8); \
- VMOVQ_SI_X15(i6*8); \
- VPINSRQ_1_SI_X12(i1*8); \
- VPINSRQ_1_SI_X13(i3*8); \
- VPINSRQ_1_SI_X14(i5*8); \
- VPINSRQ_1_SI_X15(i7*8)
-
-// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
-#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
- VMOVQ_SI_X12_0; \
- VMOVQ_SI_X13(4*8); \
- VMOVQ_SI_X14(1*8); \
- VMOVQ_SI_X15(5*8); \
- VPINSRQ_1_SI_X12(2*8); \
- VPINSRQ_1_SI_X13(6*8); \
- VPINSRQ_1_SI_X14(3*8); \
- VPINSRQ_1_SI_X15(7*8)
-
-// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
-#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
- VPSHUFD $0x4E, 0*8(SI), X12; \
- VMOVQ_SI_X13(11*8); \
- VMOVQ_SI_X14(12*8); \
- VMOVQ_SI_X15(7*8); \
- VPINSRQ_1_SI_X13(5*8); \
- VPINSRQ_1_SI_X14(2*8); \
- VPINSRQ_1_SI_X15(3*8)
-
-// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
-#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
- VMOVDQU 11*8(SI), X12; \
- VMOVQ_SI_X13(5*8); \
- VMOVQ_SI_X14(8*8); \
- VMOVQ_SI_X15(2*8); \
- VPINSRQ_1_SI_X13(15*8); \
- VPINSRQ_1_SI_X14_0; \
- VPINSRQ_1_SI_X15(13*8)
-
-// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
-#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X13(4*8); \
- VMOVQ_SI_X14(6*8); \
- VMOVQ_SI_X15_0; \
- VPINSRQ_1_SI_X12(5*8); \
- VPINSRQ_1_SI_X13(15*8); \
- VPINSRQ_1_SI_X14(10*8); \
- VPINSRQ_1_SI_X15(8*8)
-
-// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
-#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
- VMOVQ_SI_X12(9*8); \
- VMOVQ_SI_X13(2*8); \
- VMOVQ_SI_X14_0; \
- VMOVQ_SI_X15(4*8); \
- VPINSRQ_1_SI_X12(5*8); \
- VPINSRQ_1_SI_X13(10*8); \
- VPINSRQ_1_SI_X14(7*8); \
- VPINSRQ_1_SI_X15(15*8)
-
-// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
-#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X13_0; \
- VMOVQ_SI_X14(12*8); \
- VMOVQ_SI_X15(11*8); \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X13(8*8); \
- VPINSRQ_1_SI_X14(10*8); \
- VPINSRQ_1_SI_X15(3*8)
-
-// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
-#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
- MOVQ 0*8(SI), X12; \
- VPSHUFD $0x4E, 8*8(SI), X13; \
- MOVQ 7*8(SI), X14; \
- MOVQ 2*8(SI), X15; \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X14(3*8); \
- VPINSRQ_1_SI_X15(11*8)
-
-// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
-#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
- MOVQ 6*8(SI), X12; \
- MOVQ 11*8(SI), X13; \
- MOVQ 15*8(SI), X14; \
- MOVQ 3*8(SI), X15; \
- VPINSRQ_1_SI_X12(14*8); \
- VPINSRQ_1_SI_X13_0; \
- VPINSRQ_1_SI_X14(9*8); \
- VPINSRQ_1_SI_X15(8*8)
-
-// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
-#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
- MOVQ 5*8(SI), X12; \
- MOVQ 8*8(SI), X13; \
- MOVQ 0*8(SI), X14; \
- MOVQ 6*8(SI), X15; \
- VPINSRQ_1_SI_X12(15*8); \
- VPINSRQ_1_SI_X13(2*8); \
- VPINSRQ_1_SI_X14(4*8); \
- VPINSRQ_1_SI_X15(10*8)
-
-// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
-#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
- VMOVDQU 12*8(SI), X12; \
- MOVQ 1*8(SI), X13; \
- MOVQ 2*8(SI), X14; \
- VPINSRQ_1_SI_X13(10*8); \
- VPINSRQ_1_SI_X14(7*8); \
- VMOVDQU 4*8(SI), X15
-
-// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
-#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
- MOVQ 15*8(SI), X12; \
- MOVQ 3*8(SI), X13; \
- MOVQ 11*8(SI), X14; \
- MOVQ 12*8(SI), X15; \
- VPINSRQ_1_SI_X12(9*8); \
- VPINSRQ_1_SI_X13(13*8); \
- VPINSRQ_1_SI_X14(14*8); \
- VPINSRQ_1_SI_X15_0
-
-// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, R10
- ADDQ $15, R10
- ANDQ $~15, R10
-
- VMOVDQU ·AVX_c40<>(SB), X0
- VMOVDQU ·AVX_c48<>(SB), X1
- VMOVDQA X0, X8
- VMOVDQA X1, X9
-
- VMOVDQU ·AVX_iv3<>(SB), X0
- VMOVDQA X0, 0(R10)
- XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0)
-
- VMOVDQU 0(AX), X10
- VMOVDQU 16(AX), X11
- VMOVDQU 32(AX), X2
- VMOVDQU 48(AX), X3
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
-
-loop:
- ADDQ $128, R8
- CMPQ R8, $128
- JGE noinc
- INCQ R9
-
-noinc:
- VMOVQ_R8_X15
- VPINSRQ_1_R9_X15
-
- VMOVDQA X10, X0
- VMOVDQA X11, X1
- VMOVDQU ·AVX_iv0<>(SB), X4
- VMOVDQU ·AVX_iv1<>(SB), X5
- VMOVDQU ·AVX_iv2<>(SB), X6
-
- VPXOR X15, X6, X6
- VMOVDQA 0(R10), X7
-
- LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
- VMOVDQA X12, 16(R10)
- VMOVDQA X13, 32(R10)
- VMOVDQA X14, 48(R10)
- VMOVDQA X15, 64(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
- VMOVDQA X12, 80(R10)
- VMOVDQA X13, 96(R10)
- VMOVDQA X14, 112(R10)
- VMOVDQA X15, 128(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
- VMOVDQA X12, 144(R10)
- VMOVDQA X13, 160(R10)
- VMOVDQA X14, 176(R10)
- VMOVDQA X15, 192(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
- VMOVDQA X12, 208(R10)
- VMOVDQA X13, 224(R10)
- VMOVDQA X14, 240(R10)
- VMOVDQA X15, 256(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9)
- SHUFFLE_AVX()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9)
- SHUFFLE_AVX()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- VMOVDQU 32(AX), X14
- VMOVDQU 48(AX), X15
- VPXOR X0, X10, X10
- VPXOR X1, X11, X11
- VPXOR X2, X14, X14
- VPXOR X3, X15, X15
- VPXOR X4, X10, X10
- VPXOR X5, X11, X11
- VPXOR X6, X14, X2
- VPXOR X7, X15, X3
- VMOVDQU X2, 32(AX)
- VMOVDQU X3, 48(AX)
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- VMOVDQU X10, 0(AX)
- VMOVDQU X11, 16(AX)
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
- VZEROUPPER
-
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
deleted file mode 100644
index adfac00..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && gc && !purego
-
-#include "textflag.h"
-
-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v6, t1; \
- PUNPCKLQDQ v6, t2; \
- PUNPCKHQDQ v7, v6; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ v7, t2; \
- MOVO t1, v7; \
- MOVO v2, t1; \
- PUNPCKHQDQ t2, v7; \
- PUNPCKLQDQ v3, t2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v2, t1; \
- PUNPCKLQDQ v2, t2; \
- PUNPCKHQDQ v3, v2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ v3, t2; \
- MOVO t1, v3; \
- MOVO v6, t1; \
- PUNPCKHQDQ t2, v3; \
- PUNPCKLQDQ v7, t2; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
- PADDQ m0, v0; \
- PADDQ m1, v1; \
- PADDQ v2, v0; \
- PADDQ v3, v1; \
- PXOR v0, v6; \
- PXOR v1, v7; \
- PSHUFD $0xB1, v6, v6; \
- PSHUFD $0xB1, v7, v7; \
- PADDQ v6, v4; \
- PADDQ v7, v5; \
- PXOR v4, v2; \
- PXOR v5, v3; \
- PSHUFB c40, v2; \
- PSHUFB c40, v3; \
- PADDQ m2, v0; \
- PADDQ m3, v1; \
- PADDQ v2, v0; \
- PADDQ v3, v1; \
- PXOR v0, v6; \
- PXOR v1, v7; \
- PSHUFB c48, v6; \
- PSHUFB c48, v7; \
- PADDQ v6, v4; \
- PADDQ v7, v5; \
- PXOR v4, v2; \
- PXOR v5, v3; \
- MOVOU v2, t0; \
- PADDQ v2, t0; \
- PSRLQ $63, v2; \
- PXOR t0, v2; \
- MOVOU v3, t0; \
- PADDQ v3, t0; \
- PSRLQ $63, v3; \
- PXOR t0, v3
-
-#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
- MOVQ i0*8(src), m0; \
- PINSRQ $1, i1*8(src), m0; \
- MOVQ i2*8(src), m1; \
- PINSRQ $1, i3*8(src), m1; \
- MOVQ i4*8(src), m2; \
- PINSRQ $1, i5*8(src), m2; \
- MOVQ i6*8(src), m3; \
- PINSRQ $1, i7*8(src), m3
-
-// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, R10
- ADDQ $15, R10
- ANDQ $~15, R10
-
- MOVOU ·iv3<>(SB), X0
- MOVO X0, 0(R10)
- XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0)
-
- MOVOU ·c40<>(SB), X13
- MOVOU ·c48<>(SB), X14
-
- MOVOU 0(AX), X12
- MOVOU 16(AX), X15
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
-
-loop:
- ADDQ $128, R8
- CMPQ R8, $128
- JGE noinc
- INCQ R9
-
-noinc:
- MOVQ R8, X8
- PINSRQ $1, R9, X8
-
- MOVO X12, X0
- MOVO X15, X1
- MOVOU 32(AX), X2
- MOVOU 48(AX), X3
- MOVOU ·iv0<>(SB), X4
- MOVOU ·iv1<>(SB), X5
- MOVOU ·iv2<>(SB), X6
-
- PXOR X8, X6
- MOVO 0(R10), X7
-
- LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
- MOVO X8, 16(R10)
- MOVO X9, 32(R10)
- MOVO X10, 48(R10)
- MOVO X11, 64(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
- MOVO X8, 80(R10)
- MOVO X9, 96(R10)
- MOVO X10, 112(R10)
- MOVO X11, 128(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
- MOVO X8, 144(R10)
- MOVO X9, 160(R10)
- MOVO X10, 176(R10)
- MOVO X11, 192(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
- MOVO X8, 208(R10)
- MOVO X9, 224(R10)
- MOVO X10, 240(R10)
- MOVO X11, 256(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- MOVOU 32(AX), X10
- MOVOU 48(AX), X11
- PXOR X0, X12
- PXOR X1, X15
- PXOR X2, X10
- PXOR X3, X11
- PXOR X4, X12
- PXOR X5, X15
- PXOR X6, X10
- PXOR X7, X11
- MOVOU X10, 32(AX)
- MOVOU X11, 48(AX)
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- MOVOU X12, 0(AX)
- MOVOU X15, 16(AX)
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
-
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
deleted file mode 100644
index 3168a8a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blake2b
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// the precomputed values for BLAKE2b
-// there are 12 16-byte arrays - one for each round
-// the entries are calculated from the sigma constants.
-var precomputed = [12][16]byte{
- {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
- {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
- {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
- {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
- {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
- {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
- {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
- {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
- {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
- {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
- {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first
- {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second
-}
-
-func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
- var m [16]uint64
- c0, c1 := c[0], c[1]
-
- for i := 0; i < len(blocks); {
- c0 += BlockSize
- if c0 < BlockSize {
- c1++
- }
-
- v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
- v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
- v12 ^= c0
- v13 ^= c1
- v14 ^= flag
-
- for j := range m {
- m[j] = binary.LittleEndian.Uint64(blocks[i:])
- i += 8
- }
-
- for j := range precomputed {
- s := &(precomputed[j])
-
- v0 += m[s[0]]
- v0 += v4
- v12 ^= v0
- v12 = bits.RotateLeft64(v12, -32)
- v8 += v12
- v4 ^= v8
- v4 = bits.RotateLeft64(v4, -24)
- v1 += m[s[1]]
- v1 += v5
- v13 ^= v1
- v13 = bits.RotateLeft64(v13, -32)
- v9 += v13
- v5 ^= v9
- v5 = bits.RotateLeft64(v5, -24)
- v2 += m[s[2]]
- v2 += v6
- v14 ^= v2
- v14 = bits.RotateLeft64(v14, -32)
- v10 += v14
- v6 ^= v10
- v6 = bits.RotateLeft64(v6, -24)
- v3 += m[s[3]]
- v3 += v7
- v15 ^= v3
- v15 = bits.RotateLeft64(v15, -32)
- v11 += v15
- v7 ^= v11
- v7 = bits.RotateLeft64(v7, -24)
-
- v0 += m[s[4]]
- v0 += v4
- v12 ^= v0
- v12 = bits.RotateLeft64(v12, -16)
- v8 += v12
- v4 ^= v8
- v4 = bits.RotateLeft64(v4, -63)
- v1 += m[s[5]]
- v1 += v5
- v13 ^= v1
- v13 = bits.RotateLeft64(v13, -16)
- v9 += v13
- v5 ^= v9
- v5 = bits.RotateLeft64(v5, -63)
- v2 += m[s[6]]
- v2 += v6
- v14 ^= v2
- v14 = bits.RotateLeft64(v14, -16)
- v10 += v14
- v6 ^= v10
- v6 = bits.RotateLeft64(v6, -63)
- v3 += m[s[7]]
- v3 += v7
- v15 ^= v3
- v15 = bits.RotateLeft64(v15, -16)
- v11 += v15
- v7 ^= v11
- v7 = bits.RotateLeft64(v7, -63)
-
- v0 += m[s[8]]
- v0 += v5
- v15 ^= v0
- v15 = bits.RotateLeft64(v15, -32)
- v10 += v15
- v5 ^= v10
- v5 = bits.RotateLeft64(v5, -24)
- v1 += m[s[9]]
- v1 += v6
- v12 ^= v1
- v12 = bits.RotateLeft64(v12, -32)
- v11 += v12
- v6 ^= v11
- v6 = bits.RotateLeft64(v6, -24)
- v2 += m[s[10]]
- v2 += v7
- v13 ^= v2
- v13 = bits.RotateLeft64(v13, -32)
- v8 += v13
- v7 ^= v8
- v7 = bits.RotateLeft64(v7, -24)
- v3 += m[s[11]]
- v3 += v4
- v14 ^= v3
- v14 = bits.RotateLeft64(v14, -32)
- v9 += v14
- v4 ^= v9
- v4 = bits.RotateLeft64(v4, -24)
-
- v0 += m[s[12]]
- v0 += v5
- v15 ^= v0
- v15 = bits.RotateLeft64(v15, -16)
- v10 += v15
- v5 ^= v10
- v5 = bits.RotateLeft64(v5, -63)
- v1 += m[s[13]]
- v1 += v6
- v12 ^= v1
- v12 = bits.RotateLeft64(v12, -16)
- v11 += v12
- v6 ^= v11
- v6 = bits.RotateLeft64(v6, -63)
- v2 += m[s[14]]
- v2 += v7
- v13 ^= v2
- v13 = bits.RotateLeft64(v13, -16)
- v8 += v13
- v7 ^= v8
- v7 = bits.RotateLeft64(v7, -63)
- v3 += m[s[15]]
- v3 += v4
- v14 ^= v3
- v14 = bits.RotateLeft64(v14, -16)
- v9 += v14
- v4 ^= v9
- v4 = bits.RotateLeft64(v4, -63)
-
- }
-
- h[0] ^= v0 ^ v8
- h[1] ^= v1 ^ v9
- h[2] ^= v2 ^ v10
- h[3] ^= v3 ^ v11
- h[4] ^= v4 ^ v12
- h[5] ^= v5 ^ v13
- h[6] ^= v6 ^ v14
- h[7] ^= v7 ^ v15
- }
- c[0], c[1] = c0, c1
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
deleted file mode 100644
index 6e28668..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || purego || !gc
-
-package blake2b
-
-func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
- hashBlocksGeneric(h, c, flag, blocks)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2x.go
deleted file mode 100644
index 52c414d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/blake2x.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blake2b
-
-import (
- "encoding/binary"
- "errors"
- "io"
-)
-
-// XOF defines the interface to hash functions that
-// support arbitrary-length output.
-type XOF interface {
- // Write absorbs more data into the hash's state. It panics if called
- // after Read.
- io.Writer
-
- // Read reads more output from the hash. It returns io.EOF if the limit
- // has been reached.
- io.Reader
-
- // Clone returns a copy of the XOF in its current state.
- Clone() XOF
-
- // Reset resets the XOF to its initial state.
- Reset()
-}
-
-// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
-// the length of the output is not known in advance.
-const OutputLengthUnknown = 0
-
-// magicUnknownOutputLength is a magic value for the output size that indicates
-// an unknown number of output bytes.
-const magicUnknownOutputLength = (1 << 32) - 1
-
-// maxOutputLength is the absolute maximum number of bytes to produce when the
-// number of output bytes is unknown.
-const maxOutputLength = (1 << 32) * 64
-
-// NewXOF creates a new variable-output-length hash. The hash either produce a
-// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
-// (size == OutputLengthUnknown). In the latter case, an absolute limit of
-// 256GiB applies.
-//
-// A non-nil key turns the hash into a MAC. The key must between
-// zero and 32 bytes long.
-func NewXOF(size uint32, key []byte) (XOF, error) {
- if len(key) > Size {
- return nil, errKeySize
- }
- if size == magicUnknownOutputLength {
- // 2^32-1 indicates an unknown number of bytes and thus isn't a
- // valid length.
- return nil, errors.New("blake2b: XOF length too large")
- }
- if size == OutputLengthUnknown {
- size = magicUnknownOutputLength
- }
- x := &xof{
- d: digest{
- size: Size,
- keyLen: len(key),
- },
- length: size,
- }
- copy(x.d.key[:], key)
- x.Reset()
- return x, nil
-}
-
-type xof struct {
- d digest
- length uint32
- remaining uint64
- cfg, root, block [Size]byte
- offset int
- nodeOffset uint32
- readMode bool
-}
-
-func (x *xof) Write(p []byte) (n int, err error) {
- if x.readMode {
- panic("blake2b: write to XOF after read")
- }
- return x.d.Write(p)
-}
-
-func (x *xof) Clone() XOF {
- clone := *x
- return &clone
-}
-
-func (x *xof) Reset() {
- x.cfg[0] = byte(Size)
- binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
- binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length
- x.cfg[17] = byte(Size) // inner hash size
-
- x.d.Reset()
- x.d.h[1] ^= uint64(x.length) << 32
-
- x.remaining = uint64(x.length)
- if x.remaining == magicUnknownOutputLength {
- x.remaining = maxOutputLength
- }
- x.offset, x.nodeOffset = 0, 0
- x.readMode = false
-}
-
-func (x *xof) Read(p []byte) (n int, err error) {
- if !x.readMode {
- x.d.finalize(&x.root)
- x.readMode = true
- }
-
- if x.remaining == 0 {
- return 0, io.EOF
- }
-
- n = len(p)
- if uint64(n) > x.remaining {
- n = int(x.remaining)
- p = p[:n]
- }
-
- if x.offset > 0 {
- blockRemaining := Size - x.offset
- if n < blockRemaining {
- x.offset += copy(p, x.block[x.offset:])
- x.remaining -= uint64(n)
- return
- }
- copy(p, x.block[x.offset:])
- p = p[blockRemaining:]
- x.offset = 0
- x.remaining -= uint64(blockRemaining)
- }
-
- for len(p) >= Size {
- binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
- x.nodeOffset++
-
- x.d.initConfig(&x.cfg)
- x.d.Write(x.root[:])
- x.d.finalize(&x.block)
-
- copy(p, x.block[:])
- p = p[Size:]
- x.remaining -= uint64(Size)
- }
-
- if todo := len(p); todo > 0 {
- if x.remaining < uint64(Size) {
- x.cfg[0] = byte(x.remaining)
- }
- binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
- x.nodeOffset++
-
- x.d.initConfig(&x.cfg)
- x.d.Write(x.root[:])
- x.d.finalize(&x.block)
-
- x.offset = copy(p, x.block[:todo])
- x.remaining -= uint64(todo)
- }
- return
-}
-
-func (d *digest) initConfig(cfg *[Size]byte) {
- d.offset, d.c[0], d.c[1] = 0, 0, 0
- for i := range d.h {
- d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:])
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/register.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/register.go
deleted file mode 100644
index 54e446e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/blake2b/register.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package blake2b
-
-import (
- "crypto"
- "hash"
-)
-
-func init() {
- newHash256 := func() hash.Hash {
- h, _ := New256(nil)
- return h
- }
- newHash384 := func() hash.Hash {
- h, _ := New384(nil)
- return h
- }
-
- newHash512 := func() hash.Hash {
- h, _ := New512(nil)
- return h
- }
-
- crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
- crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
- crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519.go
deleted file mode 100644
index 00f963e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package curve25519 provides an implementation of the X25519 function, which
-// performs scalar multiplication on the elliptic curve known as Curve25519.
-// See RFC 7748.
-//
-// Starting in Go 1.20, this package is a wrapper for the X25519 implementation
-// in the crypto/ecdh package.
-package curve25519 // import "golang.org/x/crypto/curve25519"
-
-// ScalarMult sets dst to the product scalar * point.
-//
-// Deprecated: when provided a low-order point, ScalarMult will set dst to all
-// zeroes, irrespective of the scalar. Instead, use the X25519 function, which
-// will return an error.
-func ScalarMult(dst, scalar, point *[32]byte) {
- scalarMult(dst, scalar, point)
-}
-
-// ScalarBaseMult sets dst to the product scalar * base where base is the
-// standard generator.
-//
-// It is recommended to use the X25519 function with Basepoint instead, as
-// copying into fixed size arrays can lead to unexpected bugs.
-func ScalarBaseMult(dst, scalar *[32]byte) {
- scalarBaseMult(dst, scalar)
-}
-
-const (
- // ScalarSize is the size of the scalar input to X25519.
- ScalarSize = 32
- // PointSize is the size of the point input to X25519.
- PointSize = 32
-)
-
-// Basepoint is the canonical Curve25519 generator.
-var Basepoint []byte
-
-var basePoint = [32]byte{9}
-
-func init() { Basepoint = basePoint[:] }
-
-// X25519 returns the result of the scalar multiplication (scalar * point),
-// according to RFC 7748, Section 5. scalar, point and the return value are
-// slices of 32 bytes.
-//
-// scalar can be generated at random, for example with crypto/rand. point should
-// be either Basepoint or the output of another X25519 call.
-//
-// If point is Basepoint (but not if it's a different slice with the same
-// contents) a precomputed implementation might be used for performance.
-func X25519(scalar, point []byte) ([]byte, error) {
- // Outline the body of function, to let the allocation be inlined in the
- // caller, and possibly avoid escaping to the heap.
- var dst [32]byte
- return x25519(&dst, scalar, point)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go
deleted file mode 100644
index ba647e8..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519_compat.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.20
-
-package curve25519
-
-import (
- "crypto/subtle"
- "errors"
- "strconv"
-
- "golang.org/x/crypto/curve25519/internal/field"
-)
-
-func scalarMult(dst, scalar, point *[32]byte) {
- var e [32]byte
-
- copy(e[:], scalar[:])
- e[0] &= 248
- e[31] &= 127
- e[31] |= 64
-
- var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element
- x1.SetBytes(point[:])
- x2.One()
- x3.Set(&x1)
- z3.One()
-
- swap := 0
- for pos := 254; pos >= 0; pos-- {
- b := e[pos/8] >> uint(pos&7)
- b &= 1
- swap ^= int(b)
- x2.Swap(&x3, swap)
- z2.Swap(&z3, swap)
- swap = int(b)
-
- tmp0.Subtract(&x3, &z3)
- tmp1.Subtract(&x2, &z2)
- x2.Add(&x2, &z2)
- z2.Add(&x3, &z3)
- z3.Multiply(&tmp0, &x2)
- z2.Multiply(&z2, &tmp1)
- tmp0.Square(&tmp1)
- tmp1.Square(&x2)
- x3.Add(&z3, &z2)
- z2.Subtract(&z3, &z2)
- x2.Multiply(&tmp1, &tmp0)
- tmp1.Subtract(&tmp1, &tmp0)
- z2.Square(&z2)
-
- z3.Mult32(&tmp1, 121666)
- x3.Square(&x3)
- tmp0.Add(&tmp0, &z3)
- z3.Multiply(&x1, &z2)
- z2.Multiply(&tmp1, &tmp0)
- }
-
- x2.Swap(&x3, swap)
- z2.Swap(&z3, swap)
-
- z2.Invert(&z2)
- x2.Multiply(&x2, &z2)
- copy(dst[:], x2.Bytes())
-}
-
-func scalarBaseMult(dst, scalar *[32]byte) {
- checkBasepoint()
- scalarMult(dst, scalar, &basePoint)
-}
-
-func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
- var in [32]byte
- if l := len(scalar); l != 32 {
- return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
- }
- if l := len(point); l != 32 {
- return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
- }
- copy(in[:], scalar)
- if &point[0] == &Basepoint[0] {
- scalarBaseMult(dst, &in)
- } else {
- var base, zero [32]byte
- copy(base[:], point)
- scalarMult(dst, &in, &base)
- if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
- return nil, errors.New("bad input point: low order point")
- }
- }
- return dst[:], nil
-}
-
-func checkBasepoint() {
- if subtle.ConstantTimeCompare(Basepoint, []byte{
- 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- }) != 1 {
- panic("curve25519: global Basepoint value was modified")
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go
deleted file mode 100644
index 627df49..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/curve25519_go120.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package curve25519
-
-import "crypto/ecdh"
-
-func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
- curve := ecdh.X25519()
- pub, err := curve.NewPublicKey(point)
- if err != nil {
- return nil, err
- }
- priv, err := curve.NewPrivateKey(scalar)
- if err != nil {
- return nil, err
- }
- out, err := priv.ECDH(pub)
- if err != nil {
- return nil, err
- }
- copy(dst[:], out)
- return dst[:], nil
-}
-
-func scalarMult(dst, scalar, point *[32]byte) {
- if _, err := x25519(dst, scalar[:], point[:]); err != nil {
- // The only error condition for x25519 when the inputs are 32 bytes long
- // is if the output would have been the all-zero value.
- for i := range dst {
- dst[i] = 0
- }
- }
-}
-
-func scalarBaseMult(dst, scalar *[32]byte) {
- curve := ecdh.X25519()
- priv, err := curve.NewPrivateKey(scalar[:])
- if err != nil {
- panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
- }
- copy(dst[:], priv.PublicKey().Bytes())
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/README b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/README
deleted file mode 100644
index e25bca7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/README
+++ /dev/null
@@ -1,7 +0,0 @@
-This package is kept in sync with crypto/ed25519/internal/edwards25519/field in
-the standard library.
-
-If there are any changes in the standard library that need to be synced to this
-package, run sync.sh. It will not overwrite any local changes made since the
-previous sync, so it's ok to land changes in this package first, and then sync
-to the standard library later.
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
deleted file mode 100644
index ca841ad..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package field implements fast arithmetic modulo 2^255-19.
-package field
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "math/bits"
-)
-
-// Element represents an element of the field GF(2^255-19). Note that this
-// is not a cryptographically secure group, and should only be used to interact
-// with edwards25519.Point coordinates.
-//
-// This type works similarly to math/big.Int, and all arguments and receivers
-// are allowed to alias.
-//
-// The zero value is a valid zero element.
-type Element struct {
- // An element t represents the integer
- // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
- //
- // Between operations, all limbs are expected to be lower than 2^52.
- l0 uint64
- l1 uint64
- l2 uint64
- l3 uint64
- l4 uint64
-}
-
-const maskLow51Bits uint64 = (1 << 51) - 1
-
-var feZero = &Element{0, 0, 0, 0, 0}
-
-// Zero sets v = 0, and returns v.
-func (v *Element) Zero() *Element {
- *v = *feZero
- return v
-}
-
-var feOne = &Element{1, 0, 0, 0, 0}
-
-// One sets v = 1, and returns v.
-func (v *Element) One() *Element {
- *v = *feOne
- return v
-}
-
-// reduce reduces v modulo 2^255 - 19 and returns it.
-func (v *Element) reduce() *Element {
- v.carryPropagate()
-
- // After the light reduction we now have a field element representation
- // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
-
- // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
- // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
- c := (v.l0 + 19) >> 51
- c = (v.l1 + c) >> 51
- c = (v.l2 + c) >> 51
- c = (v.l3 + c) >> 51
- c = (v.l4 + c) >> 51
-
- // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
- // effectively applying the reduction identity to the carry.
- v.l0 += 19 * c
-
- v.l1 += v.l0 >> 51
- v.l0 = v.l0 & maskLow51Bits
- v.l2 += v.l1 >> 51
- v.l1 = v.l1 & maskLow51Bits
- v.l3 += v.l2 >> 51
- v.l2 = v.l2 & maskLow51Bits
- v.l4 += v.l3 >> 51
- v.l3 = v.l3 & maskLow51Bits
- // no additional carry
- v.l4 = v.l4 & maskLow51Bits
-
- return v
-}
-
-// Add sets v = a + b, and returns v.
-func (v *Element) Add(a, b *Element) *Element {
- v.l0 = a.l0 + b.l0
- v.l1 = a.l1 + b.l1
- v.l2 = a.l2 + b.l2
- v.l3 = a.l3 + b.l3
- v.l4 = a.l4 + b.l4
- // Using the generic implementation here is actually faster than the
- // assembly. Probably because the body of this function is so simple that
- // the compiler can figure out better optimizations by inlining the carry
- // propagation. TODO
- return v.carryPropagateGeneric()
-}
-
-// Subtract sets v = a - b, and returns v.
-func (v *Element) Subtract(a, b *Element) *Element {
- // We first add 2 * p, to guarantee the subtraction won't underflow, and
- // then subtract b (which can be up to 2^255 + 2^13 * 19).
- v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
- v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
- v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
- v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
- v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
- return v.carryPropagate()
-}
-
-// Negate sets v = -a, and returns v.
-func (v *Element) Negate(a *Element) *Element {
- return v.Subtract(feZero, a)
-}
-
-// Invert sets v = 1/z mod p, and returns v.
-//
-// If z == 0, Invert returns v = 0.
-func (v *Element) Invert(z *Element) *Element {
- // Inversion is implemented as exponentiation with exponent p − 2. It uses the
- // same sequence of 255 squarings and 11 multiplications as [Curve25519].
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
-
- z2.Square(z) // 2
- t.Square(&z2) // 4
- t.Square(&t) // 8
- z9.Multiply(&t, z) // 9
- z11.Multiply(&z9, &z2) // 11
- t.Square(&z11) // 22
- z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
-
- t.Square(&z2_5_0) // 2^6 - 2^1
- for i := 0; i < 4; i++ {
- t.Square(&t) // 2^10 - 2^5
- }
- z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
-
- t.Square(&z2_10_0) // 2^11 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^20 - 2^10
- }
- z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
-
- t.Square(&z2_20_0) // 2^21 - 2^1
- for i := 0; i < 19; i++ {
- t.Square(&t) // 2^40 - 2^20
- }
- t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
-
- t.Square(&t) // 2^41 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^50 - 2^10
- }
- z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
-
- t.Square(&z2_50_0) // 2^51 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^100 - 2^50
- }
- z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
-
- t.Square(&z2_100_0) // 2^101 - 2^1
- for i := 0; i < 99; i++ {
- t.Square(&t) // 2^200 - 2^100
- }
- t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
-
- t.Square(&t) // 2^201 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^250 - 2^50
- }
- t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
-
- t.Square(&t) // 2^251 - 2^1
- t.Square(&t) // 2^252 - 2^2
- t.Square(&t) // 2^253 - 2^3
- t.Square(&t) // 2^254 - 2^4
- t.Square(&t) // 2^255 - 2^5
-
- return v.Multiply(&t, &z11) // 2^255 - 21
-}
-
-// Set sets v = a, and returns v.
-func (v *Element) Set(a *Element) *Element {
- *v = *a
- return v
-}
-
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
-//
-// Consistent with RFC 7748, the most significant bit (the high bit of the
-// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
- if len(x) != 32 {
- panic("edwards25519: invalid field element input size")
- }
-
- // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
- v.l0 = binary.LittleEndian.Uint64(x[0:8])
- v.l0 &= maskLow51Bits
- // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
- v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
- v.l1 &= maskLow51Bits
- // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
- v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
- v.l2 &= maskLow51Bits
- // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
- v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
- v.l3 &= maskLow51Bits
- // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
- // Note: not bytes 25:33, shift 4, to avoid overread.
- v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
- v.l4 &= maskLow51Bits
-
- return v
-}
-
-// Bytes returns the canonical 32-byte little-endian encoding of v.
-func (v *Element) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [32]byte
- return v.bytes(&out)
-}
-
-func (v *Element) bytes(out *[32]byte) []byte {
- t := *v
- t.reduce()
-
- var buf [8]byte
- for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
- bitsOffset := i * 51
- binary.LittleEndian.PutUint64(buf[:], l<= len(out) {
- break
- }
- out[off] |= bb
- }
- }
-
- return out[:]
-}
-
-// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int {
- sa, sv := u.Bytes(), v.Bytes()
- return subtle.ConstantTimeCompare(sa, sv)
-}
-
-// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
-func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
-
-// Select sets v to a if cond == 1, and to b if cond == 0.
-func (v *Element) Select(a, b *Element, cond int) *Element {
- m := mask64Bits(cond)
- v.l0 = (m & a.l0) | (^m & b.l0)
- v.l1 = (m & a.l1) | (^m & b.l1)
- v.l2 = (m & a.l2) | (^m & b.l2)
- v.l3 = (m & a.l3) | (^m & b.l3)
- v.l4 = (m & a.l4) | (^m & b.l4)
- return v
-}
-
-// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
-func (v *Element) Swap(u *Element, cond int) {
- m := mask64Bits(cond)
- t := m & (v.l0 ^ u.l0)
- v.l0 ^= t
- u.l0 ^= t
- t = m & (v.l1 ^ u.l1)
- v.l1 ^= t
- u.l1 ^= t
- t = m & (v.l2 ^ u.l2)
- v.l2 ^= t
- u.l2 ^= t
- t = m & (v.l3 ^ u.l3)
- v.l3 ^= t
- u.l3 ^= t
- t = m & (v.l4 ^ u.l4)
- v.l4 ^= t
- u.l4 ^= t
-}
-
-// IsNegative returns 1 if v is negative, and 0 otherwise.
-func (v *Element) IsNegative() int {
- return int(v.Bytes()[0] & 1)
-}
-
-// Absolute sets v to |u|, and returns v.
-func (v *Element) Absolute(u *Element) *Element {
- return v.Select(new(Element).Negate(u), u, u.IsNegative())
-}
-
-// Multiply sets v = x * y, and returns v.
-func (v *Element) Multiply(x, y *Element) *Element {
- feMul(v, x, y)
- return v
-}
-
-// Square sets v = x * x, and returns v.
-func (v *Element) Square(x *Element) *Element {
- feSquare(v, x)
- return v
-}
-
-// Mult32 sets v = x * y, and returns v.
-func (v *Element) Mult32(x *Element, y uint32) *Element {
- x0lo, x0hi := mul51(x.l0, y)
- x1lo, x1hi := mul51(x.l1, y)
- x2lo, x2hi := mul51(x.l2, y)
- x3lo, x3hi := mul51(x.l3, y)
- x4lo, x4hi := mul51(x.l4, y)
- v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
- v.l1 = x1lo + x0hi
- v.l2 = x2lo + x1hi
- v.l3 = x3lo + x2hi
- v.l4 = x4lo + x3hi
- // The hi portions are going to be only 32 bits, plus any previous excess,
- // so we can skip the carry propagation.
- return v
-}
-
-// mul51 returns lo + hi * 2⁵¹ = a * b.
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
- mh, ml := bits.Mul64(a, uint64(b))
- lo = ml & maskLow51Bits
- hi = (mh << 13) | (ml >> 51)
- return
-}
-
-// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
-func (v *Element) Pow22523(x *Element) *Element {
- var t0, t1, t2 Element
-
- t0.Square(x) // x^2
- t1.Square(&t0) // x^4
- t1.Square(&t1) // x^8
- t1.Multiply(x, &t1) // x^9
- t0.Multiply(&t0, &t1) // x^11
- t0.Square(&t0) // x^22
- t0.Multiply(&t1, &t0) // x^31
- t1.Square(&t0) // x^62
- for i := 1; i < 5; i++ { // x^992
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
- t1.Square(&t0) // 2^11 - 2
- for i := 1; i < 10; i++ { // 2^20 - 2^10
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^20 - 1
- t2.Square(&t1) // 2^21 - 2
- for i := 1; i < 20; i++ { // 2^40 - 2^20
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^40 - 1
- t1.Square(&t1) // 2^41 - 2
- for i := 1; i < 10; i++ { // 2^50 - 2^10
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^50 - 1
- t1.Square(&t0) // 2^51 - 2
- for i := 1; i < 50; i++ { // 2^100 - 2^50
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^100 - 1
- t2.Square(&t1) // 2^101 - 2
- for i := 1; i < 100; i++ { // 2^200 - 2^100
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^200 - 1
- t1.Square(&t1) // 2^201 - 2
- for i := 1; i < 50; i++ { // 2^250 - 2^50
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^250 - 1
- t0.Square(&t0) // 2^251 - 2
- t0.Square(&t0) // 2^252 - 4
- return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
-}
-
-// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
-var sqrtM1 = &Element{1718705420411056, 234908883556509,
- 2233514472574048, 2117202627021982, 765476049583133}
-
-// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
-//
-// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
-// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
-// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
- var a, b Element
-
- // r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := a.Square(v)
- uv3 := b.Multiply(u, b.Multiply(v2, v))
- uv7 := a.Multiply(uv3, a.Square(v2))
- r.Multiply(uv3, r.Pow22523(uv7))
-
- check := a.Multiply(v, a.Square(r)) // check = v * r^2
-
- uNeg := b.Negate(u)
- correctSignSqrt := check.Equal(u)
- flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
-
- rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
- // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
-
- r.Absolute(r) // Choose the nonnegative square root.
- return r, correctSignSqrt | flippedSignSqrt
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
deleted file mode 100644
index 70c5416..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-
-package field
-
-// feMul sets out = a * b. It works like feMulGeneric.
-//
-//go:noescape
-func feMul(out *Element, a *Element, b *Element)
-
-// feSquare sets out = a * a. It works like feSquareGeneric.
-//
-//go:noescape
-func feSquare(out *Element, a *Element)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
deleted file mode 100644
index 60817ac..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
+++ /dev/null
@@ -1,378 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-
-#include "textflag.h"
-
-// func feMul(out *Element, a *Element, b *Element)
-TEXT ·feMul(SB), NOSPLIT, $0-24
- MOVQ a+8(FP), CX
- MOVQ b+16(FP), BX
-
- // r0 = a0×b0
- MOVQ (CX), AX
- MULQ (BX)
- MOVQ AX, DI
- MOVQ DX, SI
-
- // r0 += 19×a1×b4
- MOVQ 8(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a2×b3
- MOVQ 16(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a3×b2
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 16(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a4×b1
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 8(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r1 = a0×b1
- MOVQ (CX), AX
- MULQ 8(BX)
- MOVQ AX, R9
- MOVQ DX, R8
-
- // r1 += a1×b0
- MOVQ 8(CX), AX
- MULQ (BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a2×b4
- MOVQ 16(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a3×b3
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a4×b2
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 16(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r2 = a0×b2
- MOVQ (CX), AX
- MULQ 16(BX)
- MOVQ AX, R11
- MOVQ DX, R10
-
- // r2 += a1×b1
- MOVQ 8(CX), AX
- MULQ 8(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += a2×b0
- MOVQ 16(CX), AX
- MULQ (BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += 19×a3×b4
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += 19×a4×b3
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r3 = a0×b3
- MOVQ (CX), AX
- MULQ 24(BX)
- MOVQ AX, R13
- MOVQ DX, R12
-
- // r3 += a1×b2
- MOVQ 8(CX), AX
- MULQ 16(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += a2×b1
- MOVQ 16(CX), AX
- MULQ 8(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += a3×b0
- MOVQ 24(CX), AX
- MULQ (BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += 19×a4×b4
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r4 = a0×b4
- MOVQ (CX), AX
- MULQ 32(BX)
- MOVQ AX, R15
- MOVQ DX, R14
-
- // r4 += a1×b3
- MOVQ 8(CX), AX
- MULQ 24(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a2×b2
- MOVQ 16(CX), AX
- MULQ 16(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a3×b1
- MOVQ 24(CX), AX
- MULQ 8(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a4×b0
- MOVQ 32(CX), AX
- MULQ (BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // First reduction chain
- MOVQ $0x0007ffffffffffff, AX
- SHLQ $0x0d, DI, SI
- SHLQ $0x0d, R9, R8
- SHLQ $0x0d, R11, R10
- SHLQ $0x0d, R13, R12
- SHLQ $0x0d, R15, R14
- ANDQ AX, DI
- IMUL3Q $0x13, R14, R14
- ADDQ R14, DI
- ANDQ AX, R9
- ADDQ SI, R9
- ANDQ AX, R11
- ADDQ R8, R11
- ANDQ AX, R13
- ADDQ R10, R13
- ANDQ AX, R15
- ADDQ R12, R15
-
- // Second reduction chain (carryPropagate)
- MOVQ DI, SI
- SHRQ $0x33, SI
- MOVQ R9, R8
- SHRQ $0x33, R8
- MOVQ R11, R10
- SHRQ $0x33, R10
- MOVQ R13, R12
- SHRQ $0x33, R12
- MOVQ R15, R14
- SHRQ $0x33, R14
- ANDQ AX, DI
- IMUL3Q $0x13, R14, R14
- ADDQ R14, DI
- ANDQ AX, R9
- ADDQ SI, R9
- ANDQ AX, R11
- ADDQ R8, R11
- ANDQ AX, R13
- ADDQ R10, R13
- ANDQ AX, R15
- ADDQ R12, R15
-
- // Store output
- MOVQ out+0(FP), AX
- MOVQ DI, (AX)
- MOVQ R9, 8(AX)
- MOVQ R11, 16(AX)
- MOVQ R13, 24(AX)
- MOVQ R15, 32(AX)
- RET
-
-// func feSquare(out *Element, a *Element)
-TEXT ·feSquare(SB), NOSPLIT, $0-16
- MOVQ a+8(FP), CX
-
- // r0 = l0×l0
- MOVQ (CX), AX
- MULQ (CX)
- MOVQ AX, SI
- MOVQ DX, BX
-
- // r0 += 38×l1×l4
- MOVQ 8(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, SI
- ADCQ DX, BX
-
- // r0 += 38×l2×l3
- MOVQ 16(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 24(CX)
- ADDQ AX, SI
- ADCQ DX, BX
-
- // r1 = 2×l0×l1
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 8(CX)
- MOVQ AX, R8
- MOVQ DX, DI
-
- // r1 += 38×l2×l4
- MOVQ 16(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, R8
- ADCQ DX, DI
-
- // r1 += 19×l3×l3
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(CX)
- ADDQ AX, R8
- ADCQ DX, DI
-
- // r2 = 2×l0×l2
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 16(CX)
- MOVQ AX, R10
- MOVQ DX, R9
-
- // r2 += l1×l1
- MOVQ 8(CX), AX
- MULQ 8(CX)
- ADDQ AX, R10
- ADCQ DX, R9
-
- // r2 += 38×l3×l4
- MOVQ 24(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, R10
- ADCQ DX, R9
-
- // r3 = 2×l0×l3
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 24(CX)
- MOVQ AX, R12
- MOVQ DX, R11
-
- // r3 += 2×l1×l2
- MOVQ 8(CX), AX
- IMUL3Q $0x02, AX, AX
- MULQ 16(CX)
- ADDQ AX, R12
- ADCQ DX, R11
-
- // r3 += 19×l4×l4
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(CX)
- ADDQ AX, R12
- ADCQ DX, R11
-
- // r4 = 2×l0×l4
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 32(CX)
- MOVQ AX, R14
- MOVQ DX, R13
-
- // r4 += 2×l1×l3
- MOVQ 8(CX), AX
- IMUL3Q $0x02, AX, AX
- MULQ 24(CX)
- ADDQ AX, R14
- ADCQ DX, R13
-
- // r4 += l2×l2
- MOVQ 16(CX), AX
- MULQ 16(CX)
- ADDQ AX, R14
- ADCQ DX, R13
-
- // First reduction chain
- MOVQ $0x0007ffffffffffff, AX
- SHLQ $0x0d, SI, BX
- SHLQ $0x0d, R8, DI
- SHLQ $0x0d, R10, R9
- SHLQ $0x0d, R12, R11
- SHLQ $0x0d, R14, R13
- ANDQ AX, SI
- IMUL3Q $0x13, R13, R13
- ADDQ R13, SI
- ANDQ AX, R8
- ADDQ BX, R8
- ANDQ AX, R10
- ADDQ DI, R10
- ANDQ AX, R12
- ADDQ R9, R12
- ANDQ AX, R14
- ADDQ R11, R14
-
- // Second reduction chain (carryPropagate)
- MOVQ SI, BX
- SHRQ $0x33, BX
- MOVQ R8, DI
- SHRQ $0x33, DI
- MOVQ R10, R9
- SHRQ $0x33, R9
- MOVQ R12, R11
- SHRQ $0x33, R11
- MOVQ R14, R13
- SHRQ $0x33, R13
- ANDQ AX, SI
- IMUL3Q $0x13, R13, R13
- ADDQ R13, SI
- ANDQ AX, R8
- ADDQ BX, R8
- ANDQ AX, R10
- ADDQ DI, R10
- ANDQ AX, R12
- ADDQ R9, R12
- ANDQ AX, R14
- ADDQ R11, R14
-
- // Store output
- MOVQ out+0(FP), AX
- MOVQ SI, (AX)
- MOVQ R8, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R12, 24(AX)
- MOVQ R14, 32(AX)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
deleted file mode 100644
index 9da280d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || !gc || purego
-
-package field
-
-func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
-
-func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
deleted file mode 100644
index 075fe9b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build arm64 && gc && !purego
-
-package field
-
-//go:noescape
-func carryPropagate(v *Element)
-
-func (v *Element) carryPropagate() *Element {
- carryPropagate(v)
- return v
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
deleted file mode 100644
index 3126a43..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build arm64 && gc && !purego
-
-#include "textflag.h"
-
-// carryPropagate works exactly like carryPropagateGeneric and uses the
-// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
-// avoids loading R0-R4 twice and uses LDP and STP.
-//
-// See https://golang.org/issues/43145 for the main compiler issue.
-//
-// func carryPropagate(v *Element)
-TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
- MOVD v+0(FP), R20
-
- LDP 0(R20), (R0, R1)
- LDP 16(R20), (R2, R3)
- MOVD 32(R20), R4
-
- AND $0x7ffffffffffff, R0, R10
- AND $0x7ffffffffffff, R1, R11
- AND $0x7ffffffffffff, R2, R12
- AND $0x7ffffffffffff, R3, R13
- AND $0x7ffffffffffff, R4, R14
-
- ADD R0>>51, R11, R11
- ADD R1>>51, R12, R12
- ADD R2>>51, R13, R13
- ADD R3>>51, R14, R14
- // R4>>51 * 19 + R10 -> R10
- LSR $51, R4, R21
- MOVD $19, R22
- MADD R22, R10, R21, R10
-
- STP (R10, R11), 0(R20)
- STP (R12, R13), 16(R20)
- MOVD R14, 32(R20)
-
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
deleted file mode 100644
index fc029ac..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !arm64 || !gc || purego
-
-package field
-
-func (v *Element) carryPropagate() *Element {
- return v.carryPropagateGeneric()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
deleted file mode 100644
index 2671217..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package field
-
-import "math/bits"
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-// mul64 returns a * b.
-func mul64(a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- return uint128{lo, hi}
-}
-
-// addMul64 returns v + a * b.
-func addMul64(v uint128, a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- lo, c := bits.Add64(lo, v.lo, 0)
- hi, _ = bits.Add64(hi, v.hi, c)
- return uint128{lo, hi}
-}
-
-// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
-func shiftRightBy51(a uint128) uint64 {
- return (a.hi << (64 - 51)) | (a.lo >> 51)
-}
-
-func feMulGeneric(v, a, b *Element) {
- a0 := a.l0
- a1 := a.l1
- a2 := a.l2
- a3 := a.l3
- a4 := a.l4
-
- b0 := b.l0
- b1 := b.l1
- b2 := b.l2
- b3 := b.l3
- b4 := b.l4
-
- // Limb multiplication works like pen-and-paper columnar multiplication, but
- // with 51-bit limbs instead of digits.
- //
- // a4 a3 a2 a1 a0 x
- // b4 b3 b2 b1 b0 =
- // ------------------------
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a4b1 a3b1 a2b1 a1b1 a0b1 +
- // a4b2 a3b2 a2b2 a1b2 a0b2 +
- // a4b3 a3b3 a2b3 a1b3 a0b3 +
- // a4b4 a3b4 a2b4 a1b4 a0b4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
- // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
- // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
- //
- // Reduction can be carried out simultaneously to multiplication. For
- // example, we do not compute r5: whenever the result of a multiplication
- // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
- //
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
- // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
- // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
- // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // Finally we add up the columns into wide, overlapping limbs.
-
- a1_19 := a1 * 19
- a2_19 := a2 * 19
- a3_19 := a3 * 19
- a4_19 := a4 * 19
-
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- r0 := mul64(a0, b0)
- r0 = addMul64(r0, a1_19, b4)
- r0 = addMul64(r0, a2_19, b3)
- r0 = addMul64(r0, a3_19, b2)
- r0 = addMul64(r0, a4_19, b1)
-
- // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
- r1 := mul64(a0, b1)
- r1 = addMul64(r1, a1, b0)
- r1 = addMul64(r1, a2_19, b4)
- r1 = addMul64(r1, a3_19, b3)
- r1 = addMul64(r1, a4_19, b2)
-
- // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
- r2 := mul64(a0, b2)
- r2 = addMul64(r2, a1, b1)
- r2 = addMul64(r2, a2, b0)
- r2 = addMul64(r2, a3_19, b4)
- r2 = addMul64(r2, a4_19, b3)
-
- // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
- r3 := mul64(a0, b3)
- r3 = addMul64(r3, a1, b2)
- r3 = addMul64(r3, a2, b1)
- r3 = addMul64(r3, a3, b0)
- r3 = addMul64(r3, a4_19, b4)
-
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- r4 := mul64(a0, b4)
- r4 = addMul64(r4, a1, b3)
- r4 = addMul64(r4, a2, b2)
- r4 = addMul64(r4, a3, b1)
- r4 = addMul64(r4, a4, b0)
-
- // After the multiplication, we need to reduce (carry) the five coefficients
- // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
- // to respect the Element invariant.
- //
- // Overall, the reduction works the same as carryPropagate, except with
- // wider inputs: we take the carry for each coefficient by shifting it right
- // by 51, and add it to the limb above it. The top carry is multiplied by 19
- // according to the reduction identity and added to the lowest limb.
- //
- // The largest coefficient (r0) will be at most 111 bits, which guarantees
- // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
- //
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
- // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
- // r0 < 2⁷ × 2⁵² × 2⁵²
- // r0 < 2¹¹¹
- //
- // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
- // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
- // allows us to easily apply the reduction identity.
- //
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- // r4 < 5 × 2⁵² × 2⁵²
- // r4 < 2¹⁰⁷
- //
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- // Now all coefficients fit into 64-bit registers but are still too large to
- // be passed around as a Element. We therefore do one last carry chain,
- // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-func feSquareGeneric(v, a *Element) {
- l0 := a.l0
- l1 := a.l1
- l2 := a.l2
- l3 := a.l3
- l4 := a.l4
-
- // Squaring works precisely like multiplication above, but thanks to its
- // symmetry we get to group a few terms together.
- //
- // l4 l3 l2 l1 l0 x
- // l4 l3 l2 l1 l0 =
- // ------------------------
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l4l1 l3l1 l2l1 l1l1 l0l1 +
- // l4l2 l3l2 l2l2 l1l2 l0l2 +
- // l4l3 l3l3 l2l3 l1l3 l0l3 +
- // l4l4 l3l4 l2l4 l1l4 l0l4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
- // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
- // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
- // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
- // only three Mul64 and four Add64, instead of five and eight.
-
- l0_2 := l0 * 2
- l1_2 := l1 * 2
-
- l1_38 := l1 * 38
- l2_38 := l2 * 38
- l3_38 := l3 * 38
-
- l3_19 := l3 * 19
- l4_19 := l4 * 19
-
- // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
- r0 := mul64(l0, l0)
- r0 = addMul64(r0, l1_38, l4)
- r0 = addMul64(r0, l2_38, l3)
-
- // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
- r1 := mul64(l0_2, l1)
- r1 = addMul64(r1, l2_38, l4)
- r1 = addMul64(r1, l3_19, l3)
-
- // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
- r2 := mul64(l0_2, l2)
- r2 = addMul64(r2, l1, l1)
- r2 = addMul64(r2, l3_38, l4)
-
- // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
- r3 := mul64(l0_2, l3)
- r3 = addMul64(r3, l1_2, l2)
- r3 = addMul64(r3, l4_19, l4)
-
- // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
- r4 := mul64(l0_2, l4)
- r4 = addMul64(r4, l1_2, l3)
- r4 = addMul64(r4, l2, l2)
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
-func (v *Element) carryPropagateGeneric() *Element {
- c0 := v.l0 >> 51
- c1 := v.l1 >> 51
- c2 := v.l2 >> 51
- c3 := v.l3 >> 51
- c4 := v.l4 >> 51
-
- v.l0 = v.l0&maskLow51Bits + c4*19
- v.l1 = v.l1&maskLow51Bits + c0
- v.l2 = v.l2&maskLow51Bits + c1
- v.l3 = v.l3&maskLow51Bits + c2
- v.l4 = v.l4&maskLow51Bits + c3
-
- return v
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
deleted file mode 100644
index e3685f9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
+++ /dev/null
@@ -1 +0,0 @@
-b0c49ae9f59d233526f8934262c5bbbe14d4358d
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
deleted file mode 100644
index 1ba22a8..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#! /bin/bash
-set -euo pipefail
-
-cd "$(git rev-parse --show-toplevel)"
-
-STD_PATH=src/crypto/ed25519/internal/edwards25519/field
-LOCAL_PATH=curve25519/internal/field
-LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint)
-
-git fetch https://go.googlesource.com/go master
-
-if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then
- echo "No changes."
-else
- NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint)
- echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..."
- git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \
- git apply -3 --directory=$LOCAL_PATH
-fi
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/ed25519/ed25519.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/ed25519/ed25519.go
deleted file mode 100644
index a782834..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-//
-// Beginning with Go 1.13, the functionality of this package was moved to the
-// standard library as crypto/ed25519. This package only acts as a compatibility
-// wrapper.
-package ed25519
-
-import (
- "crypto/ed25519"
- "io"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-//
-// This type is an alias for crypto/ed25519's PublicKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PublicKey = ed25519.PublicKey
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-//
-// This type is an alias for crypto/ed25519's PrivateKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PrivateKey = ed25519.PrivateKey
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- return ed25519.GenerateKey(rand)
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- return ed25519.NewKeyFromSeed(seed)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- return ed25519.Sign(privateKey, message)
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
- return ed25519.Verify(publicKey, message, sig)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/alias/alias.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/alias/alias.go
deleted file mode 100644
index 551ff0c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/alias/alias.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego
-
-// Package alias implements memory aliasing tests.
-package alias
-
-import "unsafe"
-
-// AnyOverlap reports whether x and y share memory at any (not necessarily
-// corresponding) index. The memory beyond the slice length is ignored.
-func AnyOverlap(x, y []byte) bool {
- return len(x) > 0 && len(y) > 0 &&
- uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
- uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
-}
-
-// InexactOverlap reports whether x and y share memory at any non-corresponding
-// index. The memory beyond the slice length is ignored. Note that x and y can
-// have different lengths and still not have any inexact overlap.
-//
-// InexactOverlap can be used to implement the requirements of the crypto/cipher
-// AEAD, Block, BlockMode and Stream interfaces.
-func InexactOverlap(x, y []byte) bool {
- if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
- return false
- }
- return AnyOverlap(x, y)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
deleted file mode 100644
index 6fe61b5..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego
-
-// Package alias implements memory aliasing tests.
-package alias
-
-// This is the Google App Engine standard variant based on reflect
-// because the unsafe package and cgo are disallowed.
-
-import "reflect"
-
-// AnyOverlap reports whether x and y share memory at any (not necessarily
-// corresponding) index. The memory beyond the slice length is ignored.
-func AnyOverlap(x, y []byte) bool {
- return len(x) > 0 && len(y) > 0 &&
- reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
- reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
-}
-
-// InexactOverlap reports whether x and y share memory at any non-corresponding
-// index. The memory beyond the slice length is ignored. Note that x and y can
-// have different lengths and still not have any inexact overlap.
-//
-// InexactOverlap can be used to implement the requirements of the crypto/cipher
-// AEAD, Block, BlockMode and Stream interfaces.
-func InexactOverlap(x, y []byte) bool {
- if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
- return false
- }
- return AnyOverlap(x, y)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
deleted file mode 100644
index 333da28..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
-
-package poly1305
-
-type mac struct{ macGeneric }
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
deleted file mode 100644
index 4aaea81..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package poly1305 implements Poly1305 one-time message authentication code as
-// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
-//
-// Poly1305 is a fast, one-time authentication function. It is infeasible for an
-// attacker to generate an authenticator for a message without the key. However, a
-// key must only be used for a single message. Authenticating two different
-// messages with the same key allows an attacker to forge authenticators for other
-// messages with the same key.
-//
-// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
-// used with a fixed key in order to generate one-time keys from an nonce.
-// However, in this package AES isn't used and the one-time key is specified
-// directly.
-package poly1305
-
-import "crypto/subtle"
-
-// TagSize is the size, in bytes, of a poly1305 authenticator.
-const TagSize = 16
-
-// Sum generates an authenticator for msg using a one-time key and puts the
-// 16-byte result into out. Authenticating two different messages with the same
-// key allows an attacker to forge messages at will.
-func Sum(out *[16]byte, m []byte, key *[32]byte) {
- h := New(key)
- h.Write(m)
- h.Sum(out[:0])
-}
-
-// Verify returns true if mac is a valid authenticator for m with the given key.
-func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
- var tmp [16]byte
- Sum(&tmp, m, key)
- return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
-}
-
-// New returns a new MAC computing an authentication
-// tag of all data written to it with the given key.
-// This allows writing the message progressively instead
-// of passing it as a single slice. Common users should use
-// the Sum function instead.
-//
-// The key must be unique for each message, as authenticating
-// two different messages with the same key allows an attacker
-// to forge messages at will.
-func New(key *[32]byte) *MAC {
- m := &MAC{}
- initialize(key, &m.macState)
- return m
-}
-
-// MAC is an io.Writer computing an authentication tag
-// of the data written to it.
-//
-// MAC cannot be used like common hash.Hash implementations,
-// because using a poly1305 key twice breaks its security.
-// Therefore writing data to a running MAC after calling
-// Sum or Verify causes it to panic.
-type MAC struct {
- mac // platform-dependent implementation
-
- finalized bool
-}
-
-// Size returns the number of bytes Sum will return.
-func (h *MAC) Size() int { return TagSize }
-
-// Write adds more data to the running message authentication code.
-// It never returns an error.
-//
-// It must not be called after the first call of Sum or Verify.
-func (h *MAC) Write(p []byte) (n int, err error) {
- if h.finalized {
- panic("poly1305: write to MAC after Sum or Verify")
- }
- return h.mac.Write(p)
-}
-
-// Sum computes the authenticator of all data written to the
-// message authentication code.
-func (h *MAC) Sum(b []byte) []byte {
- var mac [TagSize]byte
- h.mac.Sum(&mac)
- h.finalized = true
- return append(b, mac[:]...)
-}
-
-// Verify returns whether the authenticator of all data written to
-// the message authentication code matches the expected value.
-func (h *MAC) Verify(expected []byte) bool {
- var mac [TagSize]byte
- h.mac.Sum(&mac)
- h.finalized = true
- return subtle.ConstantTimeCompare(expected, mac[:]) == 1
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
deleted file mode 100644
index 164cd47..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-package poly1305
-
-//go:noescape
-func update(state *macState, msg []byte)
-
-// mac is a wrapper for macGeneric that redirects calls that would have gone to
-// updateGeneric to update.
-//
-// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
-// using function pointers would carry a major performance cost.
-type mac struct{ macGeneric }
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- update(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- update(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-func (h *mac) Sum(out *[16]byte) {
- state := h.macState
- if h.offset > 0 {
- update(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
deleted file mode 100644
index e0d3c64..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-#include "textflag.h"
-
-#define POLY1305_ADD(msg, h0, h1, h2) \
- ADDQ 0(msg), h0; \
- ADCQ 8(msg), h1; \
- ADCQ $1, h2; \
- LEAQ 16(msg), msg
-
-#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
- MOVQ r0, AX; \
- MULQ h0; \
- MOVQ AX, t0; \
- MOVQ DX, t1; \
- MOVQ r0, AX; \
- MULQ h1; \
- ADDQ AX, t1; \
- ADCQ $0, DX; \
- MOVQ r0, t2; \
- IMULQ h2, t2; \
- ADDQ DX, t2; \
- \
- MOVQ r1, AX; \
- MULQ h0; \
- ADDQ AX, t1; \
- ADCQ $0, DX; \
- MOVQ DX, h0; \
- MOVQ r1, t3; \
- IMULQ h2, t3; \
- MOVQ r1, AX; \
- MULQ h1; \
- ADDQ AX, t2; \
- ADCQ DX, t3; \
- ADDQ h0, t2; \
- ADCQ $0, t3; \
- \
- MOVQ t0, h0; \
- MOVQ t1, h1; \
- MOVQ t2, h2; \
- ANDQ $3, h2; \
- MOVQ t2, t0; \
- ANDQ $0xFFFFFFFFFFFFFFFC, t0; \
- ADDQ t0, h0; \
- ADCQ t3, h1; \
- ADCQ $0, h2; \
- SHRQ $2, t3, t2; \
- SHRQ $2, t3; \
- ADDQ t2, h0; \
- ADCQ t3, h1; \
- ADCQ $0, h2
-
-// func update(state *[7]uint64, msg []byte)
-TEXT ·update(SB), $0-32
- MOVQ state+0(FP), DI
- MOVQ msg_base+8(FP), SI
- MOVQ msg_len+16(FP), R15
-
- MOVQ 0(DI), R8 // h0
- MOVQ 8(DI), R9 // h1
- MOVQ 16(DI), R10 // h2
- MOVQ 24(DI), R11 // r0
- MOVQ 32(DI), R12 // r1
-
- CMPQ R15, $16
- JB bytes_between_0_and_15
-
-loop:
- POLY1305_ADD(SI, R8, R9, R10)
-
-multiply:
- POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
- SUBQ $16, R15
- CMPQ R15, $16
- JAE loop
-
-bytes_between_0_and_15:
- TESTQ R15, R15
- JZ done
- MOVQ $1, BX
- XORQ CX, CX
- XORQ R13, R13
- ADDQ R15, SI
-
-flush_buffer:
- SHLQ $8, BX, CX
- SHLQ $8, BX
- MOVB -1(SI), R13
- XORQ R13, BX
- DECQ SI
- DECQ R15
- JNZ flush_buffer
-
- ADDQ BX, R8
- ADCQ CX, R9
- ADCQ $0, R10
- MOVQ $16, R15
- JMP multiply
-
-done:
- MOVQ R8, 0(DI)
- MOVQ R9, 8(DI)
- MOVQ R10, 16(DI)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
deleted file mode 100644
index ec2202b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides the generic implementation of Sum and MAC. Other files
-// might provide optimized assembly implementations of some of this code.
-
-package poly1305
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
-// for a 64 bytes message is approximately
-//
-// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
-//
-// for some secret r and s. It can be computed sequentially like
-//
-// for len(msg) > 0:
-// h += read(msg, 16)
-// h *= r
-// h %= 2¹³⁰ - 5
-// return h + s
-//
-// All the complexity is about doing performant constant-time math on numbers
-// larger than any available numeric type.
-
-func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
- h := newMACGeneric(key)
- h.Write(msg)
- h.Sum(out)
-}
-
-func newMACGeneric(key *[32]byte) macGeneric {
- m := macGeneric{}
- initialize(key, &m.macState)
- return m
-}
-
-// macState holds numbers in saturated 64-bit little-endian limbs. That is,
-// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
-type macState struct {
- // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
- // can grow larger during and after rounds. It must, however, remain below
- // 2 * (2¹³⁰ - 5).
- h [3]uint64
- // r and s are the private key components.
- r [2]uint64
- s [2]uint64
-}
-
-type macGeneric struct {
- macState
-
- buffer [TagSize]byte
- offset int
-}
-
-// Write splits the incoming message into TagSize chunks, and passes them to
-// update. It buffers incomplete chunks.
-func (h *macGeneric) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- updateGeneric(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- updateGeneric(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-// Sum flushes the last incomplete chunk from the buffer, if any, and generates
-// the MAC output. It does not modify its state, in order to allow for multiple
-// calls to Sum, even if no Write is allowed after Sum.
-func (h *macGeneric) Sum(out *[TagSize]byte) {
- state := h.macState
- if h.offset > 0 {
- updateGeneric(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
-
-// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
-// clears some bits of the secret coefficient to make it possible to implement
-// multiplication more efficiently.
-const (
- rMask0 = 0x0FFFFFFC0FFFFFFF
- rMask1 = 0x0FFFFFFC0FFFFFFC
-)
-
-// initialize loads the 256-bit key into the two 128-bit secret values r and s.
-func initialize(key *[32]byte, m *macState) {
- m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
- m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
- m.s[0] = binary.LittleEndian.Uint64(key[16:24])
- m.s[1] = binary.LittleEndian.Uint64(key[24:32])
-}
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-func mul64(a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- return uint128{lo, hi}
-}
-
-func add128(a, b uint128) uint128 {
- lo, c := bits.Add64(a.lo, b.lo, 0)
- hi, c := bits.Add64(a.hi, b.hi, c)
- if c != 0 {
- panic("poly1305: unexpected overflow")
- }
- return uint128{lo, hi}
-}
-
-func shiftRightBy2(a uint128) uint128 {
- a.lo = a.lo>>2 | (a.hi&3)<<62
- a.hi = a.hi >> 2
- return a
-}
-
-// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
-// 128 bits of message, it computes
-//
-// h₊ = (h + m) * r mod 2¹³⁰ - 5
-//
-// If the msg length is not a multiple of TagSize, it assumes the last
-// incomplete chunk is the final one.
-func updateGeneric(state *macState, msg []byte) {
- h0, h1, h2 := state.h[0], state.h[1], state.h[2]
- r0, r1 := state.r[0], state.r[1]
-
- for len(msg) > 0 {
- var c uint64
-
- // For the first step, h + m, we use a chain of bits.Add64 intrinsics.
- // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
- // reduced at the end of the multiplication below.
- //
- // The spec requires us to set a bit just above the message size, not to
- // hide leading zeroes. For full chunks, that's 1 << 128, so we can just
- // add 1 to the most significant (2¹²⁸) limb, h2.
- if len(msg) >= TagSize {
- h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
- h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
- h2 += c + 1
-
- msg = msg[TagSize:]
- } else {
- var buf [TagSize]byte
- copy(buf[:], msg)
- buf[len(msg)] = 1
-
- h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
- h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
- h2 += c
-
- msg = nil
- }
-
- // Multiplication of big number limbs is similar to elementary school
- // columnar multiplication. Instead of digits, there are 64-bit limbs.
- //
- // We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
- //
- // h2 h1 h0 x
- // r1 r0 =
- // ----------------
- // h2r0 h1r0 h0r0 <-- individual 128-bit products
- // + h2r1 h1r1 h0r1
- // ------------------------
- // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
- // ------------------------
- // m3.hi m2.hi m1.hi m0.hi <-- carry propagation
- // + m3.lo m2.lo m1.lo m0.lo
- // -------------------------------
- // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
- //
- // The main difference from pen-and-paper multiplication is that we do
- // carry propagation in a separate step, as if we wrote two digit sums
- // at first (the 128-bit limbs), and then carried the tens all at once.
-
- h0r0 := mul64(h0, r0)
- h1r0 := mul64(h1, r0)
- h2r0 := mul64(h2, r0)
- h0r1 := mul64(h0, r1)
- h1r1 := mul64(h1, r1)
- h2r1 := mul64(h2, r1)
-
- // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
- // top 4 bits cleared by rMask{0,1}, we know that their product is not going
- // to overflow 64 bits, so we can ignore the high part of the products.
- //
- // This also means that the product doesn't have a fifth limb (t4).
- if h2r0.hi != 0 {
- panic("poly1305: unexpected overflow")
- }
- if h2r1.hi != 0 {
- panic("poly1305: unexpected overflow")
- }
-
- m0 := h0r0
- m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
- m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
- m3 := h2r1
-
- t0 := m0.lo
- t1, c := bits.Add64(m1.lo, m0.hi, 0)
- t2, c := bits.Add64(m2.lo, m1.hi, c)
- t3, _ := bits.Add64(m3.lo, m2.hi, c)
-
- // Now we have the result as 4 64-bit limbs, and we need to reduce it
- // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
- // a cheap partial reduction according to the reduction identity
- //
- // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
- //
- // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
- // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
- // assumptions we make about h in the rest of the code.
- //
- // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
-
- // We split the final result at the 2¹³⁰ mark into h and cc, the carry.
- // Note that the carry bits are effectively shifted left by 2, in other
- // words, cc = c * 4 for the c in the reduction identity.
- h0, h1, h2 = t0, t1, t2&maskLow2Bits
- cc := uint128{t2 & maskNotLow2Bits, t3}
-
- // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
-
- h0, c = bits.Add64(h0, cc.lo, 0)
- h1, c = bits.Add64(h1, cc.hi, c)
- h2 += c
-
- cc = shiftRightBy2(cc)
-
- h0, c = bits.Add64(h0, cc.lo, 0)
- h1, c = bits.Add64(h1, cc.hi, c)
- h2 += c
-
- // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
- //
- // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
- }
-
- state.h[0], state.h[1], state.h[2] = h0, h1, h2
-}
-
-const (
- maskLow2Bits uint64 = 0x0000000000000003
- maskNotLow2Bits uint64 = ^maskLow2Bits
-)
-
-// select64 returns x if v == 1 and y if v == 0, in constant time.
-func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
-
-// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
-const (
- p0 = 0xFFFFFFFFFFFFFFFB
- p1 = 0xFFFFFFFFFFFFFFFF
- p2 = 0x0000000000000003
-)
-
-// finalize completes the modular reduction of h and computes
-//
-// out = h + s mod 2¹²⁸
-func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
- h0, h1, h2 := h[0], h[1], h[2]
-
- // After the partial reduction in updateGeneric, h might be more than
- // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
- // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
- // result if the subtraction underflows, and t otherwise.
-
- hMinusP0, b := bits.Sub64(h0, p0, 0)
- hMinusP1, b := bits.Sub64(h1, p1, b)
- _, b = bits.Sub64(h2, p2, b)
-
- // h = h if h < p else h - p
- h0 = select64(b, h0, hMinusP0)
- h1 = select64(b, h1, hMinusP1)
-
- // Finally, we compute the last Poly1305 step
- //
- // tag = h + s mod 2¹²⁸
- //
- // by just doing a wide addition with the 128 low bits of h and discarding
- // the overflow.
- h0, c := bits.Add64(h0, s[0], 0)
- h1, _ = bits.Add64(h1, s[1], c)
-
- binary.LittleEndian.PutUint64(out[0:8], h0)
- binary.LittleEndian.PutUint64(out[8:16], h1)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
deleted file mode 100644
index 4aec487..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-package poly1305
-
-//go:noescape
-func update(state *macState, msg []byte)
-
-// mac is a wrapper for macGeneric that redirects calls that would have gone to
-// updateGeneric to update.
-//
-// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
-// using function pointers would carry a major performance cost.
-type mac struct{ macGeneric }
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- update(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- update(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-func (h *mac) Sum(out *[16]byte) {
- state := h.macState
- if h.offset > 0 {
- update(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
deleted file mode 100644
index b3c1699..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-#include "textflag.h"
-
-// This was ported from the amd64 implementation.
-
-#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
- MOVD (msg), t0; \
- MOVD 8(msg), t1; \
- MOVD $1, t2; \
- ADDC t0, h0, h0; \
- ADDE t1, h1, h1; \
- ADDE t2, h2; \
- ADD $16, msg
-
-#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
- MULLD r0, h0, t0; \
- MULHDU r0, h0, t1; \
- MULLD r0, h1, t4; \
- MULHDU r0, h1, t5; \
- ADDC t4, t1, t1; \
- MULLD r0, h2, t2; \
- MULHDU r1, h0, t4; \
- MULLD r1, h0, h0; \
- ADDE t5, t2, t2; \
- ADDC h0, t1, t1; \
- MULLD h2, r1, t3; \
- ADDZE t4, h0; \
- MULHDU r1, h1, t5; \
- MULLD r1, h1, t4; \
- ADDC t4, t2, t2; \
- ADDE t5, t3, t3; \
- ADDC h0, t2, t2; \
- MOVD $-4, t4; \
- ADDZE t3; \
- RLDICL $0, t2, $62, h2; \
- AND t2, t4, h0; \
- ADDC t0, h0, h0; \
- ADDE t3, t1, h1; \
- SLD $62, t3, t4; \
- SRD $2, t2; \
- ADDZE h2; \
- OR t4, t2, t2; \
- SRD $2, t3; \
- ADDC t2, h0, h0; \
- ADDE t3, h1, h1; \
- ADDZE h2
-
-DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
-DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
-GLOBL ·poly1305Mask<>(SB), RODATA, $16
-
-// func update(state *[7]uint64, msg []byte)
-TEXT ·update(SB), $0-32
- MOVD state+0(FP), R3
- MOVD msg_base+8(FP), R4
- MOVD msg_len+16(FP), R5
-
- MOVD 0(R3), R8 // h0
- MOVD 8(R3), R9 // h1
- MOVD 16(R3), R10 // h2
- MOVD 24(R3), R11 // r0
- MOVD 32(R3), R12 // r1
-
- CMP R5, $16
- BLT bytes_between_0_and_15
-
-loop:
- POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
-
- PCALIGN $16
-multiply:
- POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
- ADD $-16, R5
- CMP R5, $16
- BGE loop
-
-bytes_between_0_and_15:
- CMP R5, $0
- BEQ done
- MOVD $0, R16 // h0
- MOVD $0, R17 // h1
-
-flush_buffer:
- CMP R5, $8
- BLE just1
-
- MOVD $8, R21
- SUB R21, R5, R21
-
- // Greater than 8 -- load the rightmost remaining bytes in msg
- // and put into R17 (h1)
- MOVD (R4)(R21), R17
- MOVD $16, R22
-
- // Find the offset to those bytes
- SUB R5, R22, R22
- SLD $3, R22
-
- // Shift to get only the bytes in msg
- SRD R22, R17, R17
-
- // Put 1 at high end
- MOVD $1, R23
- SLD $3, R21
- SLD R21, R23, R23
- OR R23, R17, R17
-
- // Remainder is 8
- MOVD $8, R5
-
-just1:
- CMP R5, $8
- BLT less8
-
- // Exactly 8
- MOVD (R4), R16
-
- CMP R17, $0
-
- // Check if we've already set R17; if not
- // set 1 to indicate end of msg.
- BNE carry
- MOVD $1, R17
- BR carry
-
-less8:
- MOVD $0, R16 // h0
- MOVD $0, R22 // shift count
- CMP R5, $4
- BLT less4
- MOVWZ (R4), R16
- ADD $4, R4
- ADD $-4, R5
- MOVD $32, R22
-
-less4:
- CMP R5, $2
- BLT less2
- MOVHZ (R4), R21
- SLD R22, R21, R21
- OR R16, R21, R16
- ADD $16, R22
- ADD $-2, R5
- ADD $2, R4
-
-less2:
- CMP R5, $0
- BEQ insert1
- MOVBZ (R4), R21
- SLD R22, R21, R21
- OR R16, R21, R16
- ADD $8, R22
-
-insert1:
- // Insert 1 at end of msg
- MOVD $1, R21
- SLD R22, R21, R21
- OR R16, R21, R16
-
-carry:
- // Add new values to h0, h1, h2
- ADDC R16, R8
- ADDE R17, R9
- ADDZE R10, R10
- MOVD $16, R5
- ADD R5, R4
- BR multiply
-
-done:
- // Save h0, h1, h2 in state
- MOVD R8, 0(R3)
- MOVD R9, 8(R3)
- MOVD R10, 16(R3)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
deleted file mode 100644
index e1d033a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-package poly1305
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-// updateVX is an assembly implementation of Poly1305 that uses vector
-// instructions. It must only be called if the vector facility (vx) is
-// available.
-//
-//go:noescape
-func updateVX(state *macState, msg []byte)
-
-// mac is a replacement for macGeneric that uses a larger buffer and redirects
-// calls that would have gone to updateGeneric to updateVX if the vector
-// facility is installed.
-//
-// A larger buffer is required for good performance because the vector
-// implementation has a higher fixed cost per call than the generic
-// implementation.
-type mac struct {
- macState
-
- buffer [16 * TagSize]byte // size must be a multiple of block size (16)
- offset int
-}
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < len(h.buffer) {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- if cpu.S390X.HasVX {
- updateVX(&h.macState, h.buffer[:])
- } else {
- updateGeneric(&h.macState, h.buffer[:])
- }
- }
-
- tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
- body := len(p) - tail // number of bytes to process now
- if body > 0 {
- if cpu.S390X.HasVX {
- updateVX(&h.macState, p[:body])
- } else {
- updateGeneric(&h.macState, p[:body])
- }
- }
- h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
- return nn, nil
-}
-
-func (h *mac) Sum(out *[TagSize]byte) {
- state := h.macState
- remainder := h.buffer[:h.offset]
-
- // Use the generic implementation if we have 2 or fewer blocks left
- // to sum. The vector implementation has a higher startup time.
- if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
- updateVX(&state, remainder)
- } else if len(remainder) > 0 {
- updateGeneric(&state, remainder)
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
deleted file mode 100644
index 0fe3a7c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-
-#include "textflag.h"
-
-// This implementation of Poly1305 uses the vector facility (vx)
-// to process up to 2 blocks (32 bytes) per iteration using an
-// algorithm based on the one described in:
-//
-// NEON crypto, Daniel J. Bernstein & Peter Schwabe
-// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
-//
-// This algorithm uses 5 26-bit limbs to represent a 130-bit
-// value. These limbs are, for the most part, zero extended and
-// placed into 64-bit vector register elements. Each vector
-// register is 128-bits wide and so holds 2 of these elements.
-// Using 26-bit limbs allows us plenty of headroom to accommodate
-// accumulations before and after multiplication without
-// overflowing either 32-bits (before multiplication) or 64-bits
-// (after multiplication).
-//
-// In order to parallelise the operations required to calculate
-// the sum we use two separate accumulators and then sum those
-// in an extra final step. For compatibility with the generic
-// implementation we perform this summation at the end of every
-// updateVX call.
-//
-// To use two accumulators we must multiply the message blocks
-// by r² rather than r. Only the final message block should be
-// multiplied by r.
-//
-// Example:
-//
-// We want to calculate the sum (h) for a 64 byte message (m):
-//
-// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
-//
-// To do this we split the calculation into the even indices
-// and odd indices of the message. These form our SIMD 'lanes':
-//
-// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
-// m[16:32]r³ + m[48:64]r <- lane 1
-//
-// To calculate this iteratively we refactor so that both lanes
-// are written in terms of r² and r:
-//
-// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
-// (m[16:32]r² + m[48:64])r <- lane 1
-// ^ ^
-// | coefficients for second iteration
-// coefficients for first iteration
-//
-// So in this case we would have two iterations. In the first
-// both lanes are multiplied by r². In the second only the
-// first lane is multiplied by r² and the second lane is
-// instead multiplied by r. This gives use the odd and even
-// powers of r that we need from the original equation.
-//
-// Notation:
-//
-// h - accumulator
-// r - key
-// m - message
-//
-// [a, b] - SIMD register holding two 64-bit values
-// [a, b, c, d] - SIMD register holding four 32-bit values
-// xᵢ[n] - limb n of variable x with bit width i
-//
-// Limbs are expressed in little endian order, so for 26-bit
-// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
-// will be the least significant limb.
-
-// masking constants
-#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
-#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
-
-// expansion constants (see EXPAND macro)
-#define EX0 V2
-#define EX1 V3
-#define EX2 V4
-
-// key (r², r or 1 depending on context)
-#define R_0 V5
-#define R_1 V6
-#define R_2 V7
-#define R_3 V8
-#define R_4 V9
-
-// precalculated coefficients (5r², 5r or 0 depending on context)
-#define R5_1 V10
-#define R5_2 V11
-#define R5_3 V12
-#define R5_4 V13
-
-// message block (m)
-#define M_0 V14
-#define M_1 V15
-#define M_2 V16
-#define M_3 V17
-#define M_4 V18
-
-// accumulator (h)
-#define H_0 V19
-#define H_1 V20
-#define H_2 V21
-#define H_3 V22
-#define H_4 V23
-
-// temporary registers (for short-lived values)
-#define T_0 V24
-#define T_1 V25
-#define T_2 V26
-#define T_3 V27
-#define T_4 V28
-
-GLOBL ·constants<>(SB), RODATA, $0x30
-// EX0
-DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
-DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
-// EX1
-DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
-DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
-// EX2
-DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
-DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
-
-// MULTIPLY multiplies each lane of f and g, partially reduced
-// modulo 2¹³⁰ - 5. The result, h, consists of partial products
-// in each lane that need to be reduced further to produce the
-// final result.
-//
-// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
-//
-// Note that the multiplication by 5 of the high bits is
-// achieved by precalculating the multiplication of four of the
-// g coefficients by 5. These are g51-g54.
-#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
- VMLOF f0, g0, h0 \
- VMLOF f0, g3, h3 \
- VMLOF f0, g1, h1 \
- VMLOF f0, g4, h4 \
- VMLOF f0, g2, h2 \
- VMLOF f1, g54, T_0 \
- VMLOF f1, g2, T_3 \
- VMLOF f1, g0, T_1 \
- VMLOF f1, g3, T_4 \
- VMLOF f1, g1, T_2 \
- VMALOF f2, g53, h0, h0 \
- VMALOF f2, g1, h3, h3 \
- VMALOF f2, g54, h1, h1 \
- VMALOF f2, g2, h4, h4 \
- VMALOF f2, g0, h2, h2 \
- VMALOF f3, g52, T_0, T_0 \
- VMALOF f3, g0, T_3, T_3 \
- VMALOF f3, g53, T_1, T_1 \
- VMALOF f3, g1, T_4, T_4 \
- VMALOF f3, g54, T_2, T_2 \
- VMALOF f4, g51, h0, h0 \
- VMALOF f4, g54, h3, h3 \
- VMALOF f4, g52, h1, h1 \
- VMALOF f4, g0, h4, h4 \
- VMALOF f4, g53, h2, h2 \
- VAG T_0, h0, h0 \
- VAG T_3, h3, h3 \
- VAG T_1, h1, h1 \
- VAG T_4, h4, h4 \
- VAG T_2, h2, h2
-
-// REDUCE performs the following carry operations in four
-// stages, as specified in Bernstein & Schwabe:
-//
-// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
-// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
-// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
-// 4: h₂₆[3]->h₂₆[4]
-//
-// The result is that all of the limbs are limited to 26-bits
-// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
-//
-// Note that although each limb is aligned at 26-bit intervals
-// they may contain values that exceed 2²⁶ - 1, hence the need
-// to carry the excess bits in each limb.
-#define REDUCE(h0, h1, h2, h3, h4) \
- VESRLG $26, h0, T_0 \
- VESRLG $26, h3, T_1 \
- VN MOD26, h0, h0 \
- VN MOD26, h3, h3 \
- VAG T_0, h1, h1 \
- VAG T_1, h4, h4 \
- VESRLG $26, h1, T_2 \
- VESRLG $26, h4, T_3 \
- VN MOD26, h1, h1 \
- VN MOD26, h4, h4 \
- VESLG $2, T_3, T_4 \
- VAG T_3, T_4, T_4 \
- VAG T_2, h2, h2 \
- VAG T_4, h0, h0 \
- VESRLG $26, h2, T_0 \
- VESRLG $26, h0, T_1 \
- VN MOD26, h2, h2 \
- VN MOD26, h0, h0 \
- VAG T_0, h3, h3 \
- VAG T_1, h1, h1 \
- VESRLG $26, h3, T_2 \
- VN MOD26, h3, h3 \
- VAG T_2, h4, h4
-
-// EXPAND splits the 128-bit little-endian values in0 and in1
-// into 26-bit big-endian limbs and places the results into
-// the first and second lane of d₂₆[0:4] respectively.
-//
-// The EX0, EX1 and EX2 constants are arrays of byte indices
-// for permutation. The permutation both reverses the bytes
-// in the input and ensures the bytes are copied into the
-// destination limb ready to be shifted into their final
-// position.
-#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
- VPERM in0, in1, EX0, d0 \
- VPERM in0, in1, EX1, d2 \
- VPERM in0, in1, EX2, d4 \
- VESRLG $26, d0, d1 \
- VESRLG $30, d2, d3 \
- VESRLG $4, d2, d2 \
- VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
- VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
- VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
- VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
- VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
-
-// func updateVX(state *macState, msg []byte)
-TEXT ·updateVX(SB), NOSPLIT, $0
- MOVD state+0(FP), R1
- LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
-
- // load EX0, EX1 and EX2
- MOVD $·constants<>(SB), R5
- VLM (R5), EX0, EX2
-
- // generate masks
- VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
- VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
-
- // load h (accumulator) and r (key) from state
- VZERO T_1 // [0, 0]
- VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
- VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
- VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
- VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
- VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
-
- // unpack h and r into 26-bit limbs
- // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
- VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
- VZERO H_1 // [0, 0]
- VZERO H_3 // [0, 0]
- VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
- VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
- VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
- VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
- VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]]
- VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
- VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
- VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
-
- // replicate r across all 4 vector elements
- VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
- VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
- VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
- VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
- VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
-
- // zero out lane 1 of h
- VLEIG $1, $0, H_0 // [h₂₆[0], 0]
- VLEIG $1, $0, H_1 // [h₂₆[1], 0]
- VLEIG $1, $0, H_2 // [h₂₆[2], 0]
- VLEIG $1, $0, H_3 // [h₂₆[3], 0]
- VLEIG $1, $0, H_4 // [h₂₆[4], 0]
-
- // calculate 5r (ignore least significant limb)
- VREPIF $5, T_0
- VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
- VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
- VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
- VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
-
- // skip r² calculation if we are only calculating one block
- CMPBLE R3, $16, skip
-
- // calculate r²
- MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
- REDUCE(M_0, M_1, M_2, M_3, M_4)
- VGBM $0x0f0f, T_0
- VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
- VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
- VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
- VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
- VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
-
- // calculate 5r² (ignore least significant limb)
- VREPIF $5, T_0
- VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
- VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
- VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
- VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
-
-loop:
- CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
-
- // load next 2 blocks from message
- VLM (R2), T_0, T_1
-
- // update message slice
- SUB $32, R3
- MOVD $32(R2), R2
-
- // unpack message blocks into 26-bit big-endian limbs
- EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
- // add 2¹²⁸ to each message block value
- VLEIB $4, $1, M_4
- VLEIB $12, $1, M_4
-
-multiply:
- // accumulate the incoming message
- VAG H_0, M_0, M_0
- VAG H_3, M_3, M_3
- VAG H_1, M_1, M_1
- VAG H_4, M_4, M_4
- VAG H_2, M_2, M_2
-
- // multiply the accumulator by the key coefficient
- MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
-
- // carry and partially reduce the partial products
- REDUCE(H_0, H_1, H_2, H_3, H_4)
-
- CMPBNE R3, $0, loop
-
-finish:
- // sum lane 0 and lane 1 and put the result in lane 1
- VZERO T_0
- VSUMQG H_0, T_0, H_0
- VSUMQG H_3, T_0, H_3
- VSUMQG H_1, T_0, H_1
- VSUMQG H_4, T_0, H_4
- VSUMQG H_2, T_0, H_2
-
- // reduce again after summation
- // TODO(mundaym): there might be a more efficient way to do this
- // now that we only have 1 active lane. For example, we could
- // simultaneously pack the values as we reduce them.
- REDUCE(H_0, H_1, H_2, H_3, H_4)
-
- // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
- // TODO(mundaym): in testing this final carry was unnecessary.
- // Needs a proof before it can be removed though.
- VESRLG $26, H_1, T_1
- VN MOD26, H_1, H_1
- VAQ T_1, H_2, H_2
- VESRLG $26, H_2, T_2
- VN MOD26, H_2, H_2
- VAQ T_2, H_3, H_3
- VESRLG $26, H_3, T_3
- VN MOD26, H_3, H_3
- VAQ T_3, H_4, H_4
-
- // h is now < 2(2¹³⁰ - 5)
- // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
- VESLG $26, H_1, H_1
- VESLG $26, H_3, H_3
- VO H_0, H_1, H_0
- VO H_2, H_3, H_2
- VESLG $4, H_2, H_2
- VLEIB $7, $48, H_1
- VSLB H_1, H_2, H_2
- VO H_0, H_2, H_0
- VLEIB $7, $104, H_1
- VSLB H_1, H_4, H_3
- VO H_3, H_0, H_0
- VLEIB $7, $24, H_1
- VSRLB H_1, H_4, H_1
-
- // update state
- VSTEG $1, H_0, 0(R1)
- VSTEG $0, H_0, 8(R1)
- VSTEG $1, H_1, 16(R1)
- RET
-
-b2: // 2 or fewer blocks remaining
- CMPBLE R3, $16, b1
-
- // Load the 2 remaining blocks (17-32 bytes remaining).
- MOVD $-17(R3), R0 // index of final byte to load modulo 16
- VL (R2), T_0 // load full 16 byte block
- VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
-
- // The Poly1305 algorithm requires that a 1 bit be appended to
- // each message block. If the final block is less than 16 bytes
- // long then it is easiest to insert the 1 before the message
- // block is split into 26-bit limbs. If, on the other hand, the
- // final message block is 16 bytes long then we append the 1 bit
- // after expansion as normal.
- MOVBZ $1, R0
- MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
- CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
- VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
-
- // Split both blocks into 26-bit limbs in the appropriate lanes.
- EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
- // Append a 1 byte to the end of the second to last block.
- VLEIB $4, $1, M_4
-
- // Append a 1 byte to the end of the last block only if it is a
- // full 16 byte block.
- CMPBNE R3, $16, 2(PC)
- VLEIB $12, $1, M_4
-
- // Finally, set up the coefficients for the final multiplication.
- // We have previously saved r and 5r in the 32-bit even indexes
- // of the R_[0-4] and R5_[1-4] coefficient registers.
- //
- // We want lane 0 to be multiplied by r² so that can be kept the
- // same. We want lane 1 to be multiplied by r so we need to move
- // the saved r value into the 32-bit odd index in lane 1 by
- // rotating the 64-bit lane by 32.
- VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
- VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
- VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
- VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
- VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
- VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
- VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
- VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
- VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
- VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
-
- MOVD $0, R3
- BR multiply
-
-skip:
- CMPBEQ R3, $0, finish
-
-b1: // 1 block remaining
-
- // Load the final block (1-16 bytes). This will be placed into
- // lane 0.
- MOVD $-1(R3), R0
- VLL R0, (R2), T_0 // pad to 16 bytes with zeros
-
- // The Poly1305 algorithm requires that a 1 bit be appended to
- // each message block. If the final block is less than 16 bytes
- // long then it is easiest to insert the 1 before the message
- // block is split into 26-bit limbs. If, on the other hand, the
- // final message block is 16 bytes long then we append the 1 bit
- // after expansion as normal.
- MOVBZ $1, R0
- CMPBEQ R3, $16, 2(PC)
- VLVGB R3, R0, T_0
-
- // Set the message block in lane 1 to the value 0 so that it
- // can be accumulated without affecting the final result.
- VZERO T_1
-
- // Split the final message block into 26-bit limbs in lane 0.
- // Lane 1 will be contain 0.
- EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
-
- // Append a 1 byte to the end of the last block only if it is a
- // full 16 byte block.
- CMPBNE R3, $16, 2(PC)
- VLEIB $4, $1, M_4
-
- // We have previously saved r and 5r in the 32-bit even indexes
- // of the R_[0-4] and R5_[1-4] coefficient registers.
- //
- // We want lane 0 to be multiplied by r so we need to move the
- // saved r value into the 32-bit odd index in lane 0. We want
- // lane 1 to be set to the value 1. This makes multiplication
- // a no-op. We do this by setting lane 1 in every register to 0
- // and then just setting the 32-bit index 3 in R_0 to 1.
- VZERO T_0
- MOVD $0, R0
- MOVD $0x10111213, R12
- VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
- VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0]
- VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0]
- VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0]
- VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0]
- VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0]
- VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
- VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
- VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
- VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
-
- // Set the value of lane 1 to be 1.
- VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
-
- MOVD $0, R3
- BR multiply
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/nacl/box/box.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/nacl/box/box.go
deleted file mode 100644
index 7f3b830..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/nacl/box/box.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package box authenticates and encrypts small messages using public-key cryptography.
-
-Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate
-messages. The length of messages is not hidden.
-
-It is the caller's responsibility to ensure the uniqueness of nonces—for
-example, by using nonce 1 for the first message, nonce 2 for the second
-message, etc. Nonces are long enough that randomly generated nonces have
-negligible risk of collision.
-
-Messages should be small because:
-
-1. The whole message needs to be held in memory to be processed.
-
-2. Using large messages pressures implementations on small machines to decrypt
-and process plaintext before authenticating it. This is very dangerous, and
-this API does not allow it, but a protocol that uses excessive message sizes
-might present some implementations with no other choice.
-
-3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
-
-4. Performance may be improved by working with messages that fit into data caches.
-
-Thus large amounts of data should be chunked so that each message is small.
-(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
-chunk size.
-
-This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html.
-Anonymous sealing/opening is an extension of NaCl defined by and interoperable
-with libsodium:
-https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes.
-*/
-package box // import "golang.org/x/crypto/nacl/box"
-
-import (
- cryptorand "crypto/rand"
- "io"
-
- "golang.org/x/crypto/blake2b"
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/nacl/secretbox"
- "golang.org/x/crypto/salsa20/salsa"
-)
-
-const (
- // Overhead is the number of bytes of overhead when boxing a message.
- Overhead = secretbox.Overhead
-
- // AnonymousOverhead is the number of bytes of overhead when using anonymous
- // sealed boxes.
- AnonymousOverhead = Overhead + 32
-)
-
-// GenerateKey generates a new public/private key pair suitable for use with
-// Seal and Open.
-func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) {
- publicKey = new([32]byte)
- privateKey = new([32]byte)
- _, err = io.ReadFull(rand, privateKey[:])
- if err != nil {
- publicKey = nil
- privateKey = nil
- return
- }
-
- curve25519.ScalarBaseMult(publicKey, privateKey)
- return
-}
-
-var zeros [16]byte
-
-// Precompute calculates the shared key between peersPublicKey and privateKey
-// and writes it to sharedKey. The shared key can be used with
-// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing
-// when using the same pair of keys repeatedly.
-func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) {
- curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey)
- salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma)
-}
-
-// Seal appends an encrypted and authenticated copy of message to out, which
-// will be Overhead bytes longer than the original and must not overlap it. The
-// nonce must be unique for each distinct message for a given pair of keys.
-func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte {
- var sharedKey [32]byte
- Precompute(&sharedKey, peersPublicKey, privateKey)
- return secretbox.Seal(out, message, nonce, &sharedKey)
-}
-
-// SealAfterPrecomputation performs the same actions as Seal, but takes a
-// shared key as generated by Precompute.
-func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte {
- return secretbox.Seal(out, message, nonce, sharedKey)
-}
-
-// Open authenticates and decrypts a box produced by Seal and appends the
-// message to out, which must not overlap box. The output will be Overhead
-// bytes smaller than box.
-func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) {
- var sharedKey [32]byte
- Precompute(&sharedKey, peersPublicKey, privateKey)
- return secretbox.Open(out, box, nonce, &sharedKey)
-}
-
-// OpenAfterPrecomputation performs the same actions as Open, but takes a
-// shared key as generated by Precompute.
-func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) {
- return secretbox.Open(out, box, nonce, sharedKey)
-}
-
-// SealAnonymous appends an encrypted and authenticated copy of message to out,
-// which will be AnonymousOverhead bytes longer than the original and must not
-// overlap it. This differs from Seal in that the sender is not required to
-// provide a private key.
-func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) {
- if rand == nil {
- rand = cryptorand.Reader
- }
- ephemeralPub, ephemeralPriv, err := GenerateKey(rand)
- if err != nil {
- return nil, err
- }
-
- var nonce [24]byte
- if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil {
- return nil, err
- }
-
- if total := len(out) + AnonymousOverhead + len(message); cap(out) < total {
- original := out
- out = make([]byte, 0, total)
- out = append(out, original...)
- }
- out = append(out, ephemeralPub[:]...)
-
- return Seal(out, message, &nonce, recipient, ephemeralPriv), nil
-}
-
-// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and
-// appends the message to out, which must not overlap box. The output will be
-// AnonymousOverhead bytes smaller than box.
-func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) {
- if len(box) < AnonymousOverhead {
- return nil, false
- }
-
- var ephemeralPub [32]byte
- copy(ephemeralPub[:], box[:32])
-
- var nonce [24]byte
- if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil {
- return nil, false
- }
-
- return Open(out, box[32:], &nonce, &ephemeralPub, privateKey)
-}
-
-// sealNonce generates a 24 byte nonce that is a blake2b digest of the
-// ephemeral public key and the receiver's public key.
-func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error {
- h, err := blake2b.New(24, nil)
- if err != nil {
- return err
- }
-
- if _, err = h.Write(ephemeralPub[:]); err != nil {
- return err
- }
-
- if _, err = h.Write(peersPublicKey[:]); err != nil {
- return err
- }
-
- h.Sum(nonce[:0])
-
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
deleted file mode 100644
index f3c3242..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package secretbox encrypts and authenticates small messages.
-
-Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
-secret-key cryptography. The length of messages is not hidden.
-
-It is the caller's responsibility to ensure the uniqueness of nonces—for
-example, by using nonce 1 for the first message, nonce 2 for the second
-message, etc. Nonces are long enough that randomly generated nonces have
-negligible risk of collision.
-
-Messages should be small because:
-
-1. The whole message needs to be held in memory to be processed.
-
-2. Using large messages pressures implementations on small machines to decrypt
-and process plaintext before authenticating it. This is very dangerous, and
-this API does not allow it, but a protocol that uses excessive message sizes
-might present some implementations with no other choice.
-
-3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
-
-4. Performance may be improved by working with messages that fit into data caches.
-
-Thus large amounts of data should be chunked so that each message is small.
-(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
-chunk size.
-
-This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
-*/
-package secretbox // import "golang.org/x/crypto/nacl/secretbox"
-
-import (
- "golang.org/x/crypto/internal/alias"
- "golang.org/x/crypto/internal/poly1305"
- "golang.org/x/crypto/salsa20/salsa"
-)
-
-// Overhead is the number of bytes of overhead when boxing a message.
-const Overhead = poly1305.TagSize
-
-// setup produces a sub-key and Salsa20 counter given a nonce and key.
-func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
- // We use XSalsa20 for encryption so first we need to generate a
- // key and nonce with HSalsa20.
- var hNonce [16]byte
- copy(hNonce[:], nonce[:])
- salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
-
- // The final 8 bytes of the original nonce form the new nonce.
- copy(counter[:], nonce[16:])
-}
-
-// sliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
-
-// Seal appends an encrypted and authenticated copy of message to out, which
-// must not overlap message. The key and nonce pair must be unique for each
-// distinct message and the output will be Overhead bytes longer than message.
-func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
- var subKey [32]byte
- var counter [16]byte
- setup(&subKey, &counter, nonce, key)
-
- // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
- // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
- // keystream as a side effect.
- var firstBlock [64]byte
- salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
-
- var poly1305Key [32]byte
- copy(poly1305Key[:], firstBlock[:])
-
- ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
- if alias.AnyOverlap(out, message) {
- panic("nacl: invalid buffer overlap")
- }
-
- // We XOR up to 32 bytes of message with the keystream generated from
- // the first block.
- firstMessageBlock := message
- if len(firstMessageBlock) > 32 {
- firstMessageBlock = firstMessageBlock[:32]
- }
-
- tagOut := out
- out = out[poly1305.TagSize:]
- for i, x := range firstMessageBlock {
- out[i] = firstBlock[32+i] ^ x
- }
- message = message[len(firstMessageBlock):]
- ciphertext := out
- out = out[len(firstMessageBlock):]
-
- // Now encrypt the rest.
- counter[8] = 1
- salsa.XORKeyStream(out, message, &counter, &subKey)
-
- var tag [poly1305.TagSize]byte
- poly1305.Sum(&tag, ciphertext, &poly1305Key)
- copy(tagOut, tag[:])
-
- return ret
-}
-
-// Open authenticates and decrypts a box produced by Seal and appends the
-// message to out, which must not overlap box. The output will be Overhead
-// bytes smaller than box.
-func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
- if len(box) < Overhead {
- return nil, false
- }
-
- var subKey [32]byte
- var counter [16]byte
- setup(&subKey, &counter, nonce, key)
-
- // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
- // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
- // keystream as a side effect.
- var firstBlock [64]byte
- salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
-
- var poly1305Key [32]byte
- copy(poly1305Key[:], firstBlock[:])
- var tag [poly1305.TagSize]byte
- copy(tag[:], box)
-
- if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
- return nil, false
- }
-
- ret, out := sliceForAppend(out, len(box)-Overhead)
- if alias.AnyOverlap(out, box) {
- panic("nacl: invalid buffer overlap")
- }
-
- // We XOR up to 32 bytes of box with the keystream generated from
- // the first block.
- box = box[Overhead:]
- firstMessageBlock := box
- if len(firstMessageBlock) > 32 {
- firstMessageBlock = firstMessageBlock[:32]
- }
- for i, x := range firstMessageBlock {
- out[i] = firstBlock[32+i] ^ x
- }
-
- box = box[len(firstMessageBlock):]
- out = out[len(firstMessageBlock):]
-
- // Now decrypt the rest.
- counter[8] = 1
- salsa.XORKeyStream(out, box, &counter, &subKey)
-
- return ret, true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
deleted file mode 100644
index 3fd05b2..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package salsa provides low-level access to functions in the Salsa family.
-package salsa // import "golang.org/x/crypto/salsa20/salsa"
-
-import "math/bits"
-
-// Sigma is the Salsa20 constant for 256-bit keys.
-var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
-
-// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
-// key k, and 16-byte constant c, and puts the result into the 32-byte array
-// out.
-func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
- x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
- x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
- x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
- x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
- x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
- x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
- x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
- x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
- x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
- x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
- x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
- x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
- x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
- x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
- x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
- x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
-
- for i := 0; i < 20; i += 2 {
- u := x0 + x12
- x4 ^= bits.RotateLeft32(u, 7)
- u = x4 + x0
- x8 ^= bits.RotateLeft32(u, 9)
- u = x8 + x4
- x12 ^= bits.RotateLeft32(u, 13)
- u = x12 + x8
- x0 ^= bits.RotateLeft32(u, 18)
-
- u = x5 + x1
- x9 ^= bits.RotateLeft32(u, 7)
- u = x9 + x5
- x13 ^= bits.RotateLeft32(u, 9)
- u = x13 + x9
- x1 ^= bits.RotateLeft32(u, 13)
- u = x1 + x13
- x5 ^= bits.RotateLeft32(u, 18)
-
- u = x10 + x6
- x14 ^= bits.RotateLeft32(u, 7)
- u = x14 + x10
- x2 ^= bits.RotateLeft32(u, 9)
- u = x2 + x14
- x6 ^= bits.RotateLeft32(u, 13)
- u = x6 + x2
- x10 ^= bits.RotateLeft32(u, 18)
-
- u = x15 + x11
- x3 ^= bits.RotateLeft32(u, 7)
- u = x3 + x15
- x7 ^= bits.RotateLeft32(u, 9)
- u = x7 + x3
- x11 ^= bits.RotateLeft32(u, 13)
- u = x11 + x7
- x15 ^= bits.RotateLeft32(u, 18)
-
- u = x0 + x3
- x1 ^= bits.RotateLeft32(u, 7)
- u = x1 + x0
- x2 ^= bits.RotateLeft32(u, 9)
- u = x2 + x1
- x3 ^= bits.RotateLeft32(u, 13)
- u = x3 + x2
- x0 ^= bits.RotateLeft32(u, 18)
-
- u = x5 + x4
- x6 ^= bits.RotateLeft32(u, 7)
- u = x6 + x5
- x7 ^= bits.RotateLeft32(u, 9)
- u = x7 + x6
- x4 ^= bits.RotateLeft32(u, 13)
- u = x4 + x7
- x5 ^= bits.RotateLeft32(u, 18)
-
- u = x10 + x9
- x11 ^= bits.RotateLeft32(u, 7)
- u = x11 + x10
- x8 ^= bits.RotateLeft32(u, 9)
- u = x8 + x11
- x9 ^= bits.RotateLeft32(u, 13)
- u = x9 + x8
- x10 ^= bits.RotateLeft32(u, 18)
-
- u = x15 + x14
- x12 ^= bits.RotateLeft32(u, 7)
- u = x12 + x15
- x13 ^= bits.RotateLeft32(u, 9)
- u = x13 + x12
- x14 ^= bits.RotateLeft32(u, 13)
- u = x14 + x13
- x15 ^= bits.RotateLeft32(u, 18)
- }
- out[0] = byte(x0)
- out[1] = byte(x0 >> 8)
- out[2] = byte(x0 >> 16)
- out[3] = byte(x0 >> 24)
-
- out[4] = byte(x5)
- out[5] = byte(x5 >> 8)
- out[6] = byte(x5 >> 16)
- out[7] = byte(x5 >> 24)
-
- out[8] = byte(x10)
- out[9] = byte(x10 >> 8)
- out[10] = byte(x10 >> 16)
- out[11] = byte(x10 >> 24)
-
- out[12] = byte(x15)
- out[13] = byte(x15 >> 8)
- out[14] = byte(x15 >> 16)
- out[15] = byte(x15 >> 24)
-
- out[16] = byte(x6)
- out[17] = byte(x6 >> 8)
- out[18] = byte(x6 >> 16)
- out[19] = byte(x6 >> 24)
-
- out[20] = byte(x7)
- out[21] = byte(x7 >> 8)
- out[22] = byte(x7 >> 16)
- out[23] = byte(x7 >> 24)
-
- out[24] = byte(x8)
- out[25] = byte(x8 >> 8)
- out[26] = byte(x8 >> 16)
- out[27] = byte(x8 >> 24)
-
- out[28] = byte(x9)
- out[29] = byte(x9 >> 8)
- out[30] = byte(x9 >> 16)
- out[31] = byte(x9 >> 24)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
deleted file mode 100644
index 7ec7bb3..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package salsa
-
-import "math/bits"
-
-// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
-// the result into the 64-byte array out. The input and output may be the same array.
-func Core208(out *[64]byte, in *[64]byte) {
- j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
- j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
- j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
- j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
- j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
- j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
- j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
- j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
- j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
- j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
- j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
- j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
- j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
- j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
- j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
- j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
-
- x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
- x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
-
- for i := 0; i < 8; i += 2 {
- u := x0 + x12
- x4 ^= bits.RotateLeft32(u, 7)
- u = x4 + x0
- x8 ^= bits.RotateLeft32(u, 9)
- u = x8 + x4
- x12 ^= bits.RotateLeft32(u, 13)
- u = x12 + x8
- x0 ^= bits.RotateLeft32(u, 18)
-
- u = x5 + x1
- x9 ^= bits.RotateLeft32(u, 7)
- u = x9 + x5
- x13 ^= bits.RotateLeft32(u, 9)
- u = x13 + x9
- x1 ^= bits.RotateLeft32(u, 13)
- u = x1 + x13
- x5 ^= bits.RotateLeft32(u, 18)
-
- u = x10 + x6
- x14 ^= bits.RotateLeft32(u, 7)
- u = x14 + x10
- x2 ^= bits.RotateLeft32(u, 9)
- u = x2 + x14
- x6 ^= bits.RotateLeft32(u, 13)
- u = x6 + x2
- x10 ^= bits.RotateLeft32(u, 18)
-
- u = x15 + x11
- x3 ^= bits.RotateLeft32(u, 7)
- u = x3 + x15
- x7 ^= bits.RotateLeft32(u, 9)
- u = x7 + x3
- x11 ^= bits.RotateLeft32(u, 13)
- u = x11 + x7
- x15 ^= bits.RotateLeft32(u, 18)
-
- u = x0 + x3
- x1 ^= bits.RotateLeft32(u, 7)
- u = x1 + x0
- x2 ^= bits.RotateLeft32(u, 9)
- u = x2 + x1
- x3 ^= bits.RotateLeft32(u, 13)
- u = x3 + x2
- x0 ^= bits.RotateLeft32(u, 18)
-
- u = x5 + x4
- x6 ^= bits.RotateLeft32(u, 7)
- u = x6 + x5
- x7 ^= bits.RotateLeft32(u, 9)
- u = x7 + x6
- x4 ^= bits.RotateLeft32(u, 13)
- u = x4 + x7
- x5 ^= bits.RotateLeft32(u, 18)
-
- u = x10 + x9
- x11 ^= bits.RotateLeft32(u, 7)
- u = x11 + x10
- x8 ^= bits.RotateLeft32(u, 9)
- u = x8 + x11
- x9 ^= bits.RotateLeft32(u, 13)
- u = x9 + x8
- x10 ^= bits.RotateLeft32(u, 18)
-
- u = x15 + x14
- x12 ^= bits.RotateLeft32(u, 7)
- u = x12 + x15
- x13 ^= bits.RotateLeft32(u, 9)
- u = x13 + x12
- x14 ^= bits.RotateLeft32(u, 13)
- u = x14 + x13
- x15 ^= bits.RotateLeft32(u, 18)
- }
- x0 += j0
- x1 += j1
- x2 += j2
- x3 += j3
- x4 += j4
- x5 += j5
- x6 += j6
- x7 += j7
- x8 += j8
- x9 += j9
- x10 += j10
- x11 += j11
- x12 += j12
- x13 += j13
- x14 += j14
- x15 += j15
-
- out[0] = byte(x0)
- out[1] = byte(x0 >> 8)
- out[2] = byte(x0 >> 16)
- out[3] = byte(x0 >> 24)
-
- out[4] = byte(x1)
- out[5] = byte(x1 >> 8)
- out[6] = byte(x1 >> 16)
- out[7] = byte(x1 >> 24)
-
- out[8] = byte(x2)
- out[9] = byte(x2 >> 8)
- out[10] = byte(x2 >> 16)
- out[11] = byte(x2 >> 24)
-
- out[12] = byte(x3)
- out[13] = byte(x3 >> 8)
- out[14] = byte(x3 >> 16)
- out[15] = byte(x3 >> 24)
-
- out[16] = byte(x4)
- out[17] = byte(x4 >> 8)
- out[18] = byte(x4 >> 16)
- out[19] = byte(x4 >> 24)
-
- out[20] = byte(x5)
- out[21] = byte(x5 >> 8)
- out[22] = byte(x5 >> 16)
- out[23] = byte(x5 >> 24)
-
- out[24] = byte(x6)
- out[25] = byte(x6 >> 8)
- out[26] = byte(x6 >> 16)
- out[27] = byte(x6 >> 24)
-
- out[28] = byte(x7)
- out[29] = byte(x7 >> 8)
- out[30] = byte(x7 >> 16)
- out[31] = byte(x7 >> 24)
-
- out[32] = byte(x8)
- out[33] = byte(x8 >> 8)
- out[34] = byte(x8 >> 16)
- out[35] = byte(x8 >> 24)
-
- out[36] = byte(x9)
- out[37] = byte(x9 >> 8)
- out[38] = byte(x9 >> 16)
- out[39] = byte(x9 >> 24)
-
- out[40] = byte(x10)
- out[41] = byte(x10 >> 8)
- out[42] = byte(x10 >> 16)
- out[43] = byte(x10 >> 24)
-
- out[44] = byte(x11)
- out[45] = byte(x11 >> 8)
- out[46] = byte(x11 >> 16)
- out[47] = byte(x11 >> 24)
-
- out[48] = byte(x12)
- out[49] = byte(x12 >> 8)
- out[50] = byte(x12 >> 16)
- out[51] = byte(x12 >> 24)
-
- out[52] = byte(x13)
- out[53] = byte(x13 >> 8)
- out[54] = byte(x13 >> 16)
- out[55] = byte(x13 >> 24)
-
- out[56] = byte(x14)
- out[57] = byte(x14 >> 8)
- out[58] = byte(x14 >> 16)
- out[59] = byte(x14 >> 24)
-
- out[60] = byte(x15)
- out[61] = byte(x15 >> 8)
- out[62] = byte(x15 >> 16)
- out[63] = byte(x15 >> 24)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
deleted file mode 100644
index e76b44f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && !purego && gc
-
-package salsa
-
-//go:noescape
-
-// salsa2020XORKeyStream is implemented in salsa20_amd64.s.
-func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
-
-// XORKeyStream crypts bytes from in to out using the given key and counters.
-// In and out must overlap entirely or not at all. Counter
-// contains the raw salsa20 counter bytes (both nonce and block counter).
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- if len(in) == 0 {
- return
- }
- _ = out[len(in)-1]
- salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
deleted file mode 100644
index fcce023..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s
+++ /dev/null
@@ -1,880 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build amd64 && !purego && gc
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
-// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
-TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
- MOVQ out+0(FP),DI
- MOVQ in+8(FP),SI
- MOVQ n+16(FP),DX
- MOVQ nonce+24(FP),CX
- MOVQ key+32(FP),R8
-
- MOVQ SP,R12
- ADDQ $31, R12
- ANDQ $~31, R12
-
- MOVQ DX,R9
- MOVQ CX,DX
- MOVQ R8,R10
- CMPQ R9,$0
- JBE DONE
- START:
- MOVL 20(R10),CX
- MOVL 0(R10),R8
- MOVL 0(DX),AX
- MOVL 16(R10),R11
- MOVL CX,0(R12)
- MOVL R8, 4 (R12)
- MOVL AX, 8 (R12)
- MOVL R11, 12 (R12)
- MOVL 8(DX),CX
- MOVL 24(R10),R8
- MOVL 4(R10),AX
- MOVL 4(DX),R11
- MOVL CX,16(R12)
- MOVL R8, 20 (R12)
- MOVL AX, 24 (R12)
- MOVL R11, 28 (R12)
- MOVL 12(DX),CX
- MOVL 12(R10),DX
- MOVL 28(R10),R8
- MOVL 8(R10),AX
- MOVL DX,32(R12)
- MOVL CX, 36 (R12)
- MOVL R8, 40 (R12)
- MOVL AX, 44 (R12)
- MOVQ $1634760805,DX
- MOVQ $857760878,CX
- MOVQ $2036477234,R8
- MOVQ $1797285236,AX
- MOVL DX,48(R12)
- MOVL CX, 52 (R12)
- MOVL R8, 56 (R12)
- MOVL AX, 60 (R12)
- CMPQ R9,$256
- JB BYTESBETWEEN1AND255
- MOVOA 48(R12),X0
- PSHUFL $0X55,X0,X1
- PSHUFL $0XAA,X0,X2
- PSHUFL $0XFF,X0,X3
- PSHUFL $0X00,X0,X0
- MOVOA X1,64(R12)
- MOVOA X2,80(R12)
- MOVOA X3,96(R12)
- MOVOA X0,112(R12)
- MOVOA 0(R12),X0
- PSHUFL $0XAA,X0,X1
- PSHUFL $0XFF,X0,X2
- PSHUFL $0X00,X0,X3
- PSHUFL $0X55,X0,X0
- MOVOA X1,128(R12)
- MOVOA X2,144(R12)
- MOVOA X3,160(R12)
- MOVOA X0,176(R12)
- MOVOA 16(R12),X0
- PSHUFL $0XFF,X0,X1
- PSHUFL $0X55,X0,X2
- PSHUFL $0XAA,X0,X0
- MOVOA X1,192(R12)
- MOVOA X2,208(R12)
- MOVOA X0,224(R12)
- MOVOA 32(R12),X0
- PSHUFL $0X00,X0,X1
- PSHUFL $0XAA,X0,X2
- PSHUFL $0XFF,X0,X0
- MOVOA X1,240(R12)
- MOVOA X2,256(R12)
- MOVOA X0,272(R12)
- BYTESATLEAST256:
- MOVL 16(R12),DX
- MOVL 36 (R12),CX
- MOVL DX,288(R12)
- MOVL CX,304(R12)
- SHLQ $32,CX
- ADDQ CX,DX
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX, 292 (R12)
- MOVL CX, 308 (R12)
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX, 296 (R12)
- MOVL CX, 312 (R12)
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX, 300 (R12)
- MOVL CX, 316 (R12)
- ADDQ $1,DX
- MOVQ DX,CX
- SHRQ $32,CX
- MOVL DX,16(R12)
- MOVL CX, 36 (R12)
- MOVQ R9,352(R12)
- MOVQ $20,DX
- MOVOA 64(R12),X0
- MOVOA 80(R12),X1
- MOVOA 96(R12),X2
- MOVOA 256(R12),X3
- MOVOA 272(R12),X4
- MOVOA 128(R12),X5
- MOVOA 144(R12),X6
- MOVOA 176(R12),X7
- MOVOA 192(R12),X8
- MOVOA 208(R12),X9
- MOVOA 224(R12),X10
- MOVOA 304(R12),X11
- MOVOA 112(R12),X12
- MOVOA 160(R12),X13
- MOVOA 240(R12),X14
- MOVOA 288(R12),X15
- MAINLOOP1:
- MOVOA X1,320(R12)
- MOVOA X2,336(R12)
- MOVOA X13,X1
- PADDL X12,X1
- MOVOA X1,X2
- PSLLL $7,X1
- PXOR X1,X14
- PSRLL $25,X2
- PXOR X2,X14
- MOVOA X7,X1
- PADDL X0,X1
- MOVOA X1,X2
- PSLLL $7,X1
- PXOR X1,X11
- PSRLL $25,X2
- PXOR X2,X11
- MOVOA X12,X1
- PADDL X14,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X15
- PSRLL $23,X2
- PXOR X2,X15
- MOVOA X0,X1
- PADDL X11,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X9
- PSRLL $23,X2
- PXOR X2,X9
- MOVOA X14,X1
- PADDL X15,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X13
- PSRLL $19,X2
- PXOR X2,X13
- MOVOA X11,X1
- PADDL X9,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X7
- PSRLL $19,X2
- PXOR X2,X7
- MOVOA X15,X1
- PADDL X13,X1
- MOVOA X1,X2
- PSLLL $18,X1
- PXOR X1,X12
- PSRLL $14,X2
- PXOR X2,X12
- MOVOA 320(R12),X1
- MOVOA X12,320(R12)
- MOVOA X9,X2
- PADDL X7,X2
- MOVOA X2,X12
- PSLLL $18,X2
- PXOR X2,X0
- PSRLL $14,X12
- PXOR X12,X0
- MOVOA X5,X2
- PADDL X1,X2
- MOVOA X2,X12
- PSLLL $7,X2
- PXOR X2,X3
- PSRLL $25,X12
- PXOR X12,X3
- MOVOA 336(R12),X2
- MOVOA X0,336(R12)
- MOVOA X6,X0
- PADDL X2,X0
- MOVOA X0,X12
- PSLLL $7,X0
- PXOR X0,X4
- PSRLL $25,X12
- PXOR X12,X4
- MOVOA X1,X0
- PADDL X3,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X10
- PSRLL $23,X12
- PXOR X12,X10
- MOVOA X2,X0
- PADDL X4,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X8
- PSRLL $23,X12
- PXOR X12,X8
- MOVOA X3,X0
- PADDL X10,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X5
- PSRLL $19,X12
- PXOR X12,X5
- MOVOA X4,X0
- PADDL X8,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X6
- PSRLL $19,X12
- PXOR X12,X6
- MOVOA X10,X0
- PADDL X5,X0
- MOVOA X0,X12
- PSLLL $18,X0
- PXOR X0,X1
- PSRLL $14,X12
- PXOR X12,X1
- MOVOA 320(R12),X0
- MOVOA X1,320(R12)
- MOVOA X4,X1
- PADDL X0,X1
- MOVOA X1,X12
- PSLLL $7,X1
- PXOR X1,X7
- PSRLL $25,X12
- PXOR X12,X7
- MOVOA X8,X1
- PADDL X6,X1
- MOVOA X1,X12
- PSLLL $18,X1
- PXOR X1,X2
- PSRLL $14,X12
- PXOR X12,X2
- MOVOA 336(R12),X12
- MOVOA X2,336(R12)
- MOVOA X14,X1
- PADDL X12,X1
- MOVOA X1,X2
- PSLLL $7,X1
- PXOR X1,X5
- PSRLL $25,X2
- PXOR X2,X5
- MOVOA X0,X1
- PADDL X7,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X10
- PSRLL $23,X2
- PXOR X2,X10
- MOVOA X12,X1
- PADDL X5,X1
- MOVOA X1,X2
- PSLLL $9,X1
- PXOR X1,X8
- PSRLL $23,X2
- PXOR X2,X8
- MOVOA X7,X1
- PADDL X10,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X4
- PSRLL $19,X2
- PXOR X2,X4
- MOVOA X5,X1
- PADDL X8,X1
- MOVOA X1,X2
- PSLLL $13,X1
- PXOR X1,X14
- PSRLL $19,X2
- PXOR X2,X14
- MOVOA X10,X1
- PADDL X4,X1
- MOVOA X1,X2
- PSLLL $18,X1
- PXOR X1,X0
- PSRLL $14,X2
- PXOR X2,X0
- MOVOA 320(R12),X1
- MOVOA X0,320(R12)
- MOVOA X8,X0
- PADDL X14,X0
- MOVOA X0,X2
- PSLLL $18,X0
- PXOR X0,X12
- PSRLL $14,X2
- PXOR X2,X12
- MOVOA X11,X0
- PADDL X1,X0
- MOVOA X0,X2
- PSLLL $7,X0
- PXOR X0,X6
- PSRLL $25,X2
- PXOR X2,X6
- MOVOA 336(R12),X2
- MOVOA X12,336(R12)
- MOVOA X3,X0
- PADDL X2,X0
- MOVOA X0,X12
- PSLLL $7,X0
- PXOR X0,X13
- PSRLL $25,X12
- PXOR X12,X13
- MOVOA X1,X0
- PADDL X6,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X15
- PSRLL $23,X12
- PXOR X12,X15
- MOVOA X2,X0
- PADDL X13,X0
- MOVOA X0,X12
- PSLLL $9,X0
- PXOR X0,X9
- PSRLL $23,X12
- PXOR X12,X9
- MOVOA X6,X0
- PADDL X15,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X11
- PSRLL $19,X12
- PXOR X12,X11
- MOVOA X13,X0
- PADDL X9,X0
- MOVOA X0,X12
- PSLLL $13,X0
- PXOR X0,X3
- PSRLL $19,X12
- PXOR X12,X3
- MOVOA X15,X0
- PADDL X11,X0
- MOVOA X0,X12
- PSLLL $18,X0
- PXOR X0,X1
- PSRLL $14,X12
- PXOR X12,X1
- MOVOA X9,X0
- PADDL X3,X0
- MOVOA X0,X12
- PSLLL $18,X0
- PXOR X0,X2
- PSRLL $14,X12
- PXOR X12,X2
- MOVOA 320(R12),X12
- MOVOA 336(R12),X0
- SUBQ $2,DX
- JA MAINLOOP1
- PADDL 112(R12),X12
- PADDL 176(R12),X7
- PADDL 224(R12),X10
- PADDL 272(R12),X4
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- PSHUFL $0X39,X12,X12
- PSHUFL $0X39,X7,X7
- PSHUFL $0X39,X10,X10
- PSHUFL $0X39,X4,X4
- XORL 0(SI),DX
- XORL 4(SI),CX
- XORL 8(SI),R8
- XORL 12(SI),R9
- MOVL DX,0(DI)
- MOVL CX,4(DI)
- MOVL R8,8(DI)
- MOVL R9,12(DI)
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- PSHUFL $0X39,X12,X12
- PSHUFL $0X39,X7,X7
- PSHUFL $0X39,X10,X10
- PSHUFL $0X39,X4,X4
- XORL 64(SI),DX
- XORL 68(SI),CX
- XORL 72(SI),R8
- XORL 76(SI),R9
- MOVL DX,64(DI)
- MOVL CX,68(DI)
- MOVL R8,72(DI)
- MOVL R9,76(DI)
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- PSHUFL $0X39,X12,X12
- PSHUFL $0X39,X7,X7
- PSHUFL $0X39,X10,X10
- PSHUFL $0X39,X4,X4
- XORL 128(SI),DX
- XORL 132(SI),CX
- XORL 136(SI),R8
- XORL 140(SI),R9
- MOVL DX,128(DI)
- MOVL CX,132(DI)
- MOVL R8,136(DI)
- MOVL R9,140(DI)
- MOVD X12,DX
- MOVD X7,CX
- MOVD X10,R8
- MOVD X4,R9
- XORL 192(SI),DX
- XORL 196(SI),CX
- XORL 200(SI),R8
- XORL 204(SI),R9
- MOVL DX,192(DI)
- MOVL CX,196(DI)
- MOVL R8,200(DI)
- MOVL R9,204(DI)
- PADDL 240(R12),X14
- PADDL 64(R12),X0
- PADDL 128(R12),X5
- PADDL 192(R12),X8
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- PSHUFL $0X39,X14,X14
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X5,X5
- PSHUFL $0X39,X8,X8
- XORL 16(SI),DX
- XORL 20(SI),CX
- XORL 24(SI),R8
- XORL 28(SI),R9
- MOVL DX,16(DI)
- MOVL CX,20(DI)
- MOVL R8,24(DI)
- MOVL R9,28(DI)
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- PSHUFL $0X39,X14,X14
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X5,X5
- PSHUFL $0X39,X8,X8
- XORL 80(SI),DX
- XORL 84(SI),CX
- XORL 88(SI),R8
- XORL 92(SI),R9
- MOVL DX,80(DI)
- MOVL CX,84(DI)
- MOVL R8,88(DI)
- MOVL R9,92(DI)
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- PSHUFL $0X39,X14,X14
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X5,X5
- PSHUFL $0X39,X8,X8
- XORL 144(SI),DX
- XORL 148(SI),CX
- XORL 152(SI),R8
- XORL 156(SI),R9
- MOVL DX,144(DI)
- MOVL CX,148(DI)
- MOVL R8,152(DI)
- MOVL R9,156(DI)
- MOVD X14,DX
- MOVD X0,CX
- MOVD X5,R8
- MOVD X8,R9
- XORL 208(SI),DX
- XORL 212(SI),CX
- XORL 216(SI),R8
- XORL 220(SI),R9
- MOVL DX,208(DI)
- MOVL CX,212(DI)
- MOVL R8,216(DI)
- MOVL R9,220(DI)
- PADDL 288(R12),X15
- PADDL 304(R12),X11
- PADDL 80(R12),X1
- PADDL 144(R12),X6
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- PSHUFL $0X39,X15,X15
- PSHUFL $0X39,X11,X11
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X6,X6
- XORL 32(SI),DX
- XORL 36(SI),CX
- XORL 40(SI),R8
- XORL 44(SI),R9
- MOVL DX,32(DI)
- MOVL CX,36(DI)
- MOVL R8,40(DI)
- MOVL R9,44(DI)
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- PSHUFL $0X39,X15,X15
- PSHUFL $0X39,X11,X11
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X6,X6
- XORL 96(SI),DX
- XORL 100(SI),CX
- XORL 104(SI),R8
- XORL 108(SI),R9
- MOVL DX,96(DI)
- MOVL CX,100(DI)
- MOVL R8,104(DI)
- MOVL R9,108(DI)
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- PSHUFL $0X39,X15,X15
- PSHUFL $0X39,X11,X11
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X6,X6
- XORL 160(SI),DX
- XORL 164(SI),CX
- XORL 168(SI),R8
- XORL 172(SI),R9
- MOVL DX,160(DI)
- MOVL CX,164(DI)
- MOVL R8,168(DI)
- MOVL R9,172(DI)
- MOVD X15,DX
- MOVD X11,CX
- MOVD X1,R8
- MOVD X6,R9
- XORL 224(SI),DX
- XORL 228(SI),CX
- XORL 232(SI),R8
- XORL 236(SI),R9
- MOVL DX,224(DI)
- MOVL CX,228(DI)
- MOVL R8,232(DI)
- MOVL R9,236(DI)
- PADDL 160(R12),X13
- PADDL 208(R12),X9
- PADDL 256(R12),X3
- PADDL 96(R12),X2
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- PSHUFL $0X39,X13,X13
- PSHUFL $0X39,X9,X9
- PSHUFL $0X39,X3,X3
- PSHUFL $0X39,X2,X2
- XORL 48(SI),DX
- XORL 52(SI),CX
- XORL 56(SI),R8
- XORL 60(SI),R9
- MOVL DX,48(DI)
- MOVL CX,52(DI)
- MOVL R8,56(DI)
- MOVL R9,60(DI)
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- PSHUFL $0X39,X13,X13
- PSHUFL $0X39,X9,X9
- PSHUFL $0X39,X3,X3
- PSHUFL $0X39,X2,X2
- XORL 112(SI),DX
- XORL 116(SI),CX
- XORL 120(SI),R8
- XORL 124(SI),R9
- MOVL DX,112(DI)
- MOVL CX,116(DI)
- MOVL R8,120(DI)
- MOVL R9,124(DI)
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- PSHUFL $0X39,X13,X13
- PSHUFL $0X39,X9,X9
- PSHUFL $0X39,X3,X3
- PSHUFL $0X39,X2,X2
- XORL 176(SI),DX
- XORL 180(SI),CX
- XORL 184(SI),R8
- XORL 188(SI),R9
- MOVL DX,176(DI)
- MOVL CX,180(DI)
- MOVL R8,184(DI)
- MOVL R9,188(DI)
- MOVD X13,DX
- MOVD X9,CX
- MOVD X3,R8
- MOVD X2,R9
- XORL 240(SI),DX
- XORL 244(SI),CX
- XORL 248(SI),R8
- XORL 252(SI),R9
- MOVL DX,240(DI)
- MOVL CX,244(DI)
- MOVL R8,248(DI)
- MOVL R9,252(DI)
- MOVQ 352(R12),R9
- SUBQ $256,R9
- ADDQ $256,SI
- ADDQ $256,DI
- CMPQ R9,$256
- JAE BYTESATLEAST256
- CMPQ R9,$0
- JBE DONE
- BYTESBETWEEN1AND255:
- CMPQ R9,$64
- JAE NOCOPY
- MOVQ DI,DX
- LEAQ 360(R12),DI
- MOVQ R9,CX
- REP; MOVSB
- LEAQ 360(R12),DI
- LEAQ 360(R12),SI
- NOCOPY:
- MOVQ R9,352(R12)
- MOVOA 48(R12),X0
- MOVOA 0(R12),X1
- MOVOA 16(R12),X2
- MOVOA 32(R12),X3
- MOVOA X1,X4
- MOVQ $20,CX
- MAINLOOP2:
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X3
- PXOR X6,X3
- PADDL X3,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X3,X3
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X1
- PSHUFL $0X4E,X2,X2
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X1,X1
- PXOR X6,X0
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X1
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X1,X1
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X3
- PSHUFL $0X4E,X2,X2
- PXOR X6,X3
- PADDL X3,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X3,X3
- PXOR X6,X0
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X3
- PXOR X6,X3
- PADDL X3,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X3,X3
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X1
- PSHUFL $0X4E,X2,X2
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X3,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X1,X1
- PXOR X6,X0
- PADDL X0,X4
- MOVOA X0,X5
- MOVOA X4,X6
- PSLLL $7,X4
- PSRLL $25,X6
- PXOR X4,X1
- PXOR X6,X1
- PADDL X1,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $9,X5
- PSRLL $23,X6
- PXOR X5,X2
- PSHUFL $0X93,X1,X1
- PXOR X6,X2
- PADDL X2,X4
- MOVOA X2,X5
- MOVOA X4,X6
- PSLLL $13,X4
- PSRLL $19,X6
- PXOR X4,X3
- PSHUFL $0X4E,X2,X2
- PXOR X6,X3
- SUBQ $4,CX
- PADDL X3,X5
- MOVOA X1,X4
- MOVOA X5,X6
- PSLLL $18,X5
- PXOR X7,X7
- PSRLL $14,X6
- PXOR X5,X0
- PSHUFL $0X39,X3,X3
- PXOR X6,X0
- JA MAINLOOP2
- PADDL 48(R12),X0
- PADDL 0(R12),X1
- PADDL 16(R12),X2
- PADDL 32(R12),X3
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X2,X2
- PSHUFL $0X39,X3,X3
- XORL 0(SI),CX
- XORL 48(SI),R8
- XORL 32(SI),R9
- XORL 16(SI),AX
- MOVL CX,0(DI)
- MOVL R8,48(DI)
- MOVL R9,32(DI)
- MOVL AX,16(DI)
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X2,X2
- PSHUFL $0X39,X3,X3
- XORL 20(SI),CX
- XORL 4(SI),R8
- XORL 52(SI),R9
- XORL 36(SI),AX
- MOVL CX,20(DI)
- MOVL R8,4(DI)
- MOVL R9,52(DI)
- MOVL AX,36(DI)
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- PSHUFL $0X39,X0,X0
- PSHUFL $0X39,X1,X1
- PSHUFL $0X39,X2,X2
- PSHUFL $0X39,X3,X3
- XORL 40(SI),CX
- XORL 24(SI),R8
- XORL 8(SI),R9
- XORL 56(SI),AX
- MOVL CX,40(DI)
- MOVL R8,24(DI)
- MOVL R9,8(DI)
- MOVL AX,56(DI)
- MOVD X0,CX
- MOVD X1,R8
- MOVD X2,R9
- MOVD X3,AX
- XORL 60(SI),CX
- XORL 44(SI),R8
- XORL 28(SI),R9
- XORL 12(SI),AX
- MOVL CX,60(DI)
- MOVL R8,44(DI)
- MOVL R9,28(DI)
- MOVL AX,12(DI)
- MOVQ 352(R12),R9
- MOVL 16(R12),CX
- MOVL 36 (R12),R8
- ADDQ $1,CX
- SHLQ $32,R8
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $32,R8
- MOVL CX,16(R12)
- MOVL R8, 36 (R12)
- CMPQ R9,$64
- JA BYTESATLEAST65
- JAE BYTESATLEAST64
- MOVQ DI,SI
- MOVQ DX,DI
- MOVQ R9,CX
- REP; MOVSB
- BYTESATLEAST64:
- DONE:
- RET
- BYTESATLEAST65:
- SUBQ $64,R9
- ADDQ $64,DI
- ADDQ $64,SI
- JMP BYTESBETWEEN1AND255
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
deleted file mode 100644
index 9448760..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || purego || !gc
-
-package salsa
-
-// XORKeyStream crypts bytes from in to out using the given key and counters.
-// In and out must overlap entirely or not at all. Counter
-// contains the raw salsa20 counter bytes (both nonce and block counter).
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- genericXORKeyStream(out, in, counter, key)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
deleted file mode 100644
index e5cdb9a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package salsa
-
-import "math/bits"
-
-const rounds = 20
-
-// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
-// and 16-byte constant c, and puts the result into 64-byte array out.
-func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
- j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
- j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
- j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
- j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
- j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
- j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
- j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
- j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
- j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
- j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
- j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
- j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
- j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
- j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
- j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
- j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
-
- x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
- x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
-
- for i := 0; i < rounds; i += 2 {
- u := x0 + x12
- x4 ^= bits.RotateLeft32(u, 7)
- u = x4 + x0
- x8 ^= bits.RotateLeft32(u, 9)
- u = x8 + x4
- x12 ^= bits.RotateLeft32(u, 13)
- u = x12 + x8
- x0 ^= bits.RotateLeft32(u, 18)
-
- u = x5 + x1
- x9 ^= bits.RotateLeft32(u, 7)
- u = x9 + x5
- x13 ^= bits.RotateLeft32(u, 9)
- u = x13 + x9
- x1 ^= bits.RotateLeft32(u, 13)
- u = x1 + x13
- x5 ^= bits.RotateLeft32(u, 18)
-
- u = x10 + x6
- x14 ^= bits.RotateLeft32(u, 7)
- u = x14 + x10
- x2 ^= bits.RotateLeft32(u, 9)
- u = x2 + x14
- x6 ^= bits.RotateLeft32(u, 13)
- u = x6 + x2
- x10 ^= bits.RotateLeft32(u, 18)
-
- u = x15 + x11
- x3 ^= bits.RotateLeft32(u, 7)
- u = x3 + x15
- x7 ^= bits.RotateLeft32(u, 9)
- u = x7 + x3
- x11 ^= bits.RotateLeft32(u, 13)
- u = x11 + x7
- x15 ^= bits.RotateLeft32(u, 18)
-
- u = x0 + x3
- x1 ^= bits.RotateLeft32(u, 7)
- u = x1 + x0
- x2 ^= bits.RotateLeft32(u, 9)
- u = x2 + x1
- x3 ^= bits.RotateLeft32(u, 13)
- u = x3 + x2
- x0 ^= bits.RotateLeft32(u, 18)
-
- u = x5 + x4
- x6 ^= bits.RotateLeft32(u, 7)
- u = x6 + x5
- x7 ^= bits.RotateLeft32(u, 9)
- u = x7 + x6
- x4 ^= bits.RotateLeft32(u, 13)
- u = x4 + x7
- x5 ^= bits.RotateLeft32(u, 18)
-
- u = x10 + x9
- x11 ^= bits.RotateLeft32(u, 7)
- u = x11 + x10
- x8 ^= bits.RotateLeft32(u, 9)
- u = x8 + x11
- x9 ^= bits.RotateLeft32(u, 13)
- u = x9 + x8
- x10 ^= bits.RotateLeft32(u, 18)
-
- u = x15 + x14
- x12 ^= bits.RotateLeft32(u, 7)
- u = x12 + x15
- x13 ^= bits.RotateLeft32(u, 9)
- u = x13 + x12
- x14 ^= bits.RotateLeft32(u, 13)
- u = x14 + x13
- x15 ^= bits.RotateLeft32(u, 18)
- }
- x0 += j0
- x1 += j1
- x2 += j2
- x3 += j3
- x4 += j4
- x5 += j5
- x6 += j6
- x7 += j7
- x8 += j8
- x9 += j9
- x10 += j10
- x11 += j11
- x12 += j12
- x13 += j13
- x14 += j14
- x15 += j15
-
- out[0] = byte(x0)
- out[1] = byte(x0 >> 8)
- out[2] = byte(x0 >> 16)
- out[3] = byte(x0 >> 24)
-
- out[4] = byte(x1)
- out[5] = byte(x1 >> 8)
- out[6] = byte(x1 >> 16)
- out[7] = byte(x1 >> 24)
-
- out[8] = byte(x2)
- out[9] = byte(x2 >> 8)
- out[10] = byte(x2 >> 16)
- out[11] = byte(x2 >> 24)
-
- out[12] = byte(x3)
- out[13] = byte(x3 >> 8)
- out[14] = byte(x3 >> 16)
- out[15] = byte(x3 >> 24)
-
- out[16] = byte(x4)
- out[17] = byte(x4 >> 8)
- out[18] = byte(x4 >> 16)
- out[19] = byte(x4 >> 24)
-
- out[20] = byte(x5)
- out[21] = byte(x5 >> 8)
- out[22] = byte(x5 >> 16)
- out[23] = byte(x5 >> 24)
-
- out[24] = byte(x6)
- out[25] = byte(x6 >> 8)
- out[26] = byte(x6 >> 16)
- out[27] = byte(x6 >> 24)
-
- out[28] = byte(x7)
- out[29] = byte(x7 >> 8)
- out[30] = byte(x7 >> 16)
- out[31] = byte(x7 >> 24)
-
- out[32] = byte(x8)
- out[33] = byte(x8 >> 8)
- out[34] = byte(x8 >> 16)
- out[35] = byte(x8 >> 24)
-
- out[36] = byte(x9)
- out[37] = byte(x9 >> 8)
- out[38] = byte(x9 >> 16)
- out[39] = byte(x9 >> 24)
-
- out[40] = byte(x10)
- out[41] = byte(x10 >> 8)
- out[42] = byte(x10 >> 16)
- out[43] = byte(x10 >> 24)
-
- out[44] = byte(x11)
- out[45] = byte(x11 >> 8)
- out[46] = byte(x11 >> 16)
- out[47] = byte(x11 >> 24)
-
- out[48] = byte(x12)
- out[49] = byte(x12 >> 8)
- out[50] = byte(x12 >> 16)
- out[51] = byte(x12 >> 24)
-
- out[52] = byte(x13)
- out[53] = byte(x13 >> 8)
- out[54] = byte(x13 >> 16)
- out[55] = byte(x13 >> 24)
-
- out[56] = byte(x14)
- out[57] = byte(x14 >> 8)
- out[58] = byte(x14 >> 16)
- out[59] = byte(x14 >> 24)
-
- out[60] = byte(x15)
- out[61] = byte(x15 >> 8)
- out[62] = byte(x15 >> 16)
- out[63] = byte(x15 >> 24)
-}
-
-// genericXORKeyStream is the generic implementation of XORKeyStream to be used
-// when no assembly implementation is available.
-func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- var block [64]byte
- var counterCopy [16]byte
- copy(counterCopy[:], counter[:])
-
- for len(in) >= 64 {
- core(&block, &counterCopy, key, &Sigma)
- for i, x := range block {
- out[i] = in[i] ^ x
- }
- u := uint32(1)
- for i := 8; i < 16; i++ {
- u += uint32(counterCopy[i])
- counterCopy[i] = byte(u)
- u >>= 8
- }
- in = in[64:]
- out = out[64:]
- }
-
- if len(in) > 0 {
- core(&block, &counterCopy, key, &Sigma)
- for i, v := range in {
- out[i] = v ^ block[i]
- }
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/LICENSE b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/PATENTS b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/PATENTS
deleted file mode 100644
index 7330990..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
deleted file mode 100644
index 269e173..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
-//
-
-TEXT ·syscall6(SB),NOSPLIT,$0-88
- JMP syscall·syscall6(SB)
-
-TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
- JMP syscall·rawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/byteorder.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/byteorder.go
deleted file mode 100644
index 271055b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/byteorder.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
- "runtime"
-)
-
-// byteOrder is a subset of encoding/binary.ByteOrder.
-type byteOrder interface {
- Uint32([]byte) uint32
- Uint64([]byte) uint64
-}
-
-type littleEndian struct{}
-type bigEndian struct{}
-
-func (littleEndian) Uint32(b []byte) uint32 {
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func (littleEndian) Uint64(b []byte) uint64 {
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-func (bigEndian) Uint32(b []byte) uint32 {
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
-}
-
-func (bigEndian) Uint64(b []byte) uint64 {
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
-}
-
-// hostByteOrder returns littleEndian on little-endian machines and
-// bigEndian on big-endian machines.
-func hostByteOrder() byteOrder {
- switch runtime.GOARCH {
- case "386", "amd64", "amd64p32",
- "alpha",
- "arm", "arm64",
- "loong64",
- "mipsle", "mips64le", "mips64p32le",
- "nios2",
- "ppc64le",
- "riscv", "riscv64",
- "sh":
- return littleEndian{}
- case "armbe", "arm64be",
- "m68k",
- "mips", "mips64", "mips64p32",
- "ppc", "ppc64",
- "s390", "s390x",
- "shbe",
- "sparc", "sparc64":
- return bigEndian{}
- }
- panic("unknown architecture")
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu.go
deleted file mode 100644
index 8fa707a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cpu implements processor feature detection for
-// various CPU architectures.
-package cpu
-
-import (
- "os"
- "strings"
-)
-
-// Initialized reports whether the CPU features were initialized.
-//
-// For some GOOS/GOARCH combinations initialization of the CPU features depends
-// on reading an operating specific file, e.g. /proc/self/auxv on linux/arm
-// Initialized will report false if reading the file fails.
-var Initialized bool
-
-// CacheLinePad is used to pad structs to avoid false sharing.
-type CacheLinePad struct{ _ [cacheLineSize]byte }
-
-// X86 contains the supported CPU features of the
-// current X86/AMD64 platform. If the current platform
-// is not X86/AMD64 then all feature flags are false.
-//
-// X86 is padded to avoid false sharing. Further the HasAVX
-// and HasAVX2 are only set if the OS supports XMM and YMM
-// registers in addition to the CPUID feature bit being set.
-var X86 struct {
- _ CacheLinePad
- HasAES bool // AES hardware implementation (AES NI)
- HasADX bool // Multi-precision add-carry instruction extensions
- HasAVX bool // Advanced vector extension
- HasAVX2 bool // Advanced vector extension 2
- HasAVX512 bool // Advanced vector extension 512
- HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
- HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
- HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
- HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
- HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
- HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
- HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
- HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
- HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
- HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
- HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
- HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
- HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
- HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
- HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
- HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
- HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
- HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
- HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
- HasAMXTile bool // Advanced Matrix Extension Tile instructions
- HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions
- HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions
- HasBMI1 bool // Bit manipulation instruction set 1
- HasBMI2 bool // Bit manipulation instruction set 2
- HasCX16 bool // Compare and exchange 16 Bytes
- HasERMS bool // Enhanced REP for MOVSB and STOSB
- HasFMA bool // Fused-multiply-add instructions
- HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
- HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
- HasPOPCNT bool // Hamming weight instruction POPCNT.
- HasRDRAND bool // RDRAND instruction (on-chip random number generator)
- HasRDSEED bool // RDSEED instruction (on-chip random number generator)
- HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
- HasSSE3 bool // Streaming SIMD extension 3
- HasSSSE3 bool // Supplemental streaming SIMD extension 3
- HasSSE41 bool // Streaming SIMD extension 4 and 4.1
- HasSSE42 bool // Streaming SIMD extension 4 and 4.2
- _ CacheLinePad
-}
-
-// ARM64 contains the supported CPU features of the
-// current ARMv8(aarch64) platform. If the current platform
-// is not arm64 then all feature flags are false.
-var ARM64 struct {
- _ CacheLinePad
- HasFP bool // Floating-point instruction set (always available)
- HasASIMD bool // Advanced SIMD (always available)
- HasEVTSTRM bool // Event stream support
- HasAES bool // AES hardware implementation
- HasPMULL bool // Polynomial multiplication instruction set
- HasSHA1 bool // SHA1 hardware implementation
- HasSHA2 bool // SHA2 hardware implementation
- HasCRC32 bool // CRC32 hardware implementation
- HasATOMICS bool // Atomic memory operation instruction set
- HasFPHP bool // Half precision floating-point instruction set
- HasASIMDHP bool // Advanced SIMD half precision instruction set
- HasCPUID bool // CPUID identification scheme registers
- HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
- HasJSCVT bool // Javascript conversion from floating-point to integer
- HasFCMA bool // Floating-point multiplication and addition of complex numbers
- HasLRCPC bool // Release Consistent processor consistent support
- HasDCPOP bool // Persistent memory support
- HasSHA3 bool // SHA3 hardware implementation
- HasSM3 bool // SM3 hardware implementation
- HasSM4 bool // SM4 hardware implementation
- HasASIMDDP bool // Advanced SIMD double precision instruction set
- HasSHA512 bool // SHA512 hardware implementation
- HasSVE bool // Scalable Vector Extensions
- HasSVE2 bool // Scalable Vector Extensions 2
- HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
- _ CacheLinePad
-}
-
-// ARM contains the supported CPU features of the current ARM (32-bit) platform.
-// All feature flags are false if:
-// 1. the current platform is not arm, or
-// 2. the current operating system is not Linux.
-var ARM struct {
- _ CacheLinePad
- HasSWP bool // SWP instruction support
- HasHALF bool // Half-word load and store support
- HasTHUMB bool // ARM Thumb instruction set
- Has26BIT bool // Address space limited to 26-bits
- HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
- HasFPA bool // Floating point arithmetic support
- HasVFP bool // Vector floating point support
- HasEDSP bool // DSP Extensions support
- HasJAVA bool // Java instruction set
- HasIWMMXT bool // Intel Wireless MMX technology support
- HasCRUNCH bool // MaverickCrunch context switching and handling
- HasTHUMBEE bool // Thumb EE instruction set
- HasNEON bool // NEON instruction set
- HasVFPv3 bool // Vector floating point version 3 support
- HasVFPv3D16 bool // Vector floating point version 3 D8-D15
- HasTLS bool // Thread local storage support
- HasVFPv4 bool // Vector floating point version 4 support
- HasIDIVA bool // Integer divide instruction support in ARM mode
- HasIDIVT bool // Integer divide instruction support in Thumb mode
- HasVFPD32 bool // Vector floating point version 3 D15-D31
- HasLPAE bool // Large Physical Address Extensions
- HasEVTSTRM bool // Event stream support
- HasAES bool // AES hardware implementation
- HasPMULL bool // Polynomial multiplication instruction set
- HasSHA1 bool // SHA1 hardware implementation
- HasSHA2 bool // SHA2 hardware implementation
- HasCRC32 bool // CRC32 hardware implementation
- _ CacheLinePad
-}
-
-// MIPS64X contains the supported CPU features of the current mips64/mips64le
-// platforms. If the current platform is not mips64/mips64le or the current
-// operating system is not Linux then all feature flags are false.
-var MIPS64X struct {
- _ CacheLinePad
- HasMSA bool // MIPS SIMD architecture
- _ CacheLinePad
-}
-
-// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
-// If the current platform is not ppc64/ppc64le then all feature flags are false.
-//
-// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
-// since there are no optional categories. There are some exceptions that also
-// require kernel support to work (DARN, SCV), so there are feature bits for
-// those as well. The struct is padded to avoid false sharing.
-var PPC64 struct {
- _ CacheLinePad
- HasDARN bool // Hardware random number generator (requires kernel enablement)
- HasSCV bool // Syscall vectored (requires kernel enablement)
- IsPOWER8 bool // ISA v2.07 (POWER8)
- IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
- _ CacheLinePad
-}
-
-// S390X contains the supported CPU features of the current IBM Z
-// (s390x) platform. If the current platform is not IBM Z then all
-// feature flags are false.
-//
-// S390X is padded to avoid false sharing. Further HasVX is only set
-// if the OS supports vector registers in addition to the STFLE
-// feature bit being set.
-var S390X struct {
- _ CacheLinePad
- HasZARCH bool // z/Architecture mode is active [mandatory]
- HasSTFLE bool // store facility list extended
- HasLDISP bool // long (20-bit) displacements
- HasEIMM bool // 32-bit immediates
- HasDFP bool // decimal floating point
- HasETF3EH bool // ETF-3 enhanced
- HasMSA bool // message security assist (CPACF)
- HasAES bool // KM-AES{128,192,256} functions
- HasAESCBC bool // KMC-AES{128,192,256} functions
- HasAESCTR bool // KMCTR-AES{128,192,256} functions
- HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
- HasGHASH bool // KIMD-GHASH function
- HasSHA1 bool // K{I,L}MD-SHA-1 functions
- HasSHA256 bool // K{I,L}MD-SHA-256 functions
- HasSHA512 bool // K{I,L}MD-SHA-512 functions
- HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
- HasVX bool // vector facility
- HasVXE bool // vector-enhancements facility 1
- _ CacheLinePad
-}
-
-func init() {
- archInit()
- initOptions()
- processOptions()
-}
-
-// options contains the cpu debug options that can be used in GODEBUG.
-// Options are arch dependent and are added by the arch specific initOptions functions.
-// Features that are mandatory for the specific GOARCH should have the Required field set
-// (e.g. SSE2 on amd64).
-var options []option
-
-// Option names should be lower case. e.g. avx instead of AVX.
-type option struct {
- Name string
- Feature *bool
- Specified bool // whether feature value was specified in GODEBUG
- Enable bool // whether feature should be enabled
- Required bool // whether feature is mandatory and can not be disabled
-}
-
-func processOptions() {
- env := os.Getenv("GODEBUG")
-field:
- for env != "" {
- field := ""
- i := strings.IndexByte(env, ',')
- if i < 0 {
- field, env = env, ""
- } else {
- field, env = env[:i], env[i+1:]
- }
- if len(field) < 4 || field[:4] != "cpu." {
- continue
- }
- i = strings.IndexByte(field, '=')
- if i < 0 {
- print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
- continue
- }
- key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
-
- var enable bool
- switch value {
- case "on":
- enable = true
- case "off":
- enable = false
- default:
- print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
- continue field
- }
-
- if key == "all" {
- for i := range options {
- options[i].Specified = true
- options[i].Enable = enable || options[i].Required
- }
- continue field
- }
-
- for i := range options {
- if options[i].Name == key {
- options[i].Specified = true
- options[i].Enable = enable
- continue field
- }
- }
-
- print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
- }
-
- for _, o := range options {
- if !o.Specified {
- continue
- }
-
- if o.Enable && !*o.Feature {
- print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
- continue
- }
-
- if !o.Enable && o.Required {
- print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
- continue
- }
-
- *o.Feature = o.Enable
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_aix.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_aix.go
deleted file mode 100644
index 9bf0c32..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_aix.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix
-
-package cpu
-
-const (
- // getsystemcfg constants
- _SC_IMPL = 2
- _IMPL_POWER8 = 0x10000
- _IMPL_POWER9 = 0x20000
-)
-
-func archInit() {
- impl := getsystemcfg(_SC_IMPL)
- if impl&_IMPL_POWER8 != 0 {
- PPC64.IsPOWER8 = true
- }
- if impl&_IMPL_POWER9 != 0 {
- PPC64.IsPOWER8 = true
- PPC64.IsPOWER9 = true
- }
-
- Initialized = true
-}
-
-func getsystemcfg(label int) (n uint64) {
- r0, _ := callgetsystemcfg(label)
- n = uint64(r0)
- return
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm.go
deleted file mode 100644
index 301b752..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-const cacheLineSize = 32
-
-// HWCAP/HWCAP2 bits.
-// These are specific to Linux.
-const (
- hwcap_SWP = 1 << 0
- hwcap_HALF = 1 << 1
- hwcap_THUMB = 1 << 2
- hwcap_26BIT = 1 << 3
- hwcap_FAST_MULT = 1 << 4
- hwcap_FPA = 1 << 5
- hwcap_VFP = 1 << 6
- hwcap_EDSP = 1 << 7
- hwcap_JAVA = 1 << 8
- hwcap_IWMMXT = 1 << 9
- hwcap_CRUNCH = 1 << 10
- hwcap_THUMBEE = 1 << 11
- hwcap_NEON = 1 << 12
- hwcap_VFPv3 = 1 << 13
- hwcap_VFPv3D16 = 1 << 14
- hwcap_TLS = 1 << 15
- hwcap_VFPv4 = 1 << 16
- hwcap_IDIVA = 1 << 17
- hwcap_IDIVT = 1 << 18
- hwcap_VFPD32 = 1 << 19
- hwcap_LPAE = 1 << 20
- hwcap_EVTSTRM = 1 << 21
-
- hwcap2_AES = 1 << 0
- hwcap2_PMULL = 1 << 1
- hwcap2_SHA1 = 1 << 2
- hwcap2_SHA2 = 1 << 3
- hwcap2_CRC32 = 1 << 4
-)
-
-func initOptions() {
- options = []option{
- {Name: "pmull", Feature: &ARM.HasPMULL},
- {Name: "sha1", Feature: &ARM.HasSHA1},
- {Name: "sha2", Feature: &ARM.HasSHA2},
- {Name: "swp", Feature: &ARM.HasSWP},
- {Name: "thumb", Feature: &ARM.HasTHUMB},
- {Name: "thumbee", Feature: &ARM.HasTHUMBEE},
- {Name: "tls", Feature: &ARM.HasTLS},
- {Name: "vfp", Feature: &ARM.HasVFP},
- {Name: "vfpd32", Feature: &ARM.HasVFPD32},
- {Name: "vfpv3", Feature: &ARM.HasVFPv3},
- {Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16},
- {Name: "vfpv4", Feature: &ARM.HasVFPv4},
- {Name: "half", Feature: &ARM.HasHALF},
- {Name: "26bit", Feature: &ARM.Has26BIT},
- {Name: "fastmul", Feature: &ARM.HasFASTMUL},
- {Name: "fpa", Feature: &ARM.HasFPA},
- {Name: "edsp", Feature: &ARM.HasEDSP},
- {Name: "java", Feature: &ARM.HasJAVA},
- {Name: "iwmmxt", Feature: &ARM.HasIWMMXT},
- {Name: "crunch", Feature: &ARM.HasCRUNCH},
- {Name: "neon", Feature: &ARM.HasNEON},
- {Name: "idivt", Feature: &ARM.HasIDIVT},
- {Name: "idiva", Feature: &ARM.HasIDIVA},
- {Name: "lpae", Feature: &ARM.HasLPAE},
- {Name: "evtstrm", Feature: &ARM.HasEVTSTRM},
- {Name: "aes", Feature: &ARM.HasAES},
- {Name: "crc32", Feature: &ARM.HasCRC32},
- }
-
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm64.go
deleted file mode 100644
index 0e27a21..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import "runtime"
-
-// cacheLineSize is used to prevent false sharing of cache lines.
-// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size.
-// It doesn't cost much and is much more future-proof.
-const cacheLineSize = 128
-
-func initOptions() {
- options = []option{
- {Name: "fp", Feature: &ARM64.HasFP},
- {Name: "asimd", Feature: &ARM64.HasASIMD},
- {Name: "evstrm", Feature: &ARM64.HasEVTSTRM},
- {Name: "aes", Feature: &ARM64.HasAES},
- {Name: "fphp", Feature: &ARM64.HasFPHP},
- {Name: "jscvt", Feature: &ARM64.HasJSCVT},
- {Name: "lrcpc", Feature: &ARM64.HasLRCPC},
- {Name: "pmull", Feature: &ARM64.HasPMULL},
- {Name: "sha1", Feature: &ARM64.HasSHA1},
- {Name: "sha2", Feature: &ARM64.HasSHA2},
- {Name: "sha3", Feature: &ARM64.HasSHA3},
- {Name: "sha512", Feature: &ARM64.HasSHA512},
- {Name: "sm3", Feature: &ARM64.HasSM3},
- {Name: "sm4", Feature: &ARM64.HasSM4},
- {Name: "sve", Feature: &ARM64.HasSVE},
- {Name: "sve2", Feature: &ARM64.HasSVE2},
- {Name: "crc32", Feature: &ARM64.HasCRC32},
- {Name: "atomics", Feature: &ARM64.HasATOMICS},
- {Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
- {Name: "cpuid", Feature: &ARM64.HasCPUID},
- {Name: "asimrdm", Feature: &ARM64.HasASIMDRDM},
- {Name: "fcma", Feature: &ARM64.HasFCMA},
- {Name: "dcpop", Feature: &ARM64.HasDCPOP},
- {Name: "asimddp", Feature: &ARM64.HasASIMDDP},
- {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
- }
-}
-
-func archInit() {
- switch runtime.GOOS {
- case "freebsd":
- readARM64Registers()
- case "linux", "netbsd", "openbsd":
- doinit()
- default:
- // Many platforms don't seem to allow reading these registers.
- setMinimalFeatures()
- }
-}
-
-// setMinimalFeatures fakes the minimal ARM64 features expected by
-// TestARM64minimalFeatures.
-func setMinimalFeatures() {
- ARM64.HasASIMD = true
- ARM64.HasFP = true
-}
-
-func readARM64Registers() {
- Initialized = true
-
- parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
-}
-
-func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
- // ID_AA64ISAR0_EL1
- switch extractBits(isar0, 4, 7) {
- case 1:
- ARM64.HasAES = true
- case 2:
- ARM64.HasAES = true
- ARM64.HasPMULL = true
- }
-
- switch extractBits(isar0, 8, 11) {
- case 1:
- ARM64.HasSHA1 = true
- }
-
- switch extractBits(isar0, 12, 15) {
- case 1:
- ARM64.HasSHA2 = true
- case 2:
- ARM64.HasSHA2 = true
- ARM64.HasSHA512 = true
- }
-
- switch extractBits(isar0, 16, 19) {
- case 1:
- ARM64.HasCRC32 = true
- }
-
- switch extractBits(isar0, 20, 23) {
- case 2:
- ARM64.HasATOMICS = true
- }
-
- switch extractBits(isar0, 28, 31) {
- case 1:
- ARM64.HasASIMDRDM = true
- }
-
- switch extractBits(isar0, 32, 35) {
- case 1:
- ARM64.HasSHA3 = true
- }
-
- switch extractBits(isar0, 36, 39) {
- case 1:
- ARM64.HasSM3 = true
- }
-
- switch extractBits(isar0, 40, 43) {
- case 1:
- ARM64.HasSM4 = true
- }
-
- switch extractBits(isar0, 44, 47) {
- case 1:
- ARM64.HasASIMDDP = true
- }
-
- // ID_AA64ISAR1_EL1
- switch extractBits(isar1, 0, 3) {
- case 1:
- ARM64.HasDCPOP = true
- }
-
- switch extractBits(isar1, 12, 15) {
- case 1:
- ARM64.HasJSCVT = true
- }
-
- switch extractBits(isar1, 16, 19) {
- case 1:
- ARM64.HasFCMA = true
- }
-
- switch extractBits(isar1, 20, 23) {
- case 1:
- ARM64.HasLRCPC = true
- }
-
- // ID_AA64PFR0_EL1
- switch extractBits(pfr0, 16, 19) {
- case 0:
- ARM64.HasFP = true
- case 1:
- ARM64.HasFP = true
- ARM64.HasFPHP = true
- }
-
- switch extractBits(pfr0, 20, 23) {
- case 0:
- ARM64.HasASIMD = true
- case 1:
- ARM64.HasASIMD = true
- ARM64.HasASIMDHP = true
- }
-
- switch extractBits(pfr0, 32, 35) {
- case 1:
- ARM64.HasSVE = true
-
- parseARM64SVERegister(getzfr0())
- }
-}
-
-func parseARM64SVERegister(zfr0 uint64) {
- switch extractBits(zfr0, 0, 3) {
- case 1:
- ARM64.HasSVE2 = true
- }
-}
-
-func extractBits(data uint64, start, end uint) uint {
- return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm64.s
deleted file mode 100644
index 22cc998..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_arm64.s
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-// func getisar0() uint64
-TEXT ·getisar0(SB),NOSPLIT,$0-8
- // get Instruction Set Attributes 0 into x0
- // mrs x0, ID_AA64ISAR0_EL1 = d5380600
- WORD $0xd5380600
- MOVD R0, ret+0(FP)
- RET
-
-// func getisar1() uint64
-TEXT ·getisar1(SB),NOSPLIT,$0-8
- // get Instruction Set Attributes 1 into x0
- // mrs x0, ID_AA64ISAR1_EL1 = d5380620
- WORD $0xd5380620
- MOVD R0, ret+0(FP)
- RET
-
-// func getpfr0() uint64
-TEXT ·getpfr0(SB),NOSPLIT,$0-8
- // get Processor Feature Register 0 into x0
- // mrs x0, ID_AA64PFR0_EL1 = d5380400
- WORD $0xd5380400
- MOVD R0, ret+0(FP)
- RET
-
-// func getzfr0() uint64
-TEXT ·getzfr0(SB),NOSPLIT,$0-8
- // get SVE Feature Register 0 into x0
- // mrs x0, ID_AA64ZFR0_EL1 = d5380480
- WORD $0xd5380480
- MOVD R0, ret+0(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
deleted file mode 100644
index 6ac6e1e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-package cpu
-
-func getisar0() uint64
-func getisar1() uint64
-func getpfr0() uint64
-func getzfr0() uint64
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
deleted file mode 100644
index c8ae6dd..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-package cpu
-
-// haveAsmFunctions reports whether the other functions in this file can
-// be safely called.
-func haveAsmFunctions() bool { return true }
-
-// The following feature detection functions are defined in cpu_s390x.s.
-// They are likely to be expensive to call so the results should be cached.
-func stfle() facilityList
-func kmQuery() queryResult
-func kmcQuery() queryResult
-func kmctrQuery() queryResult
-func kmaQuery() queryResult
-func kimdQuery() queryResult
-func klmdQuery() queryResult
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
deleted file mode 100644
index 910728f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (386 || amd64 || amd64p32) && gc
-
-package cpu
-
-// cpuid is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo.c for gccgo.
-func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
-
-// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo.c for gccgo.
-func xgetbv() (eax, edx uint32)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
deleted file mode 100644
index 7f19467..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo
-
-package cpu
-
-func getisar0() uint64 { return 0 }
-func getisar1() uint64 { return 0 }
-func getpfr0() uint64 { return 0 }
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
deleted file mode 100644
index 9526d2c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo
-
-package cpu
-
-// haveAsmFunctions reports whether the other functions in this file can
-// be safely called.
-func haveAsmFunctions() bool { return false }
-
-// TODO(mundaym): the following feature detection functions are currently
-// stubs. See https://golang.org/cl/162887 for how to fix this.
-// They are likely to be expensive to call so the results should be cached.
-func stfle() facilityList { panic("not implemented for gccgo") }
-func kmQuery() queryResult { panic("not implemented for gccgo") }
-func kmcQuery() queryResult { panic("not implemented for gccgo") }
-func kmctrQuery() queryResult { panic("not implemented for gccgo") }
-func kmaQuery() queryResult { panic("not implemented for gccgo") }
-func kimdQuery() queryResult { panic("not implemented for gccgo") }
-func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
deleted file mode 100644
index 3f73a05..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (386 || amd64 || amd64p32) && gccgo
-
-#include
-#include
-#include
-
-// Need to wrap __get_cpuid_count because it's declared as static.
-int
-gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
- uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
-{
- return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
-}
-
-#pragma GCC diagnostic ignored "-Wunknown-pragmas"
-#pragma GCC push_options
-#pragma GCC target("xsave")
-#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
-
-// xgetbv reads the contents of an XCR (Extended Control Register)
-// specified in the ECX register into registers EDX:EAX.
-// Currently, the only supported value for XCR is 0.
-void
-gccgoXgetbv(uint32_t *eax, uint32_t *edx)
-{
- uint64_t v = _xgetbv(0);
- *eax = v & 0xffffffff;
- *edx = v >> 32;
-}
-
-#pragma clang attribute pop
-#pragma GCC pop_options
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
deleted file mode 100644
index 99c60fe..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (386 || amd64 || amd64p32) && gccgo
-
-package cpu
-
-//extern gccgoGetCpuidCount
-func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
-
-func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
- var a, b, c, d uint32
- gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
- return a, b, c, d
-}
-
-//extern gccgoXgetbv
-func gccgoXgetbv(eax, edx *uint32)
-
-func xgetbv() (eax, edx uint32) {
- var a, d uint32
- gccgoXgetbv(&a, &d)
- return a, d
-}
-
-// gccgo doesn't build on Darwin, per:
-// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
-func darwinSupportsAVX512() bool {
- return false
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux.go
deleted file mode 100644
index 743eb54..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !386 && !amd64 && !amd64p32 && !arm64
-
-package cpu
-
-func archInit() {
- if err := readHWCAP(); err != nil {
- return
- }
- doinit()
- Initialized = true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
deleted file mode 100644
index 2057006..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-func doinit() {
- ARM.HasSWP = isSet(hwCap, hwcap_SWP)
- ARM.HasHALF = isSet(hwCap, hwcap_HALF)
- ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB)
- ARM.Has26BIT = isSet(hwCap, hwcap_26BIT)
- ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT)
- ARM.HasFPA = isSet(hwCap, hwcap_FPA)
- ARM.HasVFP = isSet(hwCap, hwcap_VFP)
- ARM.HasEDSP = isSet(hwCap, hwcap_EDSP)
- ARM.HasJAVA = isSet(hwCap, hwcap_JAVA)
- ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT)
- ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH)
- ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE)
- ARM.HasNEON = isSet(hwCap, hwcap_NEON)
- ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3)
- ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16)
- ARM.HasTLS = isSet(hwCap, hwcap_TLS)
- ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4)
- ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA)
- ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT)
- ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32)
- ARM.HasLPAE = isSet(hwCap, hwcap_LPAE)
- ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
- ARM.HasAES = isSet(hwCap2, hwcap2_AES)
- ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL)
- ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1)
- ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2)
- ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32)
-}
-
-func isSet(hwc uint, value uint) bool {
- return hwc&value != 0
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
deleted file mode 100644
index 3d386d0..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
- "strings"
- "syscall"
-)
-
-// HWCAP/HWCAP2 bits. These are exposed by Linux.
-const (
- hwcap_FP = 1 << 0
- hwcap_ASIMD = 1 << 1
- hwcap_EVTSTRM = 1 << 2
- hwcap_AES = 1 << 3
- hwcap_PMULL = 1 << 4
- hwcap_SHA1 = 1 << 5
- hwcap_SHA2 = 1 << 6
- hwcap_CRC32 = 1 << 7
- hwcap_ATOMICS = 1 << 8
- hwcap_FPHP = 1 << 9
- hwcap_ASIMDHP = 1 << 10
- hwcap_CPUID = 1 << 11
- hwcap_ASIMDRDM = 1 << 12
- hwcap_JSCVT = 1 << 13
- hwcap_FCMA = 1 << 14
- hwcap_LRCPC = 1 << 15
- hwcap_DCPOP = 1 << 16
- hwcap_SHA3 = 1 << 17
- hwcap_SM3 = 1 << 18
- hwcap_SM4 = 1 << 19
- hwcap_ASIMDDP = 1 << 20
- hwcap_SHA512 = 1 << 21
- hwcap_SVE = 1 << 22
- hwcap_ASIMDFHM = 1 << 23
-
- hwcap2_SVE2 = 1 << 1
-)
-
-// linuxKernelCanEmulateCPUID reports whether we're running
-// on Linux 4.11+. Ideally we'd like to ask the question about
-// whether the current kernel contains
-// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2
-// but the version number will have to do.
-func linuxKernelCanEmulateCPUID() bool {
- var un syscall.Utsname
- syscall.Uname(&un)
- var sb strings.Builder
- for _, b := range un.Release[:] {
- if b == 0 {
- break
- }
- sb.WriteByte(byte(b))
- }
- major, minor, _, ok := parseRelease(sb.String())
- return ok && (major > 4 || major == 4 && minor >= 11)
-}
-
-func doinit() {
- if err := readHWCAP(); err != nil {
- // We failed to read /proc/self/auxv. This can happen if the binary has
- // been given extra capabilities(7) with /bin/setcap.
- //
- // When this happens, we have two options. If the Linux kernel is new
- // enough (4.11+), we can read the arm64 registers directly which'll
- // trap into the kernel and then return back to userspace.
- //
- // But on older kernels, such as Linux 4.4.180 as used on many Synology
- // devices, calling readARM64Registers (specifically getisar0) will
- // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo
- // instead.
- //
- // See golang/go#57336.
- if linuxKernelCanEmulateCPUID() {
- readARM64Registers()
- } else {
- readLinuxProcCPUInfo()
- }
- return
- }
-
- // HWCAP feature bits
- ARM64.HasFP = isSet(hwCap, hwcap_FP)
- ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD)
- ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
- ARM64.HasAES = isSet(hwCap, hwcap_AES)
- ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL)
- ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1)
- ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2)
- ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32)
- ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS)
- ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP)
- ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP)
- ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID)
- ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM)
- ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT)
- ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA)
- ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC)
- ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP)
- ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3)
- ARM64.HasSM3 = isSet(hwCap, hwcap_SM3)
- ARM64.HasSM4 = isSet(hwCap, hwcap_SM4)
- ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP)
- ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
- ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
- ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
-
- // HWCAP2 feature bits
- ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
-}
-
-func isSet(hwc uint, value uint) bool {
- return hwc&value != 0
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
deleted file mode 100644
index 4686c1d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips64 || mips64le)
-
-package cpu
-
-// HWCAP bits. These are exposed by the Linux kernel 5.4.
-const (
- // CPU features
- hwcap_MIPS_MSA = 1 << 1
-)
-
-func doinit() {
- // HWCAP feature bits
- MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA)
-}
-
-func isSet(hwc uint, value uint) bool {
- return hwc&value != 0
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
deleted file mode 100644
index cd63e73..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
-
-package cpu
-
-func doinit() {}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
deleted file mode 100644
index 197188e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (ppc64 || ppc64le)
-
-package cpu
-
-// HWCAP/HWCAP2 bits. These are exposed by the kernel.
-const (
- // ISA Level
- _PPC_FEATURE2_ARCH_2_07 = 0x80000000
- _PPC_FEATURE2_ARCH_3_00 = 0x00800000
-
- // CPU features
- _PPC_FEATURE2_DARN = 0x00200000
- _PPC_FEATURE2_SCV = 0x00100000
-)
-
-func doinit() {
- // HWCAP2 feature bits
- PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07)
- PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00)
- PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN)
- PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV)
-}
-
-func isSet(hwc uint, value uint) bool {
- return hwc&value != 0
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
deleted file mode 100644
index 1517ac6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-const (
- // bit mask values from /usr/include/bits/hwcap.h
- hwcap_ZARCH = 2
- hwcap_STFLE = 4
- hwcap_MSA = 8
- hwcap_LDISP = 16
- hwcap_EIMM = 32
- hwcap_DFP = 64
- hwcap_ETF3EH = 256
- hwcap_VX = 2048
- hwcap_VXE = 8192
-)
-
-func initS390Xbase() {
- // test HWCAP bit vector
- has := func(featureMask uint) bool {
- return hwCap&featureMask == featureMask
- }
-
- // mandatory
- S390X.HasZARCH = has(hwcap_ZARCH)
-
- // optional
- S390X.HasSTFLE = has(hwcap_STFLE)
- S390X.HasLDISP = has(hwcap_LDISP)
- S390X.HasEIMM = has(hwcap_EIMM)
- S390X.HasETF3EH = has(hwcap_ETF3EH)
- S390X.HasDFP = has(hwcap_DFP)
- S390X.HasMSA = has(hwcap_MSA)
- S390X.HasVX = has(hwcap_VX)
- if S390X.HasVX {
- S390X.HasVXE = has(hwcap_VXE)
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_loong64.go
deleted file mode 100644
index 5586358..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_loong64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build loong64
-
-package cpu
-
-const cacheLineSize = 64
-
-func initOptions() {
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
deleted file mode 100644
index fedb00c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build mips64 || mips64le
-
-package cpu
-
-const cacheLineSize = 32
-
-func initOptions() {
- options = []option{
- {Name: "msa", Feature: &MIPS64X.HasMSA},
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
deleted file mode 100644
index ffb4ec7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build mips || mipsle
-
-package cpu
-
-const cacheLineSize = 32
-
-func initOptions() {}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
deleted file mode 100644
index ebfb3fc..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
- "syscall"
- "unsafe"
-)
-
-// Minimal copy of functionality from x/sys/unix so the cpu package can call
-// sysctl without depending on x/sys/unix.
-
-const (
- _CTL_QUERY = -2
-
- _SYSCTL_VERS_1 = 0x1000000
-)
-
-var _zero uintptr
-
-func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
- var _p0 unsafe.Pointer
- if len(mib) > 0 {
- _p0 = unsafe.Pointer(&mib[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- _, _, errno := syscall.Syscall6(
- syscall.SYS___SYSCTL,
- uintptr(_p0),
- uintptr(len(mib)),
- uintptr(unsafe.Pointer(old)),
- uintptr(unsafe.Pointer(oldlen)),
- uintptr(unsafe.Pointer(new)),
- uintptr(newlen))
- if errno != 0 {
- return errno
- }
- return nil
-}
-
-type sysctlNode struct {
- Flags uint32
- Num int32
- Name [32]int8
- Ver uint32
- __rsvd uint32
- Un [16]byte
- _sysctl_size [8]byte
- _sysctl_func [8]byte
- _sysctl_parent [8]byte
- _sysctl_desc [8]byte
-}
-
-func sysctlNodes(mib []int32) ([]sysctlNode, error) {
- var olen uintptr
-
- // Get a list of all sysctl nodes below the given MIB by performing
- // a sysctl for the given MIB with CTL_QUERY appended.
- mib = append(mib, _CTL_QUERY)
- qnode := sysctlNode{Flags: _SYSCTL_VERS_1}
- qp := (*byte)(unsafe.Pointer(&qnode))
- sz := unsafe.Sizeof(qnode)
- if err := sysctl(mib, nil, &olen, qp, sz); err != nil {
- return nil, err
- }
-
- // Now that we know the size, get the actual nodes.
- nodes := make([]sysctlNode, olen/sz)
- np := (*byte)(unsafe.Pointer(&nodes[0]))
- if err := sysctl(mib, np, &olen, qp, sz); err != nil {
- return nil, err
- }
-
- return nodes, nil
-}
-
-func nametomib(name string) ([]int32, error) {
- // Split name into components.
- var parts []string
- last := 0
- for i := 0; i < len(name); i++ {
- if name[i] == '.' {
- parts = append(parts, name[last:i])
- last = i + 1
- }
- }
- parts = append(parts, name[last:])
-
- mib := []int32{}
- // Discover the nodes and construct the MIB OID.
- for partno, part := range parts {
- nodes, err := sysctlNodes(mib)
- if err != nil {
- return nil, err
- }
- for _, node := range nodes {
- n := make([]byte, 0)
- for i := range node.Name {
- if node.Name[i] != 0 {
- n = append(n, byte(node.Name[i]))
- }
- }
- if string(n) == part {
- mib = append(mib, int32(node.Num))
- break
- }
- }
- if len(mib) != partno+1 {
- return nil, err
- }
- }
-
- return mib, nil
-}
-
-// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's
-type aarch64SysctlCPUID struct {
- midr uint64 /* Main ID Register */
- revidr uint64 /* Revision ID Register */
- mpidr uint64 /* Multiprocessor Affinity Register */
- aa64dfr0 uint64 /* A64 Debug Feature Register 0 */
- aa64dfr1 uint64 /* A64 Debug Feature Register 1 */
- aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
- aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
- aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
- aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
- aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
- aa64pfr0 uint64 /* A64 Processor Feature Register 0 */
- aa64pfr1 uint64 /* A64 Processor Feature Register 1 */
- aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */
- mvfr0 uint32 /* Media and VFP Feature Register 0 */
- mvfr1 uint32 /* Media and VFP Feature Register 1 */
- mvfr2 uint32 /* Media and VFP Feature Register 2 */
- pad uint32
- clidr uint64 /* Cache Level ID Register */
- ctr uint64 /* Cache Type Register */
-}
-
-func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
- mib, err := nametomib(name)
- if err != nil {
- return nil, err
- }
-
- out := aarch64SysctlCPUID{}
- n := unsafe.Sizeof(out)
- _, _, errno := syscall.Syscall6(
- syscall.SYS___SYSCTL,
- uintptr(unsafe.Pointer(&mib[0])),
- uintptr(len(mib)),
- uintptr(unsafe.Pointer(&out)),
- uintptr(unsafe.Pointer(&n)),
- uintptr(0),
- uintptr(0))
- if errno != 0 {
- return nil, errno
- }
- return &out, nil
-}
-
-func doinit() {
- cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
- if err != nil {
- setMinimalFeatures()
- return
- }
- parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
-
- Initialized = true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
deleted file mode 100644
index 85b64d5..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import (
- "syscall"
- "unsafe"
-)
-
-// Minimal copy of functionality from x/sys/unix so the cpu package can call
-// sysctl without depending on x/sys/unix.
-
-const (
- // From OpenBSD's sys/sysctl.h.
- _CTL_MACHDEP = 7
-
- // From OpenBSD's machine/cpu.h.
- _CPU_ID_AA64ISAR0 = 2
- _CPU_ID_AA64ISAR1 = 3
-)
-
-// Implemented in the runtime package (runtime/sys_openbsd3.go)
-func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
-
-//go:linkname syscall_syscall6 syscall.syscall6
-
-func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
- _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
- if errno != 0 {
- return errno
- }
- return nil
-}
-
-var libc_sysctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
-
-func sysctlUint64(mib []uint32) (uint64, bool) {
- var out uint64
- nout := unsafe.Sizeof(out)
- if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil {
- return 0, false
- }
- return out, true
-}
-
-func doinit() {
- setMinimalFeatures()
-
- // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl.
- isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
- if !ok {
- return
- }
- isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1})
- if !ok {
- return
- }
- parseARM64SystemRegisters(isar0, isar1, 0)
-
- Initialized = true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
deleted file mode 100644
index 054ba05..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_sysctl(SB)
-
-GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
-DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
deleted file mode 100644
index e9ecf2a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_arm.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !linux && arm
-
-package cpu
-
-func archInit() {}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
deleted file mode 100644
index 5341e7f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !linux && !netbsd && !openbsd && arm64
-
-package cpu
-
-func doinit() {}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
deleted file mode 100644
index 5f8f241..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !linux && (mips64 || mips64le)
-
-package cpu
-
-func archInit() {
- Initialized = true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
deleted file mode 100644
index 89608fb..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !aix && !linux && (ppc64 || ppc64le)
-
-package cpu
-
-func archInit() {
- PPC64.IsPOWER8 = true
- Initialized = true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
deleted file mode 100644
index 5ab8780..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !linux && riscv64
-
-package cpu
-
-func archInit() {
- Initialized = true
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
deleted file mode 100644
index c14f12b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build ppc64 || ppc64le
-
-package cpu
-
-const cacheLineSize = 128
-
-func initOptions() {
- options = []option{
- {Name: "darn", Feature: &PPC64.HasDARN},
- {Name: "scv", Feature: &PPC64.HasSCV},
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
deleted file mode 100644
index 7f0c79c..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build riscv64
-
-package cpu
-
-const cacheLineSize = 64
-
-func initOptions() {}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_s390x.go
deleted file mode 100644
index 5881b88..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_s390x.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-const cacheLineSize = 256
-
-func initOptions() {
- options = []option{
- {Name: "zarch", Feature: &S390X.HasZARCH, Required: true},
- {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true},
- {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true},
- {Name: "eimm", Feature: &S390X.HasEIMM, Required: true},
- {Name: "dfp", Feature: &S390X.HasDFP},
- {Name: "etf3eh", Feature: &S390X.HasETF3EH},
- {Name: "msa", Feature: &S390X.HasMSA},
- {Name: "aes", Feature: &S390X.HasAES},
- {Name: "aescbc", Feature: &S390X.HasAESCBC},
- {Name: "aesctr", Feature: &S390X.HasAESCTR},
- {Name: "aesgcm", Feature: &S390X.HasAESGCM},
- {Name: "ghash", Feature: &S390X.HasGHASH},
- {Name: "sha1", Feature: &S390X.HasSHA1},
- {Name: "sha256", Feature: &S390X.HasSHA256},
- {Name: "sha3", Feature: &S390X.HasSHA3},
- {Name: "sha512", Feature: &S390X.HasSHA512},
- {Name: "vx", Feature: &S390X.HasVX},
- {Name: "vxe", Feature: &S390X.HasVXE},
- }
-}
-
-// bitIsSet reports whether the bit at index is set. The bit index
-// is in big endian order, so bit index 0 is the leftmost bit.
-func bitIsSet(bits []uint64, index uint) bool {
- return bits[index/64]&((1<<63)>>(index%64)) != 0
-}
-
-// facility is a bit index for the named facility.
-type facility uint8
-
-const (
- // mandatory facilities
- zarch facility = 1 // z architecture mode is active
- stflef facility = 7 // store-facility-list-extended
- ldisp facility = 18 // long-displacement
- eimm facility = 21 // extended-immediate
-
- // miscellaneous facilities
- dfp facility = 42 // decimal-floating-point
- etf3eh facility = 30 // extended-translation 3 enhancement
-
- // cryptography facilities
- msa facility = 17 // message-security-assist
- msa3 facility = 76 // message-security-assist extension 3
- msa4 facility = 77 // message-security-assist extension 4
- msa5 facility = 57 // message-security-assist extension 5
- msa8 facility = 146 // message-security-assist extension 8
- msa9 facility = 155 // message-security-assist extension 9
-
- // vector facilities
- vx facility = 129 // vector facility
- vxe facility = 135 // vector-enhancements 1
- vxe2 facility = 148 // vector-enhancements 2
-)
-
-// facilityList contains the result of an STFLE call.
-// Bits are numbered in big endian order so the
-// leftmost bit (the MSB) is at index 0.
-type facilityList struct {
- bits [4]uint64
-}
-
-// Has reports whether the given facilities are present.
-func (s *facilityList) Has(fs ...facility) bool {
- if len(fs) == 0 {
- panic("no facility bits provided")
- }
- for _, f := range fs {
- if !bitIsSet(s.bits[:], uint(f)) {
- return false
- }
- }
- return true
-}
-
-// function is the code for the named cryptographic function.
-type function uint8
-
-const (
- // KM{,A,C,CTR} function codes
- aes128 function = 18 // AES-128
- aes192 function = 19 // AES-192
- aes256 function = 20 // AES-256
-
- // K{I,L}MD function codes
- sha1 function = 1 // SHA-1
- sha256 function = 2 // SHA-256
- sha512 function = 3 // SHA-512
- sha3_224 function = 32 // SHA3-224
- sha3_256 function = 33 // SHA3-256
- sha3_384 function = 34 // SHA3-384
- sha3_512 function = 35 // SHA3-512
- shake128 function = 36 // SHAKE-128
- shake256 function = 37 // SHAKE-256
-
- // KLMD function codes
- ghash function = 65 // GHASH
-)
-
-// queryResult contains the result of a Query function
-// call. Bits are numbered in big endian order so the
-// leftmost bit (the MSB) is at index 0.
-type queryResult struct {
- bits [2]uint64
-}
-
-// Has reports whether the given functions are present.
-func (q *queryResult) Has(fns ...function) bool {
- if len(fns) == 0 {
- panic("no function codes provided")
- }
- for _, f := range fns {
- if !bitIsSet(q.bits[:], uint(f)) {
- return false
- }
- }
- return true
-}
-
-func doinit() {
- initS390Xbase()
-
- // We need implementations of stfle, km and so on
- // to detect cryptographic features.
- if !haveAsmFunctions() {
- return
- }
-
- // optional cryptographic functions
- if S390X.HasMSA {
- aes := []function{aes128, aes192, aes256}
-
- // cipher message
- km, kmc := kmQuery(), kmcQuery()
- S390X.HasAES = km.Has(aes...)
- S390X.HasAESCBC = kmc.Has(aes...)
- if S390X.HasSTFLE {
- facilities := stfle()
- if facilities.Has(msa4) {
- kmctr := kmctrQuery()
- S390X.HasAESCTR = kmctr.Has(aes...)
- }
- if facilities.Has(msa8) {
- kma := kmaQuery()
- S390X.HasAESGCM = kma.Has(aes...)
- }
- }
-
- // compute message digest
- kimd := kimdQuery() // intermediate (no padding)
- klmd := klmdQuery() // last (padding)
- S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
- S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
- S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
- S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
- sha3 := []function{
- sha3_224, sha3_256, sha3_384, sha3_512,
- shake128, shake256,
- }
- S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_s390x.s
deleted file mode 100644
index 1fb4b70..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_s390x.s
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-// func stfle() facilityList
-TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
- MOVD $ret+0(FP), R1
- MOVD $3, R0 // last doubleword index to store
- XC $32, (R1), (R1) // clear 4 doublewords (32 bytes)
- WORD $0xb2b01000 // store facility list extended (STFLE)
- RET
-
-// func kmQuery() queryResult
-TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
- MOVD $0, R0 // set function code to 0 (KM-Query)
- MOVD $ret+0(FP), R1 // address of 16-byte return value
- WORD $0xB92E0024 // cipher message (KM)
- RET
-
-// func kmcQuery() queryResult
-TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
- MOVD $0, R0 // set function code to 0 (KMC-Query)
- MOVD $ret+0(FP), R1 // address of 16-byte return value
- WORD $0xB92F0024 // cipher message with chaining (KMC)
- RET
-
-// func kmctrQuery() queryResult
-TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
- MOVD $0, R0 // set function code to 0 (KMCTR-Query)
- MOVD $ret+0(FP), R1 // address of 16-byte return value
- WORD $0xB92D4024 // cipher message with counter (KMCTR)
- RET
-
-// func kmaQuery() queryResult
-TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
- MOVD $0, R0 // set function code to 0 (KMA-Query)
- MOVD $ret+0(FP), R1 // address of 16-byte return value
- WORD $0xb9296024 // cipher message with authentication (KMA)
- RET
-
-// func kimdQuery() queryResult
-TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
- MOVD $0, R0 // set function code to 0 (KIMD-Query)
- MOVD $ret+0(FP), R1 // address of 16-byte return value
- WORD $0xB93E0024 // compute intermediate message digest (KIMD)
- RET
-
-// func klmdQuery() queryResult
-TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
- MOVD $0, R0 // set function code to 0 (KLMD-Query)
- MOVD $ret+0(FP), R1 // address of 16-byte return value
- WORD $0xB93F0024 // compute last message digest (KLMD)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_wasm.go
deleted file mode 100644
index 384787e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_wasm.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build wasm
-
-package cpu
-
-// We're compiling the cpu package for an unknown (software-abstracted) CPU.
-// Make CacheLinePad an empty struct and hope that the usual struct alignment
-// rules are good enough.
-
-const cacheLineSize = 0
-
-func initOptions() {}
-
-func archInit() {}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_x86.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_x86.go
deleted file mode 100644
index c29f5e4..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build 386 || amd64 || amd64p32
-
-package cpu
-
-import "runtime"
-
-const cacheLineSize = 64
-
-func initOptions() {
- options = []option{
- {Name: "adx", Feature: &X86.HasADX},
- {Name: "aes", Feature: &X86.HasAES},
- {Name: "avx", Feature: &X86.HasAVX},
- {Name: "avx2", Feature: &X86.HasAVX2},
- {Name: "avx512", Feature: &X86.HasAVX512},
- {Name: "avx512f", Feature: &X86.HasAVX512F},
- {Name: "avx512cd", Feature: &X86.HasAVX512CD},
- {Name: "avx512er", Feature: &X86.HasAVX512ER},
- {Name: "avx512pf", Feature: &X86.HasAVX512PF},
- {Name: "avx512vl", Feature: &X86.HasAVX512VL},
- {Name: "avx512bw", Feature: &X86.HasAVX512BW},
- {Name: "avx512dq", Feature: &X86.HasAVX512DQ},
- {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
- {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
- {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW},
- {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
- {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
- {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
- {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
- {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
- {Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
- {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
- {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
- {Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
- {Name: "amxtile", Feature: &X86.HasAMXTile},
- {Name: "amxint8", Feature: &X86.HasAMXInt8},
- {Name: "amxbf16", Feature: &X86.HasAMXBF16},
- {Name: "bmi1", Feature: &X86.HasBMI1},
- {Name: "bmi2", Feature: &X86.HasBMI2},
- {Name: "cx16", Feature: &X86.HasCX16},
- {Name: "erms", Feature: &X86.HasERMS},
- {Name: "fma", Feature: &X86.HasFMA},
- {Name: "osxsave", Feature: &X86.HasOSXSAVE},
- {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
- {Name: "popcnt", Feature: &X86.HasPOPCNT},
- {Name: "rdrand", Feature: &X86.HasRDRAND},
- {Name: "rdseed", Feature: &X86.HasRDSEED},
- {Name: "sse3", Feature: &X86.HasSSE3},
- {Name: "sse41", Feature: &X86.HasSSE41},
- {Name: "sse42", Feature: &X86.HasSSE42},
- {Name: "ssse3", Feature: &X86.HasSSSE3},
-
- // These capabilities should always be enabled on amd64:
- {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
- }
-}
-
-func archInit() {
-
- Initialized = true
-
- maxID, _, _, _ := cpuid(0, 0)
-
- if maxID < 1 {
- return
- }
-
- _, _, ecx1, edx1 := cpuid(1, 0)
- X86.HasSSE2 = isSet(26, edx1)
-
- X86.HasSSE3 = isSet(0, ecx1)
- X86.HasPCLMULQDQ = isSet(1, ecx1)
- X86.HasSSSE3 = isSet(9, ecx1)
- X86.HasFMA = isSet(12, ecx1)
- X86.HasCX16 = isSet(13, ecx1)
- X86.HasSSE41 = isSet(19, ecx1)
- X86.HasSSE42 = isSet(20, ecx1)
- X86.HasPOPCNT = isSet(23, ecx1)
- X86.HasAES = isSet(25, ecx1)
- X86.HasOSXSAVE = isSet(27, ecx1)
- X86.HasRDRAND = isSet(30, ecx1)
-
- var osSupportsAVX, osSupportsAVX512 bool
- // For XGETBV, OSXSAVE bit is required and sufficient.
- if X86.HasOSXSAVE {
- eax, _ := xgetbv()
- // Check if XMM and YMM registers have OS support.
- osSupportsAVX = isSet(1, eax) && isSet(2, eax)
-
- if runtime.GOOS == "darwin" {
- // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
- // Since users can't rely on mask register contents, let's not advertise AVX-512 support.
- // See issue 49233.
- osSupportsAVX512 = false
- } else {
- // Check if OPMASK and ZMM registers have OS support.
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
- }
- }
-
- X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
-
- if maxID < 7 {
- return
- }
-
- _, ebx7, ecx7, edx7 := cpuid(7, 0)
- X86.HasBMI1 = isSet(3, ebx7)
- X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
- X86.HasBMI2 = isSet(8, ebx7)
- X86.HasERMS = isSet(9, ebx7)
- X86.HasRDSEED = isSet(18, ebx7)
- X86.HasADX = isSet(19, ebx7)
-
- X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
- if X86.HasAVX512 {
- X86.HasAVX512F = true
- X86.HasAVX512CD = isSet(28, ebx7)
- X86.HasAVX512ER = isSet(27, ebx7)
- X86.HasAVX512PF = isSet(26, ebx7)
- X86.HasAVX512VL = isSet(31, ebx7)
- X86.HasAVX512BW = isSet(30, ebx7)
- X86.HasAVX512DQ = isSet(17, ebx7)
- X86.HasAVX512IFMA = isSet(21, ebx7)
- X86.HasAVX512VBMI = isSet(1, ecx7)
- X86.HasAVX5124VNNIW = isSet(2, edx7)
- X86.HasAVX5124FMAPS = isSet(3, edx7)
- X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
- X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
- X86.HasAVX512VNNI = isSet(11, ecx7)
- X86.HasAVX512GFNI = isSet(8, ecx7)
- X86.HasAVX512VAES = isSet(9, ecx7)
- X86.HasAVX512VBMI2 = isSet(6, ecx7)
- X86.HasAVX512BITALG = isSet(12, ecx7)
-
- eax71, _, _, _ := cpuid(7, 1)
- X86.HasAVX512BF16 = isSet(5, eax71)
- }
-
- X86.HasAMXTile = isSet(24, edx7)
- X86.HasAMXInt8 = isSet(25, edx7)
- X86.HasAMXBF16 = isSet(22, edx7)
-}
-
-func isSet(bitpos uint, value uint32) bool {
- return value&(1<> 63))
-)
-
-// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
-// These are initialized in cpu_$GOARCH.go
-// and should not be changed after they are initialized.
-var hwCap uint
-var hwCap2 uint
-
-func readHWCAP() error {
- // For Go 1.21+, get auxv from the Go runtime.
- if a := getAuxv(); len(a) > 0 {
- for len(a) >= 2 {
- tag, val := a[0], uint(a[1])
- a = a[2:]
- switch tag {
- case _AT_HWCAP:
- hwCap = val
- case _AT_HWCAP2:
- hwCap2 = val
- }
- }
- return nil
- }
-
- buf, err := os.ReadFile(procAuxv)
- if err != nil {
- // e.g. on android /proc/self/auxv is not accessible, so silently
- // ignore the error and leave Initialized = false. On some
- // architectures (e.g. arm64) doinit() implements a fallback
- // readout and will set Initialized = true again.
- return err
- }
- bo := hostByteOrder()
- for len(buf) >= 2*(uintSize/8) {
- var tag, val uint
- switch uintSize {
- case 32:
- tag = uint(bo.Uint32(buf[0:]))
- val = uint(bo.Uint32(buf[4:]))
- buf = buf[8:]
- case 64:
- tag = uint(bo.Uint64(buf[0:]))
- val = uint(bo.Uint64(buf[8:]))
- buf = buf[16:]
- }
- switch tag {
- case _AT_HWCAP:
- hwCap = val
- case _AT_HWCAP2:
- hwCap2 = val
- }
- }
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/parse.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/parse.go
deleted file mode 100644
index 762b63d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/parse.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-import "strconv"
-
-// parseRelease parses a dot-separated version number. It follows the semver
-// syntax, but allows the minor and patch versions to be elided.
-//
-// This is a copy of the Go runtime's parseRelease from
-// https://golang.org/cl/209597.
-func parseRelease(rel string) (major, minor, patch int, ok bool) {
- // Strip anything after a dash or plus.
- for i := 0; i < len(rel); i++ {
- if rel[i] == '-' || rel[i] == '+' {
- rel = rel[:i]
- break
- }
- }
-
- next := func() (int, bool) {
- for i := 0; i < len(rel); i++ {
- if rel[i] == '.' {
- ver, err := strconv.Atoi(rel[:i])
- rel = rel[i+1:]
- return ver, err == nil
- }
- }
- ver, err := strconv.Atoi(rel)
- rel = ""
- return ver, err == nil
- }
- if major, ok = next(); !ok || rel == "" {
- return
- }
- if minor, ok = next(); !ok || rel == "" {
- return
- }
- patch, ok = next()
- return
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
deleted file mode 100644
index 4cd64c7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && arm64
-
-package cpu
-
-import (
- "errors"
- "io"
- "os"
- "strings"
-)
-
-func readLinuxProcCPUInfo() error {
- f, err := os.Open("/proc/cpuinfo")
- if err != nil {
- return err
- }
- defer f.Close()
-
- var buf [1 << 10]byte // enough for first CPU
- n, err := io.ReadFull(f, buf[:])
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
- in := string(buf[:n])
- const features = "\nFeatures : "
- i := strings.Index(in, features)
- if i == -1 {
- return errors.New("no CPU features found")
- }
- in = in[i+len(features):]
- if i := strings.Index(in, "\n"); i != -1 {
- in = in[:i]
- }
- m := map[string]*bool{}
-
- initOptions() // need it early here; it's harmless to call twice
- for _, o := range options {
- m[o.Name] = o.Feature
- }
- // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm".
- m["evtstrm"] = &ARM64.HasEVTSTRM
-
- for _, f := range strings.Fields(in) {
- if p, ok := m[f]; ok {
- *p = true
- }
- }
- return nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/runtime_auxv.go
deleted file mode 100644
index 5f92ac9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/runtime_auxv.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cpu
-
-// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init)
-// on platforms that use auxv.
-var getAuxvFn func() []uintptr
-
-func getAuxv() []uintptr {
- if getAuxvFn == nil {
- return nil
- }
- return getAuxvFn()
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
deleted file mode 100644
index 4c9788e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-
-package cpu
-
-import (
- _ "unsafe" // for linkname
-)
-
-//go:linkname runtime_getAuxv runtime.getAuxv
-func runtime_getAuxv() []uintptr
-
-func init() {
- getAuxvFn = runtime_getAuxv
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
deleted file mode 100644
index 1b9ccb0..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Recreate a getsystemcfg syscall handler instead of
-// using the one provided by x/sys/unix to avoid having
-// the dependency between them. (See golang.org/issue/32102)
-// Moreover, this file will be used during the building of
-// gccgo's libgo and thus must not used a CGo method.
-
-//go:build aix && gccgo
-
-package cpu
-
-import (
- "syscall"
-)
-
-//extern getsystemcfg
-func gccgoGetsystemcfg(label uint32) (r uint64)
-
-func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
- r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
- e1 = syscall.GetErrno()
- return
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
deleted file mode 100644
index e8b6cdb..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Minimal copy of x/sys/unix so the cpu package can make a
-// system call on AIX without depending on x/sys/unix.
-// (See golang.org/issue/32102)
-
-//go:build aix && ppc64 && gc
-
-package cpu
-
-import (
- "syscall"
- "unsafe"
-)
-
-//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"
-
-//go:linkname libc_getsystemcfg libc_getsystemcfg
-
-type syscallFunc uintptr
-
-var libc_getsystemcfg syscallFunc
-
-type errno = syscall.Errno
-
-// Implemented in runtime/syscall_aix.go.
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
-
-func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
- r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
- return
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/.gitignore b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/.gitignore
deleted file mode 100644
index e3e0fc6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-_obj/
-unix.test
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/README.md b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/README.md
deleted file mode 100644
index 7d3c060..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/README.md
+++ /dev/null
@@ -1,184 +0,0 @@
-# Building `sys/unix`
-
-The sys/unix package provides access to the raw system call interface of the
-underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
-
-Porting Go to a new architecture/OS combination or adding syscalls, types, or
-constants to an existing architecture/OS pair requires some manual effort;
-however, there are tools that automate much of the process.
-
-## Build Systems
-
-There are currently two ways we generate the necessary files. We are currently
-migrating the build system to use containers so the builds are reproducible.
-This is being done on an OS-by-OS basis. Please update this documentation as
-components of the build system change.
-
-### Old Build System (currently for `GOOS != "linux"`)
-
-The old build system generates the Go files based on the C header files
-present on your system. This means that files
-for a given GOOS/GOARCH pair must be generated on a system with that OS and
-architecture. This also means that the generated code can differ from system
-to system, based on differences in the header files.
-
-To avoid this, if you are using the old build system, only generate the Go
-files on an installation with unmodified header files. It is also important to
-keep track of which version of the OS the files were generated from (ex.
-Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
-and have each OS upgrade correspond to a single change.
-
-To build the files for your current OS and architecture, make sure GOOS and
-GOARCH are set correctly and run `mkall.sh`. This will generate the files for
-your specific system. Running `mkall.sh -n` shows the commands that will be run.
-
-Requirements: bash, go
-
-### New Build System (currently for `GOOS == "linux"`)
-
-The new build system uses a Docker container to generate the go files directly
-from source checkouts of the kernel and various system libraries. This means
-that on any platform that supports Docker, all the files using the new build
-system can be generated at once, and generated files will not change based on
-what the person running the scripts has installed on their computer.
-
-The OS specific files for the new build system are located in the `${GOOS}`
-directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
-the kernel or system library updates, modify the Dockerfile at
-`${GOOS}/Dockerfile` to checkout the new release of the source.
-
-To build all the files under the new build system, you must be on an amd64/Linux
-system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
-then generate all of the files for all of the GOOS/GOARCH pairs in the new build
-system. Running `mkall.sh -n` shows the commands that will be run.
-
-Requirements: bash, go, docker
-
-## Component files
-
-This section describes the various files used in the code generation process.
-It also contains instructions on how to modify these files to add a new
-architecture/OS or to add additional syscalls, types, or constants. Note that
-if you are using the new build system, the scripts/programs cannot be called normally.
-They must be called from within the docker container.
-
-### asm files
-
-The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
-call dispatch. There are three entry points:
-```
- func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
- func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
- func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-```
-The first and second are the standard ones; they differ only in how many
-arguments can be passed to the kernel. The third is for low-level use by the
-ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
-let it know that a system call is running.
-
-When porting Go to a new architecture/OS, this file must be implemented for
-each GOOS/GOARCH pair.
-
-### mksysnum
-
-Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
-for the old system). This program takes in a list of header files containing the
-syscall number declarations and parses them to produce the corresponding list of
-Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
-constants.
-
-Adding new syscall numbers is mostly done by running the build on a sufficiently
-new installation of the target OS (or updating the source checkouts for the
-new build system). However, depending on the OS, you may need to update the
-parsing in mksysnum.
-
-### mksyscall.go
-
-The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
-hand-written Go files which implement system calls (for unix, the specific OS,
-or the specific OS/Architecture pair respectively) that need special handling
-and list `//sys` comments giving prototypes for ones that can be generated.
-
-The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
-them into syscalls. This requires the name of the prototype in the comment to
-match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
-prototype can be exported (capitalized) or not.
-
-Adding a new syscall often just requires adding a new `//sys` function prototype
-with the desired arguments and a capitalized name so it is exported. However, if
-you want the interface to the syscall to be different, often one will make an
-unexported `//sys` prototype, and then write a custom wrapper in
-`syscall_${GOOS}.go`.
-
-### types files
-
-For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
-`types_${GOOS}.go` on the old system). This file includes standard C headers and
-creates Go type aliases to the corresponding C types. The file is then fed
-through godef to get the Go compatible definitions. Finally, the generated code
-is fed though mkpost.go to format the code correctly and remove any hidden or
-private identifiers. This cleaned-up code is written to
-`ztypes_${GOOS}_${GOARCH}.go`.
-
-The hardest part about preparing this file is figuring out which headers to
-include and which symbols need to be `#define`d to get the actual data
-structures that pass through to the kernel system calls. Some C libraries
-preset alternate versions for binary compatibility and translate them on the
-way in and out of system calls, but there is almost always a `#define` that can
-get the real ones.
-See `types_darwin.go` and `linux/types.go` for examples.
-
-To add a new type, add in the necessary include statement at the top of the
-file (if it is not already there) and add in a type alias line. Note that if
-your type is significantly different on different architectures, you may need
-some `#if/#elif` macros in your include statements.
-
-### mkerrors.sh
-
-This script is used to generate the system's various constants. This doesn't
-just include the error numbers and error strings, but also the signal numbers
-and a wide variety of miscellaneous constants. The constants come from the list
-of include files in the `includes_${uname}` variable. A regex then picks out
-the desired `#define` statements, and generates the corresponding Go constants.
-The error numbers and strings are generated from `#include `, and the
-signal numbers and strings are generated from `#include `. All of
-these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
-`_errors.c`, which prints out all the constants.
-
-To add a constant, add the header that includes it to the appropriate variable.
-Then, edit the regex (if necessary) to match the desired constant. Avoid making
-the regex too broad to avoid matching unintended constants.
-
-### internal/mkmerge
-
-This program is used to extract duplicate const, func, and type declarations
-from the generated architecture-specific files listed below, and merge these
-into a common file for each OS.
-
-The merge is performed in the following steps:
-1. Construct the set of common code that is idential in all architecture-specific files.
-2. Write this common code to the merged file.
-3. Remove the common code from all architecture-specific files.
-
-
-## Generated files
-
-### `zerrors_${GOOS}_${GOARCH}.go`
-
-A file containing all of the system's generated error numbers, error strings,
-signal numbers, and constants. Generated by `mkerrors.sh` (see above).
-
-### `zsyscall_${GOOS}_${GOARCH}.go`
-
-A file containing all the generated syscalls for a specific GOOS and GOARCH.
-Generated by `mksyscall.go` (see above).
-
-### `zsysnum_${GOOS}_${GOARCH}.go`
-
-A list of numeric constants for all the syscall number of the specific GOOS
-and GOARCH. Generated by mksysnum (see above).
-
-### `ztypes_${GOOS}_${GOARCH}.go`
-
-A file containing Go types for passing into (or returning from) syscalls.
-Generated by godefs and the types file (see above).
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/affinity_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/affinity_linux.go
deleted file mode 100644
index 6e5c81a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// CPU affinity functions
-
-package unix
-
-import (
- "math/bits"
- "unsafe"
-)
-
-const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
-
-// CPUSet represents a CPU affinity mask.
-type CPUSet [cpuSetSize]cpuMask
-
-func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
- _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
- if e != 0 {
- return errnoErr(e)
- }
- return nil
-}
-
-// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
-// If pid is 0 the calling thread is used.
-func SchedGetaffinity(pid int, set *CPUSet) error {
- return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
-}
-
-// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
-// If pid is 0 the calling thread is used.
-func SchedSetaffinity(pid int, set *CPUSet) error {
- return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
-}
-
-// Zero clears the set s, so that it contains no CPUs.
-func (s *CPUSet) Zero() {
- for i := range s {
- s[i] = 0
- }
-}
-
-func cpuBitsIndex(cpu int) int {
- return cpu / _NCPUBITS
-}
-
-func cpuBitsMask(cpu int) cpuMask {
- return cpuMask(1 << (uint(cpu) % _NCPUBITS))
-}
-
-// Set adds cpu to the set s.
-func (s *CPUSet) Set(cpu int) {
- i := cpuBitsIndex(cpu)
- if i < len(s) {
- s[i] |= cpuBitsMask(cpu)
- }
-}
-
-// Clear removes cpu from the set s.
-func (s *CPUSet) Clear(cpu int) {
- i := cpuBitsIndex(cpu)
- if i < len(s) {
- s[i] &^= cpuBitsMask(cpu)
- }
-}
-
-// IsSet reports whether cpu is in the set s.
-func (s *CPUSet) IsSet(cpu int) bool {
- i := cpuBitsIndex(cpu)
- if i < len(s) {
- return s[i]&cpuBitsMask(cpu) != 0
- }
- return false
-}
-
-// Count returns the number of CPUs in the set s.
-func (s *CPUSet) Count() int {
- c := 0
- for _, b := range s {
- c += bits.OnesCount64(uint64(b))
- }
- return c
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/aliases.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/aliases.go
deleted file mode 100644
index b0e4198..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/aliases.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-
-package unix
-
-import "syscall"
-
-type Signal = syscall.Signal
-type Errno = syscall.Errno
-type SysProcAttr = syscall.SysProcAttr
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
deleted file mode 100644
index 269e173..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
-//
-
-TEXT ·syscall6(SB),NOSPLIT,$0-88
- JMP syscall·syscall6(SB)
-
-TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
- JMP syscall·rawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_386.s
deleted file mode 100644
index a4fcef0..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_386.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (freebsd || netbsd || openbsd) && gc
-
-#include "textflag.h"
-
-// System call support for 386 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- JMP syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
deleted file mode 100644
index 1e63615..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
-
-#include "textflag.h"
-
-// System call support for AMD64 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
deleted file mode 100644
index 6496c31..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_arm.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (freebsd || netbsd || openbsd) && gc
-
-#include "textflag.h"
-
-// System call support for ARM BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- B syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- B syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
- B syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- B syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- B syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
deleted file mode 100644
index 4fd1f54..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || freebsd || netbsd || openbsd) && gc
-
-#include "textflag.h"
-
-// System call support for ARM64 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
deleted file mode 100644
index 42f7eb9..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || freebsd || netbsd || openbsd) && gc
-
-#include "textflag.h"
-
-//
-// System call support for ppc64, BSD
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
deleted file mode 100644
index f890266..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (darwin || freebsd || netbsd || openbsd) && gc
-
-#include "textflag.h"
-
-// System call support for RISCV64 BSD
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_386.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_386.s
deleted file mode 100644
index 3b47348..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_386.s
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System calls for 386, Linux
-//
-
-// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
-// instead of the glibc-specific "CALL 0x10(GS)".
-#define INVOKE_SYSCALL INT $0x80
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
- CALL runtime·entersyscall(SB)
- MOVL trap+0(FP), AX // syscall entry
- MOVL a1+4(FP), BX
- MOVL a2+8(FP), CX
- MOVL a3+12(FP), DX
- MOVL $0, SI
- MOVL $0, DI
- INVOKE_SYSCALL
- MOVL AX, r1+16(FP)
- MOVL DX, r2+20(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
- MOVL trap+0(FP), AX // syscall entry
- MOVL a1+4(FP), BX
- MOVL a2+8(FP), CX
- MOVL a3+12(FP), DX
- MOVL $0, SI
- MOVL $0, DI
- INVOKE_SYSCALL
- MOVL AX, r1+16(FP)
- MOVL DX, r2+20(FP)
- RET
-
-TEXT ·socketcall(SB),NOSPLIT,$0-36
- JMP syscall·socketcall(SB)
-
-TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
- JMP syscall·rawsocketcall(SB)
-
-TEXT ·seek(SB),NOSPLIT,$0-28
- JMP syscall·seek(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
deleted file mode 100644
index 67e29f3..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System calls for AMD64, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- CALL runtime·entersyscall(SB)
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ $0, R10
- MOVQ $0, R8
- MOVQ $0, R9
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- CALL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVQ a1+8(FP), DI
- MOVQ a2+16(FP), SI
- MOVQ a3+24(FP), DX
- MOVQ $0, R10
- MOVQ $0, R8
- MOVQ $0, R9
- MOVQ trap+0(FP), AX // syscall entry
- SYSCALL
- MOVQ AX, r1+32(FP)
- MOVQ DX, r2+40(FP)
- RET
-
-TEXT ·gettimeofday(SB),NOSPLIT,$0-16
- JMP syscall·gettimeofday(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_arm.s
deleted file mode 100644
index d6ae269..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_arm.s
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System calls for arm, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- B syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- B syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
- BL runtime·entersyscall(SB)
- MOVW trap+0(FP), R7
- MOVW a1+4(FP), R0
- MOVW a2+8(FP), R1
- MOVW a3+12(FP), R2
- MOVW $0, R3
- MOVW $0, R4
- MOVW $0, R5
- SWI $0
- MOVW R0, r1+16(FP)
- MOVW $0, R0
- MOVW R0, r2+20(FP)
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- B syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- B syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
- MOVW trap+0(FP), R7 // syscall entry
- MOVW a1+4(FP), R0
- MOVW a2+8(FP), R1
- MOVW a3+12(FP), R2
- SWI $0
- MOVW R0, r1+16(FP)
- MOVW $0, R0
- MOVW R0, r2+20(FP)
- RET
-
-TEXT ·seek(SB),NOSPLIT,$0-28
- B syscall·seek(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
deleted file mode 100644
index 01e5e25..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && arm64 && gc
-
-#include "textflag.h"
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- B syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- B syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R0
- MOVD a2+16(FP), R1
- MOVD a3+24(FP), R2
- MOVD $0, R3
- MOVD $0, R4
- MOVD $0, R5
- MOVD trap+0(FP), R8 // syscall entry
- SVC
- MOVD R0, r1+32(FP) // r1
- MOVD R1, r2+40(FP) // r2
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- B syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- B syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVD a1+8(FP), R0
- MOVD a2+16(FP), R1
- MOVD a3+24(FP), R2
- MOVD $0, R3
- MOVD $0, R4
- MOVD $0, R5
- MOVD trap+0(FP), R8 // syscall entry
- SVC
- MOVD R0, r1+32(FP)
- MOVD R1, r2+40(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
deleted file mode 100644
index 2abf12f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_loong64.s
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && loong64 && gc
-
-#include "textflag.h"
-
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- JAL runtime·entersyscall(SB)
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R11 // syscall entry
- SYSCALL
- MOVV R4, r1+32(FP)
- MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
- JAL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R11 // syscall entry
- SYSCALL
- MOVV R4, r1+32(FP)
- MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
deleted file mode 100644
index f84bae7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips64 || mips64le) && gc
-
-#include "textflag.h"
-
-//
-// System calls for mips64, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- JAL runtime·entersyscall(SB)
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVV R2, r1+32(FP)
- MOVV R3, r2+40(FP)
- JAL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVV a1+8(FP), R4
- MOVV a2+16(FP), R5
- MOVV a3+24(FP), R6
- MOVV R0, R7
- MOVV R0, R8
- MOVV R0, R9
- MOVV trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVV R2, r1+32(FP)
- MOVV R3, r2+40(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
deleted file mode 100644
index f08f628..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (mips || mipsle) && gc
-
-#include "textflag.h"
-
-//
-// System calls for mips, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-28
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
- JMP syscall·Syscall9(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
- JAL runtime·entersyscall(SB)
- MOVW a1+4(FP), R4
- MOVW a2+8(FP), R5
- MOVW a3+12(FP), R6
- MOVW R0, R7
- MOVW trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVW R2, r1+16(FP) // r1
- MOVW R3, r2+20(FP) // r2
- JAL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
- MOVW a1+4(FP), R4
- MOVW a2+8(FP), R5
- MOVW a3+12(FP), R6
- MOVW trap+0(FP), R2 // syscall entry
- SYSCALL
- MOVW R2, r1+16(FP)
- MOVW R3, r2+20(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
deleted file mode 100644
index bdfc024..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && (ppc64 || ppc64le) && gc
-
-#include "textflag.h"
-
-//
-// System calls for ppc64, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R3
- MOVD a2+16(FP), R4
- MOVD a3+24(FP), R5
- MOVD R0, R6
- MOVD R0, R7
- MOVD R0, R8
- MOVD trap+0(FP), R9 // syscall entry
- SYSCALL R9
- MOVD R3, r1+32(FP)
- MOVD R4, r2+40(FP)
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVD a1+8(FP), R3
- MOVD a2+16(FP), R4
- MOVD a3+24(FP), R5
- MOVD R0, R6
- MOVD R0, R7
- MOVD R0, R8
- MOVD trap+0(FP), R9 // syscall entry
- SYSCALL R9
- MOVD R3, r1+32(FP)
- MOVD R4, r2+40(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
deleted file mode 100644
index 2e8c996..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build riscv64 && gc
-
-#include "textflag.h"
-
-//
-// System calls for linux/riscv64.
-//
-// Where available, just jump to package syscall's implementation of
-// these functions.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- CALL runtime·entersyscall(SB)
- MOV a1+8(FP), A0
- MOV a2+16(FP), A1
- MOV a3+24(FP), A2
- MOV trap+0(FP), A7 // syscall entry
- ECALL
- MOV A0, r1+32(FP) // r1
- MOV A1, r2+40(FP) // r2
- CALL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOV a1+8(FP), A0
- MOV a2+16(FP), A1
- MOV a3+24(FP), A2
- MOV trap+0(FP), A7 // syscall entry
- ECALL
- MOV A0, r1+32(FP)
- MOV A1, r2+40(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
deleted file mode 100644
index 2c394b1..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux && s390x && gc
-
-#include "textflag.h"
-
-//
-// System calls for s390x, Linux
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- BR syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- BR syscall·Syscall6(SB)
-
-TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
- BL runtime·entersyscall(SB)
- MOVD a1+8(FP), R2
- MOVD a2+16(FP), R3
- MOVD a3+24(FP), R4
- MOVD $0, R5
- MOVD $0, R6
- MOVD $0, R7
- MOVD trap+0(FP), R1 // syscall entry
- SYSCALL
- MOVD R2, r1+32(FP)
- MOVD R3, r2+40(FP)
- BL runtime·exitsyscall(SB)
- RET
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- BR syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- BR syscall·RawSyscall6(SB)
-
-TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
- MOVD a1+8(FP), R2
- MOVD a2+16(FP), R3
- MOVD a3+24(FP), R4
- MOVD $0, R5
- MOVD $0, R6
- MOVD $0, R7
- MOVD trap+0(FP), R1 // syscall entry
- SYSCALL
- MOVD R2, r1+32(FP)
- MOVD R3, r2+40(FP)
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
deleted file mode 100644
index fab586a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System call support for mips64, OpenBSD
-//
-
-// Just jump to package syscall's implementation for all these functions.
-// The runtime may know about them.
-
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- JMP syscall·Syscall(SB)
-
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- JMP syscall·Syscall6(SB)
-
-TEXT ·Syscall9(SB),NOSPLIT,$0-104
- JMP syscall·Syscall9(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- JMP syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- JMP syscall·RawSyscall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
deleted file mode 100644
index f949ec5..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc
-
-#include "textflag.h"
-
-//
-// System calls for amd64, Solaris are implemented in runtime/syscall_solaris.go
-//
-
-TEXT ·sysvicall6(SB),NOSPLIT,$0-88
- JMP syscall·sysvicall6(SB)
-
-TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88
- JMP syscall·rawSysvicall6(SB)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
deleted file mode 100644
index 813dfad..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x && gc
-
-#include "textflag.h"
-
-#define PSALAA 1208(R0)
-#define GTAB64(x) 80(x)
-#define LCA64(x) 88(x)
-#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
-#define CAA(x) 8(x)
-#define CEECAATHDID(x) 976(x) // in the CAA
-#define EDCHPXV(x) 1016(x) // in the CAA
-#define GOCB(x) 1104(x) // in the CAA
-
-// SS_*, where x=SAVSTACK_ASYNC
-#define SS_LE(x) 0(x)
-#define SS_GO(x) 8(x)
-#define SS_ERRNO(x) 16(x)
-#define SS_ERRNOJR(x) 20(x)
-
-// Function Descriptor Offsets
-#define __errno 0x156*16
-#define __err2ad 0x16C*16
-
-// Call Instructions
-#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6
-#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD
-#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE
-
-DATA zosLibVec<>(SB)/8, $0
-GLOBL zosLibVec<>(SB), NOPTR, $8
-
-TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD CAA(R8), R8
- MOVD EDCHPXV(R8), R8
- MOVD R8, zosLibVec<>(SB)
- RET
-
-TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0
- MOVD zosLibVec<>(SB), R8
- MOVD R8, ret+0(FP)
- RET
-
-TEXT ·clearErrno(SB), NOSPLIT, $0-0
- BL addrerrno<>(SB)
- MOVD $0, 0(R3)
- RET
-
-// Returns the address of errno in R3.
-TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get __errno FuncDesc.
- MOVD CAA(R8), R9
- MOVD EDCHPXV(R9), R9
- ADD $(__errno), R9
- LMG 0(R9), R5, R6
-
- // Switch to saved LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R4
- MOVD $0, 0(R9)
-
- // Call __errno function.
- LE_CALL
- NOPH
-
- // Switch back to Go stack.
- XOR R0, R0 // Restore R0 to $0.
- MOVD R4, 0(R9) // Save stack pointer.
- RET
-
-// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64)
-TEXT ·svcCall(SB), NOSPLIT, $0
- BL runtime·save_g(SB) // Save g and stack pointer
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD R15, 0(R9)
-
- MOVD argv+8(FP), R1 // Move function arguments into registers
- MOVD dsa+16(FP), g
- MOVD fnptr+0(FP), R15
-
- BYTE $0x0D // Branch to function
- BYTE $0xEF
-
- BL runtime·load_g(SB) // Restore g and stack pointer
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD SAVSTACK_ASYNC(R8), R9
- MOVD 0(R9), R15
-
- RET
-
-// func svcLoad(name *byte) unsafe.Pointer
-TEXT ·svcLoad(SB), NOSPLIT, $0
- MOVD R15, R2 // Save go stack pointer
- MOVD name+0(FP), R0 // Move SVC args into registers
- MOVD $0x80000000, R1
- MOVD $0, R15
- SVC_LOAD
- MOVW R15, R3 // Save return code from SVC
- MOVD R2, R15 // Restore go stack pointer
- CMP R3, $0 // Check SVC return code
- BNE error
-
- MOVD $-2, R3 // Reset last bit of entry point to zero
- AND R0, R3
- MOVD R3, ret+8(FP) // Return entry point returned by SVC
- CMP R0, R3 // Check if last bit of entry point was set
- BNE done
-
- MOVD R15, R2 // Save go stack pointer
- MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
- SVC_DELETE
- MOVD R2, R15 // Restore go stack pointer
-
-error:
- MOVD $0, ret+8(FP) // Return 0 on failure
-
-done:
- XOR R0, R0 // Reset r0 to 0
- RET
-
-// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
-TEXT ·svcUnload(SB), NOSPLIT, $0
- MOVD R15, R2 // Save go stack pointer
- MOVD name+0(FP), R0 // Move SVC args into registers
- MOVD fnptr+8(FP), R15
- SVC_DELETE
- XOR R0, R0 // Reset r0 to 0
- MOVD R15, R1 // Save SVC return code
- MOVD R2, R15 // Restore go stack pointer
- MOVD R1, ret+16(FP) // Return SVC return code
- RET
-
-// func gettid() uint64
-TEXT ·gettid(SB), NOSPLIT, $0
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get CEECAATHDID
- MOVD CAA(R8), R9
- MOVD CEECAATHDID(R9), R9
- MOVD R9, ret+0(FP)
-
- RET
-
-//
-// Call LE function, if the return is -1
-// errno and errno2 is retrieved
-//
-TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD CAA(R8), R9
- MOVD g, GOCB(R9)
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
- MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
-
- MOVD parms_base+8(FP), R7 // R7 -> argument array
- MOVD parms_len+16(FP), R8 // R8 number of arguments
-
- // arg 1 ---> R1
- CMP R8, $0
- BEQ docall
- SUB $1, R8
- MOVD 0(R7), R1
-
- // arg 2 ---> R2
- CMP R8, $0
- BEQ docall
- SUB $1, R8
- ADD $8, R7
- MOVD 0(R7), R2
-
- // arg 3 --> R3
- CMP R8, $0
- BEQ docall
- SUB $1, R8
- ADD $8, R7
- MOVD 0(R7), R3
-
- CMP R8, $0
- BEQ docall
- MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
-
-repeat:
- ADD $8, R7
- MOVD 0(R7), R0 // advance arg pointer by 8 byte
- ADD $8, R6 // advance LE argument address by 8 byte
- MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
- SUB $1, R8
- CMP R8, $0
- BNE repeat
-
-docall:
- MOVD funcdesc+0(FP), R8 // R8-> function descriptor
- LMG 0(R8), R5, R6
- MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
- LE_CALL // balr R7, R6 (return #1)
- NOPH
- MOVD R3, ret+32(FP)
- CMP R3, $-1 // compare result to -1
- BNE done
-
- // retrieve errno and errno2
- MOVD zosLibVec<>(SB), R8
- ADD $(__errno), R8
- LMG 0(R8), R5, R6
- LE_CALL // balr R7, R6 __errno (return #3)
- NOPH
- MOVWZ 0(R3), R3
- MOVD R3, err+48(FP)
- MOVD zosLibVec<>(SB), R8
- ADD $(__err2ad), R8
- LMG 0(R8), R5, R6
- LE_CALL // balr R7, R6 __err2ad (return #2)
- NOPH
- MOVW (R3), R2 // retrieve errno2
- MOVD R2, errno2+40(FP) // store in return area
-
-done:
- MOVD R4, 0(R9) // Save stack pointer.
- RET
-
-//
-// Call LE function, if the return is 0
-// errno and errno2 is retrieved
-//
-TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
- MOVD CAA(R8), R9
- MOVD g, GOCB(R9)
-
- // Restore LE stack.
- MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
- MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
-
- MOVD parms_base+8(FP), R7 // R7 -> argument array
- MOVD parms_len+16(FP), R8 // R8 number of arguments
-
- // arg 1 ---> R1
- CMP R8, $0
- BEQ docall
- SUB $1, R8
- MOVD 0(R7), R1
-
- // arg 2 ---> R2
- CMP R8, $0
- BEQ docall
- SUB $1, R8
- ADD $8, R7
- MOVD 0(R7), R2
-
- // arg 3 --> R3
- CMP R8, $0
- BEQ docall
- SUB $1, R8
- ADD $8, R7
- MOVD 0(R7), R3
-
- CMP R8, $0
- BEQ docall
- MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
-
-repeat:
- ADD $8, R7
- MOVD 0(R7), R0 // advance arg pointer by 8 byte
- ADD $8, R6 // advance LE argument address by 8 byte
- MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
- SUB $1, R8
- CMP R8, $0
- BNE repeat
-
-docall:
- MOVD funcdesc+0(FP), R8 // R8-> function descriptor
- LMG 0(R8), R5, R6
- MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
- LE_CALL // balr R7, R6 (return #1)
- NOPH
- MOVD R3, ret+32(FP)
- CMP R3, $0 // compare result to 0
- BNE done
-
- // retrieve errno and errno2
- MOVD zosLibVec<>(SB), R8
- ADD $(__errno), R8
- LMG 0(R8), R5, R6
- LE_CALL // balr R7, R6 __errno (return #3)
- NOPH
- MOVWZ 0(R3), R3
- MOVD R3, err+48(FP)
- MOVD zosLibVec<>(SB), R8
- ADD $(__err2ad), R8
- LMG 0(R8), R5, R6
- LE_CALL // balr R7, R6 __err2ad (return #2)
- NOPH
- MOVW (R3), R2 // retrieve errno2
- MOVD R2, errno2+40(FP) // store in return area
- XOR R2, R2
- MOVWZ R2, (R3) // clear errno2
-
-done:
- MOVD R4, 0(R9) // Save stack pointer.
- RET
-
-//
-// function to test if a pointer can be safely dereferenced (content read)
-// return 0 for succces
-//
-TEXT ·ptrtest(SB), NOSPLIT, $0-16
- MOVD arg+0(FP), R10 // test pointer in R10
-
- // set up R2 to point to CEECAADMC
- BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
- BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
- BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
- BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
- BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
- BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
-
- // set up R5 to point to the "shunt" path which set 1 to R3 (failure)
- BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
- BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
- BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
-
- // if r3 is not zero (failed) then branch to finish
- BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
- BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
-
- // stomic store shunt address in R5 into CEECAADMC
- BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
-
- // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above
- BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
-
- // finish here, restore 0 into CEECAADMC
- BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
- BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
- MOVD R3, ret+8(FP) // result in R3
- RET
-
-//
-// function to test if a untptr can be loaded from a pointer
-// return 1: the 8-byte content
-// 2: 0 for success, 1 for failure
-//
-// func safeload(ptr uintptr) ( value uintptr, error uintptr)
-TEXT ·safeload(SB), NOSPLIT, $0-24
- MOVD ptr+0(FP), R10 // test pointer in R10
- MOVD $0x0, R6
- BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
- BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
- BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
- BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
- BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
- BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
- BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
- BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
- BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
- BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
- BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
- BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
- BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10)
- BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
- BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
- MOVD R6, value+8(FP) // result in R6
- MOVD R3, error+16(FP) // error in R3
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bluetooth_linux.go
deleted file mode 100644
index a178a61..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bluetooth_linux.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Bluetooth sockets and messages
-
-package unix
-
-// Bluetooth Protocols
-const (
- BTPROTO_L2CAP = 0
- BTPROTO_HCI = 1
- BTPROTO_SCO = 2
- BTPROTO_RFCOMM = 3
- BTPROTO_BNEP = 4
- BTPROTO_CMTP = 5
- BTPROTO_HIDP = 6
- BTPROTO_AVDTP = 7
-)
-
-const (
- HCI_CHANNEL_RAW = 0
- HCI_CHANNEL_USER = 1
- HCI_CHANNEL_MONITOR = 2
- HCI_CHANNEL_CONTROL = 3
- HCI_CHANNEL_LOGGING = 4
-)
-
-// Socketoption Level
-const (
- SOL_BLUETOOTH = 0x112
- SOL_HCI = 0x0
- SOL_L2CAP = 0x6
- SOL_RFCOMM = 0x12
- SOL_SCO = 0x11
-)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
deleted file mode 100644
index 39d647d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
+++ /dev/null
@@ -1,657 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos
-
-package unix
-
-import (
- "bytes"
- "fmt"
- "unsafe"
-)
-
-//go:noescape
-func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
-
-//go:noescape
-func A2e([]byte)
-
-//go:noescape
-func E2a([]byte)
-
-const (
- BPX4STA = 192 // stat
- BPX4FST = 104 // fstat
- BPX4LST = 132 // lstat
- BPX4OPN = 156 // open
- BPX4CLO = 72 // close
- BPX4CHR = 500 // chattr
- BPX4FCR = 504 // fchattr
- BPX4LCR = 1180 // lchattr
- BPX4CTW = 492 // cond_timed_wait
- BPX4GTH = 1056 // __getthent
- BPX4PTQ = 412 // pthread_quiesc
- BPX4PTR = 320 // ptrace
-)
-
-const (
- //options
- //byte1
- BPX_OPNFHIGH = 0x80
- //byte2
- BPX_OPNFEXEC = 0x80
- //byte3
- BPX_O_NOLARGEFILE = 0x08
- BPX_O_LARGEFILE = 0x04
- BPX_O_ASYNCSIG = 0x02
- BPX_O_SYNC = 0x01
- //byte4
- BPX_O_CREXCL = 0xc0
- BPX_O_CREAT = 0x80
- BPX_O_EXCL = 0x40
- BPX_O_NOCTTY = 0x20
- BPX_O_TRUNC = 0x10
- BPX_O_APPEND = 0x08
- BPX_O_NONBLOCK = 0x04
- BPX_FNDELAY = 0x04
- BPX_O_RDWR = 0x03
- BPX_O_RDONLY = 0x02
- BPX_O_WRONLY = 0x01
- BPX_O_ACCMODE = 0x03
- BPX_O_GETFL = 0x0f
-
- //mode
- // byte1 (file type)
- BPX_FT_DIR = 1
- BPX_FT_CHARSPEC = 2
- BPX_FT_REGFILE = 3
- BPX_FT_FIFO = 4
- BPX_FT_SYMLINK = 5
- BPX_FT_SOCKET = 6
- //byte3
- BPX_S_ISUID = 0x08
- BPX_S_ISGID = 0x04
- BPX_S_ISVTX = 0x02
- BPX_S_IRWXU1 = 0x01
- BPX_S_IRUSR = 0x01
- //byte4
- BPX_S_IRWXU2 = 0xc0
- BPX_S_IWUSR = 0x80
- BPX_S_IXUSR = 0x40
- BPX_S_IRWXG = 0x38
- BPX_S_IRGRP = 0x20
- BPX_S_IWGRP = 0x10
- BPX_S_IXGRP = 0x08
- BPX_S_IRWXOX = 0x07
- BPX_S_IROTH = 0x04
- BPX_S_IWOTH = 0x02
- BPX_S_IXOTH = 0x01
-
- CW_INTRPT = 1
- CW_CONDVAR = 32
- CW_TIMEOUT = 64
-
- PGTHA_NEXT = 2
- PGTHA_CURRENT = 1
- PGTHA_FIRST = 0
- PGTHA_LAST = 3
- PGTHA_PROCESS = 0x80
- PGTHA_CONTTY = 0x40
- PGTHA_PATH = 0x20
- PGTHA_COMMAND = 0x10
- PGTHA_FILEDATA = 0x08
- PGTHA_THREAD = 0x04
- PGTHA_PTAG = 0x02
- PGTHA_COMMANDLONG = 0x01
- PGTHA_THREADFAST = 0x80
- PGTHA_FILEPATH = 0x40
- PGTHA_THDSIGMASK = 0x20
- // thread quiece mode
- QUIESCE_TERM int32 = 1
- QUIESCE_FORCE int32 = 2
- QUIESCE_QUERY int32 = 3
- QUIESCE_FREEZE int32 = 4
- QUIESCE_UNFREEZE int32 = 5
- FREEZE_THIS_THREAD int32 = 6
- FREEZE_EXIT int32 = 8
- QUIESCE_SRB int32 = 9
-)
-
-type Pgtha struct {
- Pid uint32 // 0
- Tid0 uint32 // 4
- Tid1 uint32
- Accesspid byte // C
- Accesstid byte // D
- Accessasid uint16 // E
- Loginname [8]byte // 10
- Flag1 byte // 18
- Flag1b2 byte // 19
-}
-
-type Bpxystat_t struct { // DSECT BPXYSTAT
- St_id [4]uint8 // 0
- St_length uint16 // 0x4
- St_version uint16 // 0x6
- St_mode uint32 // 0x8
- St_ino uint32 // 0xc
- St_dev uint32 // 0x10
- St_nlink uint32 // 0x14
- St_uid uint32 // 0x18
- St_gid uint32 // 0x1c
- St_size uint64 // 0x20
- St_atime uint32 // 0x28
- St_mtime uint32 // 0x2c
- St_ctime uint32 // 0x30
- St_rdev uint32 // 0x34
- St_auditoraudit uint32 // 0x38
- St_useraudit uint32 // 0x3c
- St_blksize uint32 // 0x40
- St_createtime uint32 // 0x44
- St_auditid [4]uint32 // 0x48
- St_res01 uint32 // 0x58
- Ft_ccsid uint16 // 0x5c
- Ft_flags uint16 // 0x5e
- St_res01a [2]uint32 // 0x60
- St_res02 uint32 // 0x68
- St_blocks uint32 // 0x6c
- St_opaque [3]uint8 // 0x70
- St_visible uint8 // 0x73
- St_reftime uint32 // 0x74
- St_fid uint64 // 0x78
- St_filefmt uint8 // 0x80
- St_fspflag2 uint8 // 0x81
- St_res03 [2]uint8 // 0x82
- St_ctimemsec uint32 // 0x84
- St_seclabel [8]uint8 // 0x88
- St_res04 [4]uint8 // 0x90
- // end of version 1
- _ uint32 // 0x94
- St_atime64 uint64 // 0x98
- St_mtime64 uint64 // 0xa0
- St_ctime64 uint64 // 0xa8
- St_createtime64 uint64 // 0xb0
- St_reftime64 uint64 // 0xb8
- _ uint64 // 0xc0
- St_res05 [16]uint8 // 0xc8
- // end of version 2
-}
-
-type BpxFilestatus struct {
- Oflag1 byte
- Oflag2 byte
- Oflag3 byte
- Oflag4 byte
-}
-
-type BpxMode struct {
- Ftype byte
- Mode1 byte
- Mode2 byte
- Mode3 byte
-}
-
-// Thr attribute structure for extended attributes
-type Bpxyatt_t struct { // DSECT BPXYATT
- Att_id [4]uint8
- Att_version uint16
- Att_res01 [2]uint8
- Att_setflags1 uint8
- Att_setflags2 uint8
- Att_setflags3 uint8
- Att_setflags4 uint8
- Att_mode uint32
- Att_uid uint32
- Att_gid uint32
- Att_opaquemask [3]uint8
- Att_visblmaskres uint8
- Att_opaque [3]uint8
- Att_visibleres uint8
- Att_size_h uint32
- Att_size_l uint32
- Att_atime uint32
- Att_mtime uint32
- Att_auditoraudit uint32
- Att_useraudit uint32
- Att_ctime uint32
- Att_reftime uint32
- // end of version 1
- Att_filefmt uint8
- Att_res02 [3]uint8
- Att_filetag uint32
- Att_res03 [8]uint8
- // end of version 2
- Att_atime64 uint64
- Att_mtime64 uint64
- Att_ctime64 uint64
- Att_reftime64 uint64
- Att_seclabel [8]uint8
- Att_ver3res02 [8]uint8
- // end of version 3
-}
-
-func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) {
- if len(name) < 1024 {
- var namebuf [1024]byte
- sz := int32(copy(namebuf[:], name))
- A2e(namebuf[:sz])
- var parms [7]unsafe.Pointer
- parms[0] = unsafe.Pointer(&sz)
- parms[1] = unsafe.Pointer(&namebuf[0])
- parms[2] = unsafe.Pointer(options)
- parms[3] = unsafe.Pointer(mode)
- parms[4] = unsafe.Pointer(&rv)
- parms[5] = unsafe.Pointer(&rc)
- parms[6] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4OPN)
- return rv, rc, rn
- }
- return -1, -1, -1
-}
-
-func BpxClose(fd int32) (rv int32, rc int32, rn int32) {
- var parms [4]unsafe.Pointer
- parms[0] = unsafe.Pointer(&fd)
- parms[1] = unsafe.Pointer(&rv)
- parms[2] = unsafe.Pointer(&rc)
- parms[3] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4CLO)
- return rv, rc, rn
-}
-
-func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
- st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
- st.St_version = 2
- stat_sz := uint32(unsafe.Sizeof(*st))
- var parms [6]unsafe.Pointer
- parms[0] = unsafe.Pointer(&fd)
- parms[1] = unsafe.Pointer(&stat_sz)
- parms[2] = unsafe.Pointer(st)
- parms[3] = unsafe.Pointer(&rv)
- parms[4] = unsafe.Pointer(&rc)
- parms[5] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4FST)
- return rv, rc, rn
-}
-
-func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
- if len(name) < 1024 {
- var namebuf [1024]byte
- sz := int32(copy(namebuf[:], name))
- A2e(namebuf[:sz])
- st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
- st.St_version = 2
- stat_sz := uint32(unsafe.Sizeof(*st))
- var parms [7]unsafe.Pointer
- parms[0] = unsafe.Pointer(&sz)
- parms[1] = unsafe.Pointer(&namebuf[0])
- parms[2] = unsafe.Pointer(&stat_sz)
- parms[3] = unsafe.Pointer(st)
- parms[4] = unsafe.Pointer(&rv)
- parms[5] = unsafe.Pointer(&rc)
- parms[6] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4STA)
- return rv, rc, rn
- }
- return -1, -1, -1
-}
-
-func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
- if len(name) < 1024 {
- var namebuf [1024]byte
- sz := int32(copy(namebuf[:], name))
- A2e(namebuf[:sz])
- st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
- st.St_version = 2
- stat_sz := uint32(unsafe.Sizeof(*st))
- var parms [7]unsafe.Pointer
- parms[0] = unsafe.Pointer(&sz)
- parms[1] = unsafe.Pointer(&namebuf[0])
- parms[2] = unsafe.Pointer(&stat_sz)
- parms[3] = unsafe.Pointer(st)
- parms[4] = unsafe.Pointer(&rv)
- parms[5] = unsafe.Pointer(&rc)
- parms[6] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4LST)
- return rv, rc, rn
- }
- return -1, -1, -1
-}
-
-func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
- if len(path) >= 1024 {
- return -1, -1, -1
- }
- var namebuf [1024]byte
- sz := int32(copy(namebuf[:], path))
- A2e(namebuf[:sz])
- attr_sz := uint32(unsafe.Sizeof(*attr))
- var parms [7]unsafe.Pointer
- parms[0] = unsafe.Pointer(&sz)
- parms[1] = unsafe.Pointer(&namebuf[0])
- parms[2] = unsafe.Pointer(&attr_sz)
- parms[3] = unsafe.Pointer(attr)
- parms[4] = unsafe.Pointer(&rv)
- parms[5] = unsafe.Pointer(&rc)
- parms[6] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4CHR)
- return rv, rc, rn
-}
-
-func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
- if len(path) >= 1024 {
- return -1, -1, -1
- }
- var namebuf [1024]byte
- sz := int32(copy(namebuf[:], path))
- A2e(namebuf[:sz])
- attr_sz := uint32(unsafe.Sizeof(*attr))
- var parms [7]unsafe.Pointer
- parms[0] = unsafe.Pointer(&sz)
- parms[1] = unsafe.Pointer(&namebuf[0])
- parms[2] = unsafe.Pointer(&attr_sz)
- parms[3] = unsafe.Pointer(attr)
- parms[4] = unsafe.Pointer(&rv)
- parms[5] = unsafe.Pointer(&rc)
- parms[6] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4LCR)
- return rv, rc, rn
-}
-
-func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
- attr_sz := uint32(unsafe.Sizeof(*attr))
- var parms [6]unsafe.Pointer
- parms[0] = unsafe.Pointer(&fd)
- parms[1] = unsafe.Pointer(&attr_sz)
- parms[2] = unsafe.Pointer(attr)
- parms[3] = unsafe.Pointer(&rv)
- parms[4] = unsafe.Pointer(&rc)
- parms[5] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4FCR)
- return rv, rc, rn
-}
-
-func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) {
- var parms [8]unsafe.Pointer
- parms[0] = unsafe.Pointer(&sec)
- parms[1] = unsafe.Pointer(&nsec)
- parms[2] = unsafe.Pointer(&events)
- parms[3] = unsafe.Pointer(secrem)
- parms[4] = unsafe.Pointer(nsecrem)
- parms[5] = unsafe.Pointer(&rv)
- parms[6] = unsafe.Pointer(&rc)
- parms[7] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4CTW)
- return rv, rc, rn
-}
-func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) {
- var parms [7]unsafe.Pointer
- inlen := uint32(26) // nothing else will work. Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte
- parms[0] = unsafe.Pointer(&inlen)
- parms[1] = unsafe.Pointer(&in)
- parms[2] = unsafe.Pointer(outlen)
- parms[3] = unsafe.Pointer(&out)
- parms[4] = unsafe.Pointer(&rv)
- parms[5] = unsafe.Pointer(&rc)
- parms[6] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4GTH)
- return rv, rc, rn
-}
-func ZosJobname() (jobname string, err error) {
- var pgtha Pgtha
- pgtha.Pid = uint32(Getpid())
- pgtha.Accesspid = PGTHA_CURRENT
- pgtha.Flag1 = PGTHA_PROCESS
- var out [256]byte
- var outlen uint32
- outlen = 256
- rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0]))
- if rv == 0 {
- gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic
- ix := bytes.Index(out[:], gthc)
- if ix == -1 {
- err = fmt.Errorf("BPX4GTH: gthc return data not found")
- return
- }
- jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80
- E2a(jn)
- jobname = string(bytes.TrimRight(jn, " "))
-
- } else {
- err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn)
- }
- return
-}
-func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) {
- var userdata [8]byte
- var parms [5]unsafe.Pointer
- copy(userdata[:], data+" ")
- A2e(userdata[:])
- parms[0] = unsafe.Pointer(&code)
- parms[1] = unsafe.Pointer(&userdata[0])
- parms[2] = unsafe.Pointer(&rv)
- parms[3] = unsafe.Pointer(&rc)
- parms[4] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4PTQ)
- return rv, rc, rn
-}
-
-const (
- PT_TRACE_ME = 0 // Debug this process
- PT_READ_I = 1 // Read a full word
- PT_READ_D = 2 // Read a full word
- PT_READ_U = 3 // Read control info
- PT_WRITE_I = 4 //Write a full word
- PT_WRITE_D = 5 //Write a full word
- PT_CONTINUE = 7 //Continue the process
- PT_KILL = 8 //Terminate the process
- PT_READ_GPR = 11 // Read GPR, CR, PSW
- PT_READ_FPR = 12 // Read FPR
- PT_READ_VR = 13 // Read VR
- PT_WRITE_GPR = 14 // Write GPR, CR, PSW
- PT_WRITE_FPR = 15 // Write FPR
- PT_WRITE_VR = 16 // Write VR
- PT_READ_BLOCK = 17 // Read storage
- PT_WRITE_BLOCK = 19 // Write storage
- PT_READ_GPRH = 20 // Read GPRH
- PT_WRITE_GPRH = 21 // Write GPRH
- PT_REGHSET = 22 // Read all GPRHs
- PT_ATTACH = 30 // Attach to a process
- PT_DETACH = 31 // Detach from a process
- PT_REGSET = 32 // Read all GPRs
- PT_REATTACH = 33 // Reattach to a process
- PT_LDINFO = 34 // Read loader info
- PT_MULTI = 35 // Multi process mode
- PT_LD64INFO = 36 // RMODE64 Info Area
- PT_BLOCKREQ = 40 // Block request
- PT_THREAD_INFO = 60 // Read thread info
- PT_THREAD_MODIFY = 61
- PT_THREAD_READ_FOCUS = 62
- PT_THREAD_WRITE_FOCUS = 63
- PT_THREAD_HOLD = 64
- PT_THREAD_SIGNAL = 65
- PT_EXPLAIN = 66
- PT_EVENTS = 67
- PT_THREAD_INFO_EXTENDED = 68
- PT_REATTACH2 = 71
- PT_CAPTURE = 72
- PT_UNCAPTURE = 73
- PT_GET_THREAD_TCB = 74
- PT_GET_ALET = 75
- PT_SWAPIN = 76
- PT_EXTENDED_EVENT = 98
- PT_RECOVER = 99 // Debug a program check
- PT_GPR0 = 0 // General purpose register 0
- PT_GPR1 = 1 // General purpose register 1
- PT_GPR2 = 2 // General purpose register 2
- PT_GPR3 = 3 // General purpose register 3
- PT_GPR4 = 4 // General purpose register 4
- PT_GPR5 = 5 // General purpose register 5
- PT_GPR6 = 6 // General purpose register 6
- PT_GPR7 = 7 // General purpose register 7
- PT_GPR8 = 8 // General purpose register 8
- PT_GPR9 = 9 // General purpose register 9
- PT_GPR10 = 10 // General purpose register 10
- PT_GPR11 = 11 // General purpose register 11
- PT_GPR12 = 12 // General purpose register 12
- PT_GPR13 = 13 // General purpose register 13
- PT_GPR14 = 14 // General purpose register 14
- PT_GPR15 = 15 // General purpose register 15
- PT_FPR0 = 16 // Floating point register 0
- PT_FPR1 = 17 // Floating point register 1
- PT_FPR2 = 18 // Floating point register 2
- PT_FPR3 = 19 // Floating point register 3
- PT_FPR4 = 20 // Floating point register 4
- PT_FPR5 = 21 // Floating point register 5
- PT_FPR6 = 22 // Floating point register 6
- PT_FPR7 = 23 // Floating point register 7
- PT_FPR8 = 24 // Floating point register 8
- PT_FPR9 = 25 // Floating point register 9
- PT_FPR10 = 26 // Floating point register 10
- PT_FPR11 = 27 // Floating point register 11
- PT_FPR12 = 28 // Floating point register 12
- PT_FPR13 = 29 // Floating point register 13
- PT_FPR14 = 30 // Floating point register 14
- PT_FPR15 = 31 // Floating point register 15
- PT_FPC = 32 // Floating point control register
- PT_PSW = 40 // PSW
- PT_PSW0 = 40 // Left half of the PSW
- PT_PSW1 = 41 // Right half of the PSW
- PT_CR0 = 42 // Control register 0
- PT_CR1 = 43 // Control register 1
- PT_CR2 = 44 // Control register 2
- PT_CR3 = 45 // Control register 3
- PT_CR4 = 46 // Control register 4
- PT_CR5 = 47 // Control register 5
- PT_CR6 = 48 // Control register 6
- PT_CR7 = 49 // Control register 7
- PT_CR8 = 50 // Control register 8
- PT_CR9 = 51 // Control register 9
- PT_CR10 = 52 // Control register 10
- PT_CR11 = 53 // Control register 11
- PT_CR12 = 54 // Control register 12
- PT_CR13 = 55 // Control register 13
- PT_CR14 = 56 // Control register 14
- PT_CR15 = 57 // Control register 15
- PT_GPRH0 = 58 // GP High register 0
- PT_GPRH1 = 59 // GP High register 1
- PT_GPRH2 = 60 // GP High register 2
- PT_GPRH3 = 61 // GP High register 3
- PT_GPRH4 = 62 // GP High register 4
- PT_GPRH5 = 63 // GP High register 5
- PT_GPRH6 = 64 // GP High register 6
- PT_GPRH7 = 65 // GP High register 7
- PT_GPRH8 = 66 // GP High register 8
- PT_GPRH9 = 67 // GP High register 9
- PT_GPRH10 = 68 // GP High register 10
- PT_GPRH11 = 69 // GP High register 11
- PT_GPRH12 = 70 // GP High register 12
- PT_GPRH13 = 71 // GP High register 13
- PT_GPRH14 = 72 // GP High register 14
- PT_GPRH15 = 73 // GP High register 15
- PT_VR0 = 74 // Vector register 0
- PT_VR1 = 75 // Vector register 1
- PT_VR2 = 76 // Vector register 2
- PT_VR3 = 77 // Vector register 3
- PT_VR4 = 78 // Vector register 4
- PT_VR5 = 79 // Vector register 5
- PT_VR6 = 80 // Vector register 6
- PT_VR7 = 81 // Vector register 7
- PT_VR8 = 82 // Vector register 8
- PT_VR9 = 83 // Vector register 9
- PT_VR10 = 84 // Vector register 10
- PT_VR11 = 85 // Vector register 11
- PT_VR12 = 86 // Vector register 12
- PT_VR13 = 87 // Vector register 13
- PT_VR14 = 88 // Vector register 14
- PT_VR15 = 89 // Vector register 15
- PT_VR16 = 90 // Vector register 16
- PT_VR17 = 91 // Vector register 17
- PT_VR18 = 92 // Vector register 18
- PT_VR19 = 93 // Vector register 19
- PT_VR20 = 94 // Vector register 20
- PT_VR21 = 95 // Vector register 21
- PT_VR22 = 96 // Vector register 22
- PT_VR23 = 97 // Vector register 23
- PT_VR24 = 98 // Vector register 24
- PT_VR25 = 99 // Vector register 25
- PT_VR26 = 100 // Vector register 26
- PT_VR27 = 101 // Vector register 27
- PT_VR28 = 102 // Vector register 28
- PT_VR29 = 103 // Vector register 29
- PT_VR30 = 104 // Vector register 30
- PT_VR31 = 105 // Vector register 31
- PT_PSWG = 106 // PSWG
- PT_PSWG0 = 106 // Bytes 0-3
- PT_PSWG1 = 107 // Bytes 4-7
- PT_PSWG2 = 108 // Bytes 8-11 (IA high word)
- PT_PSWG3 = 109 // Bytes 12-15 (IA low word)
-)
-
-func Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) {
- var parms [8]unsafe.Pointer
- parms[0] = unsafe.Pointer(&request)
- parms[1] = unsafe.Pointer(&pid)
- parms[2] = unsafe.Pointer(&addr)
- parms[3] = unsafe.Pointer(&data)
- parms[4] = unsafe.Pointer(&buffer)
- parms[5] = unsafe.Pointer(&rv)
- parms[6] = unsafe.Pointer(&rc)
- parms[7] = unsafe.Pointer(&rn)
- bpxcall(parms[:], BPX4PTR)
- return rv, rc, rn
-}
-
-func copyU8(val uint8, dest []uint8) int {
- if len(dest) < 1 {
- return 0
- }
- dest[0] = val
- return 1
-}
-
-func copyU8Arr(src, dest []uint8) int {
- if len(dest) < len(src) {
- return 0
- }
- for i, v := range src {
- dest[i] = v
- }
- return len(src)
-}
-
-func copyU16(val uint16, dest []uint16) int {
- if len(dest) < 1 {
- return 0
- }
- dest[0] = val
- return 1
-}
-
-func copyU32(val uint32, dest []uint32) int {
- if len(dest) < 1 {
- return 0
- }
- dest[0] = val
- return 1
-}
-
-func copyU32Arr(src, dest []uint32) int {
- if len(dest) < len(src) {
- return 0
- }
- for i, v := range src {
- dest[i] = v
- }
- return len(src)
-}
-
-func copyU64(val uint64, dest []uint64) int {
- if len(dest) < 1 {
- return 0
- }
- dest[0] = val
- return 1
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bpxsvc_zos.s
deleted file mode 100644
index 4bd4a17..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/bpxsvc_zos.s
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "go_asm.h"
-#include "textflag.h"
-
-// function to call USS assembly language services
-//
-// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm
-//
-// arg1 unsafe.Pointer array that ressembles an OS PLIST
-//
-// arg2 function offset as in
-// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm
-//
-// func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
-
-TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0
- MOVD plist_base+0(FP), R1 // r1 points to plist
- MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table
- MOVD R14, R7 // save r14
- MOVD R15, R8 // save r15
- MOVWZ 16(R0), R9
- MOVWZ 544(R9), R9
- MOVWZ 24(R9), R9 // call vector in r9
- ADD R2, R9 // add offset to vector table
- MOVWZ (R9), R9 // r9 points to entry point
- BYTE $0x0D // BL R14,R9 --> basr r14,r9
- BYTE $0xE9 // clobbers 0,1,14,15
- MOVD R8, R15 // restore 15
- JMP R7 // return via saved return address
-
-// func A2e(arr [] byte)
-// code page conversion from 819 to 1047
-TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0
- MOVD arg_base+0(FP), R2 // pointer to arry of characters
- MOVD arg_len+8(FP), R3 // count
- XOR R0, R0
- XOR R1, R1
- BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2))
-
- // ASCII -> EBCDIC conversion table:
- BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03
- BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f
- BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b
- BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f
- BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13
- BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26
- BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27
- BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f
- BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b
- BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d
- BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e
- BYTE $0x6b; BYTE $0x60; BYTE $0x4b; BYTE $0x61
- BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3
- BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7
- BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e
- BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f
- BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3
- BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7
- BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2
- BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6
- BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2
- BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6
- BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad
- BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d
- BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83
- BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87
- BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92
- BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96
- BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2
- BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6
- BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0
- BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07
- BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23
- BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17
- BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b
- BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b
- BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33
- BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08
- BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b
- BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff
- BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1
- BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5
- BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a
- BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc
- BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa
- BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3
- BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b
- BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab
- BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66
- BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68
- BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73
- BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77
- BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee
- BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf
- BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb
- BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59
- BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46
- BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48
- BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53
- BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57
- BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce
- BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1
- BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb
- BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf
-
-retry:
- WORD $0xB9931022 // TROO 2,2,b'0001'
- BVS retry
- RET
-
-// func e2a(arr [] byte)
-// code page conversion from 1047 to 819
-TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0
- MOVD arg_base+0(FP), R2 // pointer to arry of characters
- MOVD arg_len+8(FP), R3 // count
- XOR R0, R0
- XOR R1, R1
- BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2))
-
- // EBCDIC -> ASCII conversion table:
- BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03
- BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f
- BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b
- BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f
- BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13
- BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87
- BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f
- BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f
- BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83
- BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b
- BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b
- BYTE $0x8c; BYTE $0x05; BYTE $0x06; BYTE $0x07
- BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93
- BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04
- BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b
- BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a
- BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4
- BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5
- BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e
- BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c
- BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb
- BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef
- BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24
- BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e
- BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4
- BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5
- BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c
- BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f
- BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb
- BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf
- BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23
- BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22
- BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63
- BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67
- BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb
- BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1
- BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c
- BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70
- BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba
- BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4
- BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74
- BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78
- BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf
- BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae
- BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7
- BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc
- BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8
- BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7
- BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43
- BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47
- BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4
- BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5
- BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c
- BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50
- BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb
- BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff
- BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54
- BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58
- BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4
- BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5
- BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33
- BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37
- BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb
- BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f
-
-retry:
- WORD $0xB9931022 // TROO 2,2,b'0001'
- BVS retry
- RET
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/cap_freebsd.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/cap_freebsd.go
deleted file mode 100644
index a086578..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build freebsd
-
-package unix
-
-import (
- "errors"
- "fmt"
-)
-
-// Go implementation of C mostly found in /usr/src/sys/kern/subr_capability.c
-
-const (
- // This is the version of CapRights this package understands. See C implementation for parallels.
- capRightsGoVersion = CAP_RIGHTS_VERSION_00
- capArSizeMin = CAP_RIGHTS_VERSION_00 + 2
- capArSizeMax = capRightsGoVersion + 2
-)
-
-var (
- bit2idx = []int{
- -1, 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
- 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- }
-)
-
-func capidxbit(right uint64) int {
- return int((right >> 57) & 0x1f)
-}
-
-func rightToIndex(right uint64) (int, error) {
- idx := capidxbit(right)
- if idx < 0 || idx >= len(bit2idx) {
- return -2, fmt.Errorf("index for right 0x%x out of range", right)
- }
- return bit2idx[idx], nil
-}
-
-func caprver(right uint64) int {
- return int(right >> 62)
-}
-
-func capver(rights *CapRights) int {
- return caprver(rights.Rights[0])
-}
-
-func caparsize(rights *CapRights) int {
- return capver(rights) + 2
-}
-
-// CapRightsSet sets the permissions in setrights in rights.
-func CapRightsSet(rights *CapRights, setrights []uint64) error {
- // This is essentially a copy of cap_rights_vset()
- if capver(rights) != CAP_RIGHTS_VERSION_00 {
- return fmt.Errorf("bad rights version %d", capver(rights))
- }
-
- n := caparsize(rights)
- if n < capArSizeMin || n > capArSizeMax {
- return errors.New("bad rights size")
- }
-
- for _, right := range setrights {
- if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errors.New("bad right version")
- }
- i, err := rightToIndex(right)
- if err != nil {
- return err
- }
- if i >= n {
- return errors.New("index overflow")
- }
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch")
- }
- rights.Rights[i] |= right
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch (after assign)")
- }
- }
-
- return nil
-}
-
-// CapRightsClear clears the permissions in clearrights from rights.
-func CapRightsClear(rights *CapRights, clearrights []uint64) error {
- // This is essentially a copy of cap_rights_vclear()
- if capver(rights) != CAP_RIGHTS_VERSION_00 {
- return fmt.Errorf("bad rights version %d", capver(rights))
- }
-
- n := caparsize(rights)
- if n < capArSizeMin || n > capArSizeMax {
- return errors.New("bad rights size")
- }
-
- for _, right := range clearrights {
- if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errors.New("bad right version")
- }
- i, err := rightToIndex(right)
- if err != nil {
- return err
- }
- if i >= n {
- return errors.New("index overflow")
- }
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch")
- }
- rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errors.New("index mismatch (after assign)")
- }
- }
-
- return nil
-}
-
-// CapRightsIsSet checks whether all the permissions in setrights are present in rights.
-func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
- // This is essentially a copy of cap_rights_is_vset()
- if capver(rights) != CAP_RIGHTS_VERSION_00 {
- return false, fmt.Errorf("bad rights version %d", capver(rights))
- }
-
- n := caparsize(rights)
- if n < capArSizeMin || n > capArSizeMax {
- return false, errors.New("bad rights size")
- }
-
- for _, right := range setrights {
- if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return false, errors.New("bad right version")
- }
- i, err := rightToIndex(right)
- if err != nil {
- return false, err
- }
- if i >= n {
- return false, errors.New("index overflow")
- }
- if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return false, errors.New("index mismatch")
- }
- if (rights.Rights[i] & right) != right {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-func capright(idx uint64, bit uint64) uint64 {
- return ((1 << (57 + idx)) | bit)
-}
-
-// CapRightsInit returns a pointer to an initialised CapRights structure filled with rights.
-// See man cap_rights_init(3) and rights(4).
-func CapRightsInit(rights []uint64) (*CapRights, error) {
- var r CapRights
- r.Rights[0] = (capRightsGoVersion << 62) | capright(0, 0)
- r.Rights[1] = capright(1, 0)
-
- err := CapRightsSet(&r, rights)
- if err != nil {
- return nil, err
- }
- return &r, nil
-}
-
-// CapRightsLimit reduces the operations permitted on fd to at most those contained in rights.
-// The capability rights on fd can never be increased by CapRightsLimit.
-// See man cap_rights_limit(2) and rights(4).
-func CapRightsLimit(fd uintptr, rights *CapRights) error {
- return capRightsLimit(int(fd), rights)
-}
-
-// CapRightsGet returns a CapRights structure containing the operations permitted on fd.
-// See man cap_rights_get(3) and rights(4).
-func CapRightsGet(fd uintptr) (*CapRights, error) {
- r, err := CapRightsInit(nil)
- if err != nil {
- return nil, err
- }
- err = capRightsGet(capRightsGoVersion, int(fd), r)
- if err != nil {
- return nil, err
- }
- return r, nil
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/constants.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/constants.go
deleted file mode 100644
index 6fb7cb7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/constants.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-
-package unix
-
-const (
- R_OK = 0x4
- W_OK = 0x2
- X_OK = 0x1
-)
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
deleted file mode 100644
index d785134..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix && ppc
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used by AIX.
-
-package unix
-
-// Major returns the major component of a Linux device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 16) & 0xffff)
-}
-
-// Minor returns the minor component of a Linux device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffff)
-}
-
-// Mkdev returns a Linux device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- return uint64(((major) << 16) | (minor))
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
deleted file mode 100644
index 623a5e6..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix && ppc64
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used AIX.
-
-package unix
-
-// Major returns the major component of a Linux device number.
-func Major(dev uint64) uint32 {
- return uint32((dev & 0x3fffffff00000000) >> 32)
-}
-
-// Minor returns the minor component of a Linux device number.
-func Minor(dev uint64) uint32 {
- return uint32((dev & 0x00000000ffffffff) >> 0)
-}
-
-// Mkdev returns a Linux device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- var DEVNO64 uint64
- DEVNO64 = 0x8000000000000000
- return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_darwin.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_darwin.go
deleted file mode 100644
index 8d1dc0f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_darwin.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in Darwin's sys/types.h header.
-
-package unix
-
-// Major returns the major component of a Darwin device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 24) & 0xff)
-}
-
-// Minor returns the minor component of a Darwin device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffffff)
-}
-
-// Mkdev returns a Darwin device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 24) | uint64(minor)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_dragonfly.go
deleted file mode 100644
index 8502f20..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_dragonfly.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in Dragonfly's sys/types.h header.
-//
-// The information below is extracted and adapted from sys/types.h:
-//
-// Minor gives a cookie instead of an index since in order to avoid changing the
-// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
-// devices that don't use them.
-
-package unix
-
-// Major returns the major component of a DragonFlyBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 8) & 0xff)
-}
-
-// Minor returns the minor component of a DragonFlyBSD device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffff00ff)
-}
-
-// Mkdev returns a DragonFlyBSD device number generated from the given major and
-// minor components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 8) | uint64(minor)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_freebsd.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_freebsd.go
deleted file mode 100644
index eba3b4b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_freebsd.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in FreeBSD's sys/types.h header.
-//
-// The information below is extracted and adapted from sys/types.h:
-//
-// Minor gives a cookie instead of an index since in order to avoid changing the
-// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
-// devices that don't use them.
-
-package unix
-
-// Major returns the major component of a FreeBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 8) & 0xff)
-}
-
-// Minor returns the minor component of a FreeBSD device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0xffff00ff)
-}
-
-// Mkdev returns a FreeBSD device number generated from the given major and
-// minor components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 8) | uint64(minor)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_linux.go
deleted file mode 100644
index d165d6f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_linux.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used by the Linux kernel and glibc.
-//
-// The information below is extracted and adapted from bits/sysmacros.h in the
-// glibc sources:
-//
-// dev_t in glibc is 64-bit, with 32-bit major and minor numbers. glibc's
-// default encoding is MMMM Mmmm mmmM MMmm, where M is a hex digit of the major
-// number and m is a hex digit of the minor number. This is backward compatible
-// with legacy systems where dev_t is 16 bits wide, encoded as MMmm. It is also
-// backward compatible with the Linux kernel, which for some architectures uses
-// 32-bit dev_t, encoded as mmmM MMmm.
-
-package unix
-
-// Major returns the major component of a Linux device number.
-func Major(dev uint64) uint32 {
- major := uint32((dev & 0x00000000000fff00) >> 8)
- major |= uint32((dev & 0xfffff00000000000) >> 32)
- return major
-}
-
-// Minor returns the minor component of a Linux device number.
-func Minor(dev uint64) uint32 {
- minor := uint32((dev & 0x00000000000000ff) >> 0)
- minor |= uint32((dev & 0x00000ffffff00000) >> 12)
- return minor
-}
-
-// Mkdev returns a Linux device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- dev := (uint64(major) & 0x00000fff) << 8
- dev |= (uint64(major) & 0xfffff000) << 32
- dev |= (uint64(minor) & 0x000000ff) << 0
- dev |= (uint64(minor) & 0xffffff00) << 12
- return dev
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_netbsd.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_netbsd.go
deleted file mode 100644
index b4a203d..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_netbsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in NetBSD's sys/types.h header.
-
-package unix
-
-// Major returns the major component of a NetBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev & 0x000fff00) >> 8)
-}
-
-// Minor returns the minor component of a NetBSD device number.
-func Minor(dev uint64) uint32 {
- minor := uint32((dev & 0x000000ff) >> 0)
- minor |= uint32((dev & 0xfff00000) >> 12)
- return minor
-}
-
-// Mkdev returns a NetBSD device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- dev := (uint64(major) << 8) & 0x000fff00
- dev |= (uint64(minor) << 12) & 0xfff00000
- dev |= (uint64(minor) << 0) & 0x000000ff
- return dev
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_openbsd.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_openbsd.go
deleted file mode 100644
index f3430c4..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_openbsd.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used in OpenBSD's sys/types.h header.
-
-package unix
-
-// Major returns the major component of an OpenBSD device number.
-func Major(dev uint64) uint32 {
- return uint32((dev & 0x0000ff00) >> 8)
-}
-
-// Minor returns the minor component of an OpenBSD device number.
-func Minor(dev uint64) uint32 {
- minor := uint32((dev & 0x000000ff) >> 0)
- minor |= uint32((dev & 0xffff0000) >> 8)
- return minor
-}
-
-// Mkdev returns an OpenBSD device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- dev := (uint64(major) << 8) & 0x0000ff00
- dev |= (uint64(minor) << 8) & 0xffff0000
- dev |= (uint64(minor) << 0) & 0x000000ff
- return dev
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_zos.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_zos.go
deleted file mode 100644
index bb6a64f..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dev_zos.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x
-
-// Functions to access/create device major and minor numbers matching the
-// encoding used by z/OS.
-//
-// The information below is extracted and adapted from macros.
-
-package unix
-
-// Major returns the major component of a z/OS device number.
-func Major(dev uint64) uint32 {
- return uint32((dev >> 16) & 0x0000FFFF)
-}
-
-// Minor returns the minor component of a z/OS device number.
-func Minor(dev uint64) uint32 {
- return uint32(dev & 0x0000FFFF)
-}
-
-// Mkdev returns a z/OS device number generated from the given major and minor
-// components.
-func Mkdev(major, minor uint32) uint64 {
- return (uint64(major) << 16) | uint64(minor)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dirent.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dirent.go
deleted file mode 100644
index 1ebf117..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/dirent.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-
-package unix
-
-import "unsafe"
-
-// readInt returns the size-bytes unsigned integer in native byte order at offset off.
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
- if len(b) < int(off+size) {
- return 0, false
- }
- if isBigEndian {
- return readIntBE(b[off:], size), true
- }
- return readIntLE(b[off:], size), true
-}
-
-func readIntBE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[1]) | uint64(b[0])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-func readIntLE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-// ParseDirent parses up to max directory entries in buf,
-// appending the names to names. It returns the number of
-// bytes consumed from buf, the number of entries added
-// to names, and the new names slice.
-func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
- origlen := len(buf)
- count = 0
- for max != 0 && len(buf) > 0 {
- reclen, ok := direntReclen(buf)
- if !ok || reclen > uint64(len(buf)) {
- return origlen, count, names
- }
- rec := buf[:reclen]
- buf = buf[reclen:]
- ino, ok := direntIno(rec)
- if !ok {
- break
- }
- if ino == 0 { // File absent in directory.
- continue
- }
- const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
- namlen, ok := direntNamlen(rec)
- if !ok || namoff+namlen > uint64(len(rec)) {
- break
- }
- name := rec[namoff : namoff+namlen]
- for i, c := range name {
- if c == 0 {
- name = name[:i]
- break
- }
- }
- // Check for useless names before allocating a string.
- if string(name) == "." || string(name) == ".." {
- continue
- }
- max--
- count++
- names = append(names, string(name))
- }
- return origlen - len(buf), count, names
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/endian_big.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/endian_big.go
deleted file mode 100644
index 1095fd3..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/endian_big.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
-
-package unix
-
-const isBigEndian = true
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/endian_little.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/endian_little.go
deleted file mode 100644
index b9f0e27..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/endian_little.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-//
-//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
-
-package unix
-
-const isBigEndian = false
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/env_unix.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/env_unix.go
deleted file mode 100644
index a96da71..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/env_unix.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-
-// Unix environment variables.
-
-package unix
-
-import "syscall"
-
-func Getenv(key string) (value string, found bool) {
- return syscall.Getenv(key)
-}
-
-func Setenv(key, value string) error {
- return syscall.Setenv(key, value)
-}
-
-func Clearenv() {
- syscall.Clearenv()
-}
-
-func Environ() []string {
- return syscall.Environ()
-}
-
-func Unsetenv(key string) error {
- return syscall.Unsetenv(key)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl.go
deleted file mode 100644
index 6200876..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build dragonfly || freebsd || linux || netbsd
-
-package unix
-
-import "unsafe"
-
-// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
-// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
-var fcntl64Syscall uintptr = SYS_FCNTL
-
-func fcntl(fd int, cmd, arg int) (int, error) {
- valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
- var err error
- if errno != 0 {
- err = errno
- }
- return int(valptr), err
-}
-
-// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
-func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
- return fcntl(int(fd), cmd, arg)
-}
-
-// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
- _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
- if errno == 0 {
- return nil
- }
- return errno
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl_darwin.go
deleted file mode 100644
index a9911c7..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl_darwin.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unix
-
-import "unsafe"
-
-// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
-func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
- return fcntl(int(fd), cmd, arg)
-}
-
-// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
-func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
- _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
- return err
-}
-
-// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command.
-func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error {
- _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore))))
- return err
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
deleted file mode 100644
index 13b4acd..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
-
-package unix
-
-func init() {
- // On 32-bit Linux systems, the fcntl syscall that matches Go's
- // Flock_t type is SYS_FCNTL64, not SYS_FCNTL.
- fcntl64Syscall = SYS_FCNTL64
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fdset.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fdset.go
deleted file mode 100644
index 9e83d18..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/fdset.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-
-package unix
-
-// Set adds fd to the set fds.
-func (fds *FdSet) Set(fd int) {
- fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
-}
-
-// Clear removes fd from the set fds.
-func (fds *FdSet) Clear(fd int) {
- fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
-}
-
-// IsSet returns whether fd is in the set fds.
-func (fds *FdSet) IsSet(fd int) bool {
- return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
-}
-
-// Zero clears the set fds.
-func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo.go
deleted file mode 100644
index aca5721..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo && !aix && !hurd
-
-package unix
-
-import "syscall"
-
-// We can't use the gc-syntax .s files for gccgo. On the plus side
-// much of the functionality can be written directly in Go.
-
-func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
-
-func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
-
-func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
- syscall.Entersyscall()
- r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- syscall.Exitsyscall()
- return r, 0
-}
-
-func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- syscall.Entersyscall()
- r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- syscall.Exitsyscall()
- return r, 0, syscall.Errno(errno)
-}
-
-func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- syscall.Entersyscall()
- r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
- syscall.Exitsyscall()
- return r, 0, syscall.Errno(errno)
-}
-
-func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- syscall.Entersyscall()
- r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9)
- syscall.Exitsyscall()
- return r, 0, syscall.Errno(errno)
-}
-
-func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
- r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- return r, 0
-}
-
-func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
- return r, 0, syscall.Errno(errno)
-}
-
-func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) {
- r, errno := realSyscall(trap, a1, a2, a3, a4, a5, a6, 0, 0, 0)
- return r, 0, syscall.Errno(errno)
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo_c.c b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo_c.c
deleted file mode 100644
index d468b7b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo && !aix && !hurd
-
-#include
-#include
-#include
-
-#define _STRINGIFY2_(x) #x
-#define _STRINGIFY_(x) _STRINGIFY2_(x)
-#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
-
-// Call syscall from C code because the gccgo support for calling from
-// Go to C does not support varargs functions.
-
-struct ret {
- uintptr_t r;
- uintptr_t err;
-};
-
-struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
- __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall");
-
-struct ret
-gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
-{
- struct ret r;
-
- errno = 0;
- r.r = syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
- r.err = errno;
- return r;
-}
-
-uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
- __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError");
-
-uintptr_t
-gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
-{
- return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
deleted file mode 100644
index 972d61b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo && linux && amd64
-
-package unix
-
-import "syscall"
-
-//extern gettimeofday
-func realGettimeofday(*Timeval, *byte) int32
-
-func gettimeofday(tv *Timeval) (err syscall.Errno) {
- r := realGettimeofday(tv, nil)
- if r < 0 {
- return syscall.GetErrno()
- }
- return 0
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ifreq_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ifreq_linux.go
deleted file mode 100644
index 848840a..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-
-package unix
-
-import (
- "unsafe"
-)
-
-// Helpers for dealing with ifreq since it contains a union and thus requires a
-// lot of unsafe.Pointer casts to use properly.
-
-// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq
-// contains an interface name and a union of arbitrary data which can be
-// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq
-// function.
-//
-// Use the Name method to access the stored interface name. The union data
-// fields can be get and set using the following methods:
-// - Uint16/SetUint16: flags
-// - Uint32/SetUint32: ifindex, metric, mtu
-type Ifreq struct{ raw ifreq }
-
-// NewIfreq creates an Ifreq with the input network interface name after
-// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required)
-// bytes.
-func NewIfreq(name string) (*Ifreq, error) {
- // Leave room for terminating NULL byte.
- if len(name) >= IFNAMSIZ {
- return nil, EINVAL
- }
-
- var ifr ifreq
- copy(ifr.Ifrn[:], name)
-
- return &Ifreq{raw: ifr}, nil
-}
-
-// TODO(mdlayher): get/set methods for hardware address sockaddr, char array, etc.
-
-// Name returns the interface name associated with the Ifreq.
-func (ifr *Ifreq) Name() string {
- return ByteSliceToString(ifr.raw.Ifrn[:])
-}
-
-// According to netdevice(7), only AF_INET addresses are returned for numerous
-// sockaddr ioctls. For convenience, we expose these as Inet4Addr since the Port
-// field and other data is always empty.
-
-// Inet4Addr returns the Ifreq union data from an embedded sockaddr as a C
-// in_addr/Go []byte (4-byte IPv4 address) value. If the sockaddr family is not
-// AF_INET, an error is returned.
-func (ifr *Ifreq) Inet4Addr() ([]byte, error) {
- raw := *(*RawSockaddrInet4)(unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]))
- if raw.Family != AF_INET {
- // Cannot safely interpret raw.Addr bytes as an IPv4 address.
- return nil, EINVAL
- }
-
- return raw.Addr[:], nil
-}
-
-// SetInet4Addr sets a C in_addr/Go []byte (4-byte IPv4 address) value in an
-// embedded sockaddr within the Ifreq's union data. v must be 4 bytes in length
-// or an error will be returned.
-func (ifr *Ifreq) SetInet4Addr(v []byte) error {
- if len(v) != 4 {
- return EINVAL
- }
-
- var addr [4]byte
- copy(addr[:], v)
-
- ifr.clear()
- *(*RawSockaddrInet4)(
- unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]),
- ) = RawSockaddrInet4{
- // Always set IP family as ioctls would require it anyway.
- Family: AF_INET,
- Addr: addr,
- }
-
- return nil
-}
-
-// Uint16 returns the Ifreq union data as a C short/Go uint16 value.
-func (ifr *Ifreq) Uint16() uint16 {
- return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0]))
-}
-
-// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data.
-func (ifr *Ifreq) SetUint16(v uint16) {
- ifr.clear()
- *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v
-}
-
-// Uint32 returns the Ifreq union data as a C int/Go uint32 value.
-func (ifr *Ifreq) Uint32() uint32 {
- return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0]))
-}
-
-// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data.
-func (ifr *Ifreq) SetUint32(v uint32) {
- ifr.clear()
- *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v
-}
-
-// clear zeroes the ifreq's union field to prevent trailing garbage data from
-// being sent to the kernel if an ifreq is reused.
-func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
-}
-
-// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
-// IoctlGetEthtoolDrvinfo which use these APIs under the hood.
-
-// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData,
-// use the Ifreq.withData method.
-type ifreqData struct {
- name [IFNAMSIZ]byte
- // A type separate from ifreq is required in order to comply with the
- // unsafe.Pointer rules since the "pointer-ness" of data would not be
- // preserved if it were cast into the byte array of a raw ifreq.
- data unsafe.Pointer
- // Pad to the same size as ifreq.
- _ [len(ifreq{}.Ifru) - SizeofPtr]byte
-}
-
-// withData produces an ifreqData with the pointer p set for ioctls which require
-// arbitrary pointer data.
-func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData {
- return ifreqData{
- name: ifr.raw.Ifrn,
- data: p,
- }
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_linux.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_linux.go
deleted file mode 100644
index dbe680e..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unix
-
-import "unsafe"
-
-// IoctlRetInt performs an ioctl operation specified by req on a device
-// associated with opened file descriptor fd, and returns a non-negative
-// integer that is returned by the ioctl syscall.
-func IoctlRetInt(fd int, req uint) (int, error) {
- ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0)
- if err != 0 {
- return 0, err
- }
- return int(ret), nil
-}
-
-func IoctlGetUint32(fd int, req uint) (uint32, error) {
- var value uint32
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetRTCTime(fd int) (*RTCTime, error) {
- var value RTCTime
- err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlSetRTCTime(fd int, value *RTCTime) error {
- return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value))
-}
-
-func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
- var value RTCWkAlrm
- err := ioctlPtr(fd, RTC_WKALM_RD, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
- return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value))
-}
-
-// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
-// device specified by ifname.
-func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
- ifr, err := NewIfreq(ifname)
- if err != nil {
- return nil, err
- }
-
- value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO}
- ifrd := ifr.withData(unsafe.Pointer(&value))
-
- err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
- return &value, err
-}
-
-// IoctlGetWatchdogInfo fetches information about a watchdog device from the
-// Linux watchdog API. For more information, see:
-// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
-func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) {
- var value WatchdogInfo
- err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value))
- return &value, err
-}
-
-// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For
-// more information, see:
-// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
-func IoctlWatchdogKeepalive(fd int) error {
- // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr.
- return ioctl(fd, WDIOC_KEEPALIVE, 0)
-}
-
-// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the
-// range of data conveyed in value to the file associated with the file
-// descriptor destFd. See the ioctl_ficlonerange(2) man page for details.
-func IoctlFileCloneRange(destFd int, value *FileCloneRange) error {
- return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value))
-}
-
-// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file
-// associated with the file description srcFd to the file associated with the
-// file descriptor destFd. See the ioctl_ficlone(2) man page for details.
-func IoctlFileClone(destFd, srcFd int) error {
- return ioctl(destFd, FICLONE, uintptr(srcFd))
-}
-
-type FileDedupeRange struct {
- Src_offset uint64
- Src_length uint64
- Reserved1 uint16
- Reserved2 uint32
- Info []FileDedupeRangeInfo
-}
-
-type FileDedupeRangeInfo struct {
- Dest_fd int64
- Dest_offset uint64
- Bytes_deduped uint64
- Status int32
- Reserved uint32
-}
-
-// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the
-// range of data conveyed in value from the file associated with the file
-// descriptor srcFd to the value.Info destinations. See the
-// ioctl_fideduperange(2) man page for details.
-func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error {
- buf := make([]byte, SizeofRawFileDedupeRange+
- len(value.Info)*SizeofRawFileDedupeRangeInfo)
- rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0]))
- rawrange.Src_offset = value.Src_offset
- rawrange.Src_length = value.Src_length
- rawrange.Dest_count = uint16(len(value.Info))
- rawrange.Reserved1 = value.Reserved1
- rawrange.Reserved2 = value.Reserved2
-
- for i := range value.Info {
- rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
- uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
- uintptr(i*SizeofRawFileDedupeRangeInfo)))
- rawinfo.Dest_fd = value.Info[i].Dest_fd
- rawinfo.Dest_offset = value.Info[i].Dest_offset
- rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped
- rawinfo.Status = value.Info[i].Status
- rawinfo.Reserved = value.Info[i].Reserved
- }
-
- err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0]))
-
- // Output
- for i := range value.Info {
- rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
- uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
- uintptr(i*SizeofRawFileDedupeRangeInfo)))
- value.Info[i].Dest_fd = rawinfo.Dest_fd
- value.Info[i].Dest_offset = rawinfo.Dest_offset
- value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped
- value.Info[i].Status = rawinfo.Status
- value.Info[i].Reserved = rawinfo.Reserved
- }
-
- return err
-}
-
-func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error {
- return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value))
-}
-
-func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) {
- var value HIDRawDevInfo
- err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlHIDGetRawName(fd int) (string, error) {
- var value [_HIDIOCGRAWNAME_LEN]byte
- err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0]))
- return ByteSliceToString(value[:]), err
-}
-
-func IoctlHIDGetRawPhys(fd int) (string, error) {
- var value [_HIDIOCGRAWPHYS_LEN]byte
- err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0]))
- return ByteSliceToString(value[:]), err
-}
-
-func IoctlHIDGetRawUniq(fd int) (string, error) {
- var value [_HIDIOCGRAWUNIQ_LEN]byte
- err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0]))
- return ByteSliceToString(value[:]), err
-}
-
-// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or
-// output. See the netdevice(7) man page for details.
-func IoctlIfreq(fd int, req uint, value *Ifreq) error {
- // It is possible we will add more fields to *Ifreq itself later to prevent
- // misuse, so pass the raw *ifreq directly.
- return ioctlPtr(fd, req, unsafe.Pointer(&value.raw))
-}
-
-// TODO(mdlayher): export if and when IfreqData is exported.
-
-// ioctlIfreqData performs an ioctl using an ifreqData structure for input
-// and/or output. See the netdevice(7) man page for details.
-func ioctlIfreqData(fd int, req uint, value *ifreqData) error {
- // The memory layout of IfreqData (type-safe) and ifreq (not type-safe) are
- // identical so pass *IfreqData directly.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an
-// existing KCM socket, returning a structure containing the file descriptor of
-// the new socket.
-func IoctlKCMClone(fd int) (*KCMClone, error) {
- var info KCMClone
- if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil {
- return nil, err
- }
-
- return &info, nil
-}
-
-// IoctlKCMAttach attaches a TCP socket and associated BPF program file
-// descriptor to a multiplexor.
-func IoctlKCMAttach(fd int, info KCMAttach) error {
- return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info))
-}
-
-// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor.
-func IoctlKCMUnattach(fd int, info KCMUnattach) error {
- return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
-}
-
-// IoctlLoopGetStatus64 gets the status of the loop device associated with the
-// file descriptor fd using the LOOP_GET_STATUS64 operation.
-func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
- var value LoopInfo64
- if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
- return nil, err
- }
- return &value, nil
-}
-
-// IoctlLoopSetStatus64 sets the status of the loop device associated with the
-// file descriptor fd using the LOOP_SET_STATUS64 operation.
-func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
- return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
-}
-
-// IoctlLoopConfigure configures all loop device parameters in a single step
-func IoctlLoopConfigure(fd int, value *LoopConfig) error {
- return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value))
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_signed.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_signed.go
deleted file mode 100644
index 5b0759b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_signed.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || solaris
-
-package unix
-
-import (
- "unsafe"
-)
-
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req int, value int) error {
- return ioctl(fd, req, uintptr(value))
-}
-
-// IoctlSetPointerInt performs an ioctl operation which sets an
-// integer value on fd, using the specified request number. The ioctl
-// argument is called with a pointer to the integer value, rather than
-// passing the integer value directly.
-func IoctlSetPointerInt(fd int, req int, value int) error {
- v := int32(value)
- return ioctlPtr(fd, req, unsafe.Pointer(&v))
-}
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req int, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value will usually be TCSETA or TIOCSETA.
-func IoctlSetTermios(fd int, req int, value *Termios) error {
- // TODO: if we get the chance, remove the req parameter.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-//
-// A few ioctl requests use the return value as an output parameter;
-// for those, IoctlRetInt should be used instead of this function.
-func IoctlGetInt(fd int, req int) (int, error) {
- var value int
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
- var value Winsize
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlGetTermios(fd int, req int) (*Termios, error) {
- var value Termios
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
deleted file mode 100644
index 20f470b..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd
-
-package unix
-
-import (
- "unsafe"
-)
-
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req uint, value int) error {
- return ioctl(fd, req, uintptr(value))
-}
-
-// IoctlSetPointerInt performs an ioctl operation which sets an
-// integer value on fd, using the specified request number. The ioctl
-// argument is called with a pointer to the integer value, rather than
-// passing the integer value directly.
-func IoctlSetPointerInt(fd int, req uint, value int) error {
- v := int32(value)
- return ioctlPtr(fd, req, unsafe.Pointer(&v))
-}
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value will usually be TCSETA or TIOCSETA.
-func IoctlSetTermios(fd int, req uint, value *Termios) error {
- // TODO: if we get the chance, remove the req parameter.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-//
-// A few ioctl requests use the return value as an output parameter;
-// for those, IoctlRetInt should be used instead of this function.
-func IoctlGetInt(fd int, req uint) (int, error) {
- var value int
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
- var value Winsize
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
-
-func IoctlGetTermios(fd int, req uint) (*Termios, error) {
- var value Termios
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_zos.go b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_zos.go
deleted file mode 100644
index c8b2a75..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/ioctl_zos.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build zos && s390x
-
-package unix
-
-import (
- "runtime"
- "unsafe"
-)
-
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req int, value int) error {
- return ioctl(fd, req, uintptr(value))
-}
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req int, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- return ioctlPtr(fd, req, unsafe.Pointer(value))
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value is expected to be TCSETS, TCSETSW, or TCSETSF
-func IoctlSetTermios(fd int, req int, value *Termios) error {
- if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
- return ENOSYS
- }
- err := Tcsetattr(fd, int(req), value)
- runtime.KeepAlive(value)
- return err
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-//
-// A few ioctl requests use the return value as an output parameter;
-// for those, IoctlRetInt should be used instead of this function.
-func IoctlGetInt(fd int, req int) (int, error) {
- var value int
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return value, err
-}
-
-func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
- var value Winsize
- err := ioctlPtr(fd, req, unsafe.Pointer(&value))
- return &value, err
-}
-
-// IoctlGetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value is expected to be TCGETS
-func IoctlGetTermios(fd int, req int) (*Termios, error) {
- var value Termios
- if req != TCGETS {
- return &value, ENOSYS
- }
- err := Tcgetattr(fd, &value)
- return &value, err
-}
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/mkall.sh b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/mkall.sh
deleted file mode 100644
index e6f31d3..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/mkall.sh
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# This script runs or (given -n) prints suggested commands to generate files for
-# the Architecture/OS specified by the GOARCH and GOOS environment variables.
-# See README.md for more information about how the build system works.
-
-GOOSARCH="${GOOS}_${GOARCH}"
-
-# defaults
-mksyscall="go run mksyscall.go"
-mkerrors="./mkerrors.sh"
-zerrors="zerrors_$GOOSARCH.go"
-mksysctl=""
-zsysctl="zsysctl_$GOOSARCH.go"
-mksysnum=
-mktypes=
-mkasm=
-run="sh"
-cmd=""
-
-case "$1" in
--syscalls)
- for i in zsyscall*go
- do
- # Run the command line that appears in the first line
- # of the generated file to regenerate it.
- sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
- rm _$i
- done
- exit 0
- ;;
--n)
- run="cat"
- cmd="echo"
- shift
-esac
-
-case "$#" in
-0)
- ;;
-*)
- echo 'usage: mkall.sh [-n]' 1>&2
- exit 2
-esac
-
-if [[ "$GOOS" = "linux" ]]; then
- # Use the Docker-based build system
- # Files generated through docker (use $cmd so you can Ctl-C the build or run)
- $cmd docker build --tag generate:$GOOS $GOOS
- $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
- exit
-fi
-
-GOOSARCH_in=syscall_$GOOSARCH.go
-case "$GOOSARCH" in
-_* | *_ | _)
- echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
- exit 1
- ;;
-aix_ppc)
- mkerrors="$mkerrors -maix32"
- mksyscall="go run mksyscall_aix_ppc.go -aix"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-aix_ppc64)
- mkerrors="$mkerrors -maix64"
- mksyscall="go run mksyscall_aix_ppc64.go -aix"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-darwin_amd64)
- mkerrors="$mkerrors -m64"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm.go"
- ;;
-darwin_arm64)
- mkerrors="$mkerrors -m64"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm.go"
- ;;
-dragonfly_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -dragonfly"
- mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_amd64)
- mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_arm)
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -arm"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-freebsd_arm64)
- mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-freebsd_riscv64)
- mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-netbsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32 -netbsd"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -netbsd"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_arm)
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -netbsd -arm"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-netbsd_arm64)
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -netbsd"
- mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_386)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32 -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_amd64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_arm)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_arm64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_mips64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_ppc64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_riscv64)
- mkasm="go run mkasm.go"
- mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd -libc"
- mksysctl="go run mksysctl_openbsd.go"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-solaris_amd64)
- mksyscall="go run mksyscall_solaris.go"
- mkerrors="$mkerrors -m64"
- mksysnum=
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-illumos_amd64)
- mksyscall="go run mksyscall_solaris.go"
- mkerrors=
- mksysnum=
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-*)
- echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
- exit 1
- ;;
-esac
-
-(
- if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
- case "$GOOS" in
- *)
- syscall_goos="syscall_$GOOS.go"
- case "$GOOS" in
- darwin | dragonfly | freebsd | netbsd | openbsd)
- syscall_goos="syscall_bsd.go $syscall_goos"
- ;;
- esac
- if [ -n "$mksyscall" ]; then
- if [ "$GOOSARCH" == "aix_ppc64" ]; then
- # aix/ppc64 script generates files instead of writing to stdin.
- echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
- elif [ "$GOOS" == "illumos" ]; then
- # illumos code generation requires a --illumos switch
- echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
- # illumos implies solaris, so solaris code generation is also required
- echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
- else
- echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
- fi
- fi
- esac
- if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
- if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
- if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
- if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi
-) | $run
diff --git a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/mkerrors.sh b/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/mkerrors.sh
deleted file mode 100644
index fdcaa97..0000000
--- a/backend/services/mtp/stomp-adapter/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ /dev/null
@@ -1,789 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# Generate Go code listing errors and other #defined constant
-# values (ENAMETOOLONG etc.), by asking the preprocessor
-# about the definitions.
-
-unset LANG
-export LC_ALL=C
-export LC_CTYPE=C
-
-if test -z "$GOARCH" -o -z "$GOOS"; then
- echo 1>&2 "GOARCH or GOOS not defined in environment"
- exit 1
-fi
-
-# Check that we are using the new build system if we should
-if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
- echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
- echo 1>&2 "See README.md"
- exit 1
-fi
-
-if [[ "$GOOS" = "aix" ]]; then
- CC=${CC:-gcc}
-else
- CC=${CC:-cc}
-fi
-
-if [[ "$GOOS" = "solaris" ]]; then
- # Assumes GNU versions of utilities in PATH.
- export PATH=/usr/gnu/bin:$PATH
-fi
-
-uname=$(uname)
-
-includes_AIX='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define AF_LOCAL AF_UNIX
-'
-
-includes_Darwin='
-#define _DARWIN_C_SOURCE
-#define KERNEL 1
-#define _DARWIN_USE_64_BIT_INODE
-#define __APPLE_USE_RFC_3542
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-// for backwards compatibility because moved TIOCREMOTE to Kernel.framework after MacOSX12.0.sdk.
-#define TIOCREMOTE 0x80047469
-'
-
-includes_DragonFly='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-'
-
-includes_FreeBSD='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#if __FreeBSD__ >= 10
-#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
-#undef SIOCAIFADDR
-#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
-#undef SIOCSIFPHYADDR
-#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
-#endif
-'
-
-includes_Linux='
-#define _LARGEFILE_SOURCE
-#define _LARGEFILE64_SOURCE
-#ifndef __LP64__
-#define _FILE_OFFSET_BITS 64
-#endif
-#define _GNU_SOURCE
-
-// is broken on powerpc64, as it fails to include definitions of
-// these structures. We just include them copied from .
-#if defined(__powerpc__)
-struct sgttyb {
- char sg_ispeed;
- char sg_ospeed;
- char sg_erase;
- char sg_kill;
- short sg_flags;
-};
-
-struct tchars {
- char t_intrc;
- char t_quitc;
- char t_startc;
- char t_stopc;
- char t_eofc;
- char t_brkc;
-};
-
-struct ltchars {
- char t_suspc;
- char t_dsuspc;
- char t_rprntc;
- char t_flushc;
- char t_werasc;
- char t_lnextc;
-};
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-
-#if defined(__sparc__)
-// On sparc{,64}, the kernel defines struct termios2 itself which clashes with the
-// definition in glibc. As only the error constants are needed here, include the
-// generic termibits.h (which is included by termbits.h on sparc).
-#include
-#else
-#include
-#endif
-
-#ifndef PTRACE_GETREGS
-#define PTRACE_GETREGS 0xc
-#endif
-
-#ifndef PTRACE_SETREGS
-#define PTRACE_SETREGS 0xd
-#endif
-
-#ifdef SOL_BLUETOOTH
-// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
-// but it is already in bluetooth_linux.go
-#undef SOL_BLUETOOTH
-#endif
-
-// Certain constants are missing from the fs/crypto UAPI
-#define FS_KEY_DESC_PREFIX "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE 8
-#define FS_MAX_KEY_SIZE 64
-
-// The code generator produces -0x1 for (~0), but an unsigned value is necessary
-// for the tipc_subscr timeout __u32 field.
-#undef TIPC_WAIT_FOREVER
-#define TIPC_WAIT_FOREVER 0xffffffff
-
-// Copied from linux/netfilter/nf_nat.h
-// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h
-// and netinet/in.h.
-#define NF_NAT_RANGE_MAP_IPS (1 << 0)
-#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1)
-#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
-#define NF_NAT_RANGE_PERSISTENT (1 << 3)
-#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
-#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
-#define NF_NAT_RANGE_NETMAP (1 << 6)
-#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
- (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
-#define NF_NAT_RANGE_MASK \
- (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
- NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
- NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \
- NF_NAT_RANGE_NETMAP)
-
-// Copied from linux/hid.h.
-// Keep in sync with the size of the referenced fields.
-#define _HIDIOCGRAWNAME_LEN 128 // sizeof_field(struct hid_device, name)
-#define _HIDIOCGRAWPHYS_LEN 64 // sizeof_field(struct hid_device, phys)
-#define _HIDIOCGRAWUNIQ_LEN 64 // sizeof_field(struct hid_device, uniq)
-
-#define _HIDIOCGRAWNAME HIDIOCGRAWNAME(_HIDIOCGRAWNAME_LEN)
-#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
-#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
-
-'
-
-includes_NetBSD='
-#include
-#include
-#include
-#include
-#include
-#include
-#include